repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list) |
---|---|---|---|---|---|
Knarik1/transformers |
[
"c2a7d7280250addae38a49c31a57ddd897be2065",
"c2a7d7280250addae38a49c31a57ddd897be2065",
"c2a7d7280250addae38a49c31a57ddd897be2065",
"c2a7d7280250addae38a49c31a57ddd897be2065",
"c2a7d7280250addae38a49c31a57ddd897be2065"
] |
[
"examples/tensorflow/multiple-choice/run_swag.py",
"src/transformers/models/mobilebert/modeling_mobilebert.py",
"src/transformers/models/funnel/modeling_funnel.py",
"src/transformers/models/layoutlm/modeling_tf_layoutlm.py",
"src/transformers/models/roformer/modeling_tf_roformer.py"
] |
[
"#!/usr/bin/env python\n# coding=utf-8\n# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for multiple choice.\n\"\"\"\n# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments.\n\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom itertools import chain\nfrom pathlib import Path\nfrom typing import Optional\n\nimport datasets\nimport numpy as np\nimport tensorflow as tf\nfrom datasets import load_dataset\n\nimport transformers\nfrom transformers import (\n CONFIG_NAME,\n TF2_WEIGHTS_NAME,\n AutoConfig,\n AutoTokenizer,\n HfArgumentParser,\n TFAutoModelForMultipleChoice,\n TFTrainingArguments,\n create_optimizer,\n set_seed,\n)\nfrom transformers.utils import check_min_version\n\n\n# Will error if the minimal version of Transformers is not installed. Remove at your own risks.\ncheck_min_version(\"4.15.0.dev0\")\n\nlogger = logging.getLogger(__name__)\n\n\n# region Helper classes and functions\nclass SavePretrainedCallback(tf.keras.callbacks.Callback):\n # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary\n # metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback\n # that saves the model with this method after each epoch.\n def __init__(self, output_dir, **kwargs):\n super().__init__()\n self.output_dir = output_dir\n\n def on_epoch_end(self, epoch, logs=None):\n self.model.save_pretrained(self.output_dir)\n\n\ndef convert_dataset_for_tensorflow(\n dataset, non_label_column_names, batch_size, dataset_mode=\"variable_batch\", shuffle=True, drop_remainder=True\n):\n \"\"\"Converts a Hugging Face dataset to a Tensorflow Dataset. The dataset_mode controls whether we pad all batches\n to the maximum sequence length, or whether we only pad to the maximum length within that batch. 
The former\n is most useful when training on TPU, as a new graph compilation is required for each sequence length.\n \"\"\"\n\n def densify_ragged_batch(features, label=None):\n features = {\n feature: ragged_tensor.to_tensor(shape=batch_shape[feature]) for feature, ragged_tensor in features.items()\n }\n if label is None:\n return features\n else:\n return features, label\n\n feature_keys = list(set(dataset.features.keys()) - set(non_label_column_names + [\"label\"]))\n if dataset_mode == \"variable_batch\":\n batch_shape = {key: None for key in feature_keys}\n data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}\n elif dataset_mode == \"constant_batch\":\n data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}\n batch_shape = {\n key: tf.concat(([batch_size], ragged_tensor.bounding_shape()[1:]), axis=0)\n for key, ragged_tensor in data.items()\n }\n else:\n raise ValueError(\"Unknown dataset mode!\")\n\n if \"label\" in dataset.features:\n labels = tf.convert_to_tensor(np.array(dataset[\"label\"]))\n tf_dataset = tf.data.Dataset.from_tensor_slices((data, labels))\n else:\n tf_dataset = tf.data.Dataset.from_tensor_slices(data)\n if shuffle:\n tf_dataset = tf_dataset.shuffle(buffer_size=len(dataset))\n options = tf.data.Options()\n options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF\n tf_dataset = (\n tf_dataset.with_options(options)\n .batch(batch_size=batch_size, drop_remainder=drop_remainder)\n .map(densify_ragged_batch)\n )\n return tf_dataset\n\n\n# endregion\n\n# region Arguments\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where do you want to store the pretrained models downloaded from huggingface.co\"},\n )\n use_fast_tokenizer: bool = field(\n default=True,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n },\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n train_file: Optional[str] = field(default=None, metadata={\"help\": \"The input training data file (a text file).\"})\n validation_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input evaluation data file to evaluate the perplexity on (a text file).\"},\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of 
processes to use for the preprocessing.\"},\n )\n max_seq_length: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. If passed, sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to pad all samples to the maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. More \"\n \"efficient on GPU but very bad for TPU.\"\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n },\n )\n max_eval_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\"\n },\n )\n\n def __post_init__(self):\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`validation_file` should be a csv or a json file.\"\n\n\n# endregion\n\n\ndef main():\n # region Argument parsing\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n output_dir = Path(training_args.output_dir)\n output_dir.mkdir(parents=True, exist_ok=True)\n # endregion\n\n # region Logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n log_level = training_args.get_process_log_level()\n logger.setLevel(log_level)\n datasets.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n # endregion\n\n # region Checkpoints\n checkpoint = None\n if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:\n if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file():\n checkpoint = output_dir\n logger.info(\n f\"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this\"\n \" behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\n )\n else:\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. 
\"\n \"Use --overwrite_output_dir to continue regardless.\"\n )\n # endregion\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # region Load datasets\n # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n\n # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n # 'text' is found. You can easily tweak this behavior (see below).\n\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if data_args.train_file is not None or data_args.validation_file is not None:\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n extension = data_args.train_file.split(\".\")[-1]\n raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)\n else:\n # Downloading and loading the swag dataset from the hub.\n raw_datasets = load_dataset(\"swag\", \"regular\", cache_dir=model_args.cache_dir)\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # When using your own dataset or a different dataset from swag, you will probably need to change this.\n ending_names = [f\"ending{i}\" for i in range(4)]\n context_name = \"sent1\"\n question_header_name = \"sent2\"\n # endregion\n\n # region Load model config and tokenizer\n if checkpoint is not None:\n config_path = training_args.output_dir\n elif model_args.config_name:\n config_path = model_args.config_name\n else:\n config_path = model_args.model_name_or_path\n\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(\n config_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=model_args.use_fast_tokenizer,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n # endregion\n\n # region Dataset preprocessing\n if data_args.max_seq_length is None:\n max_seq_length = tokenizer.model_max_length\n if max_seq_length > 1024:\n logger.warning(\n f\"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). \"\n \"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx.\"\n )\n max_seq_length = 1024\n else:\n if data_args.max_seq_length > tokenizer.model_max_length:\n logger.warning(\n f\"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the\"\n f\"model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}.\"\n )\n max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)\n\n def preprocess_function(examples):\n first_sentences = [[context] * 4 for context in examples[context_name]]\n question_headers = examples[question_header_name]\n second_sentences = [\n [f\"{header} {examples[end][i]}\" for end in ending_names] for i, header in enumerate(question_headers)\n ]\n\n # Flatten out\n first_sentences = list(chain(*first_sentences))\n second_sentences = list(chain(*second_sentences))\n\n # Tokenize\n tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True, max_length=max_seq_length)\n # Un-flatten\n data = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}\n return data\n\n if training_args.do_train:\n if \"train\" not in raw_datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n train_dataset = raw_datasets[\"train\"]\n non_label_columns = [feature for feature in train_dataset.features if feature not in (\"label\", \"labels\")]\n if data_args.max_train_samples is not None:\n train_dataset = train_dataset.select(range(data_args.max_train_samples))\n with training_args.main_process_first(desc=\"train dataset map pre-processing\"):\n train_dataset = train_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n if training_args.do_eval:\n if \"validation\" not in raw_datasets:\n raise ValueError(\"--do_eval requires a validation dataset\")\n eval_dataset = raw_datasets[\"validation\"]\n if not training_args.do_train:\n non_label_columns = [feature for feature in eval_dataset.features if feature not in (\"label\", \"labels\")]\n if data_args.max_eval_samples is not None:\n eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))\n with training_args.main_process_first(desc=\"validation dataset map pre-processing\"):\n eval_dataset = eval_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n # endregion\n\n with training_args.strategy.scope():\n # region Build model\n if checkpoint is None:\n model_path = model_args.model_name_or_path\n else:\n model_path = checkpoint\n model = TFAutoModelForMultipleChoice.from_pretrained(\n model_path,\n config=config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n\n num_replicas = training_args.strategy.num_replicas_in_sync\n total_train_batch_size = training_args.per_device_train_batch_size * num_replicas\n total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas\n if training_args.do_train:\n total_train_steps = (len(train_dataset) // total_train_batch_size) * int(training_args.num_train_epochs)\n optimizer, lr_schedule = create_optimizer(\n init_lr=training_args.learning_rate, num_train_steps=int(total_train_steps), num_warmup_steps=0\n )\n else:\n optimizer = \"adam\" # Just put anything in here, since we're not using it anyway\n model.compile(\n optimizer=optimizer,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"accuracy\")],\n )\n # endregion\n\n # region Training\n if training_args.do_train:\n tf_train_dataset = convert_dataset_for_tensorflow(\n train_dataset, 
non_label_column_names=non_label_columns, batch_size=total_train_batch_size\n )\n if training_args.do_eval:\n validation_data = convert_dataset_for_tensorflow(\n eval_dataset, non_label_column_names=non_label_columns, batch_size=total_eval_batch_size\n )\n else:\n validation_data = None\n model.fit(\n tf_train_dataset,\n validation_data=validation_data,\n epochs=int(training_args.num_train_epochs),\n callbacks=[SavePretrainedCallback(output_dir=training_args.output_dir)],\n )\n # endregion\n\n # region Evaluation\n if training_args.do_eval and not training_args.do_train:\n # Do a standalone evaluation pass\n tf_eval_dataset = convert_dataset_for_tensorflow(\n eval_dataset, non_label_column_names=non_label_columns, batch_size=total_eval_batch_size\n )\n model.evaluate(tf_eval_dataset)\n # endregion\n\n # region Push to hub\n if training_args.push_to_hub:\n model.push_to_hub(\n finetuned_from=model_args.model_name_or_path,\n tasks=\"multiple-choice\",\n dataset_tags=\"swag\",\n dataset_args=\"regular\",\n dataset=\"SWAG\",\n language=\"en\",\n )\n # endregion\n\n\nif __name__ == \"__main__\":\n main()\n",
"# MIT License\n#\n# Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport math\nimport os\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPooling,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n NextSentencePredictorOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer\nfrom ...utils import logging\nfrom .configuration_mobilebert import MobileBertConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"google/mobilebert-uncased\"\n_CONFIG_FOR_DOC = \"MobileBertConfig\"\n_TOKENIZER_FOR_DOC = \"MobileBertTokenizer\"\n\nMOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\"google/mobilebert-uncased\"]\n\n\ndef load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(f\"Loading TF weight {name} with shape {shape}\")\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.replace(\"ffn_layer\", \"ffn\")\n name = name.replace(\"FakeLayerNorm\", \"LayerNorm\")\n name = name.replace(\"extra_output_weights\", \"dense/kernel\")\n name = name.replace(\"bert\", \"mobilebert\")\n name = name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if m_name[-11:] == \"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(f\"Initialize PyTorch weight {name}\")\n pointer.data = torch.from_numpy(array)\n return model\n\n\nclass NoNorm(nn.Module):\n def __init__(self, feat_size, eps=None):\n super().__init__()\n self.bias = nn.Parameter(torch.zeros(feat_size))\n self.weight = nn.Parameter(torch.ones(feat_size))\n\n def forward(self, input_tensor):\n return input_tensor * self.weight + self.bias\n\n\nNORM2FN = {\"layer_norm\": nn.LayerNorm, \"no_norm\": NoNorm}\n\n\nclass MobileBertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.trigram_input = config.trigram_input\n self.embedding_size = config.embedding_size\n self.hidden_size = config.hidden_size\n\n self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n embed_dim_multiplier = 3 if self.trigram_input else 1\n embedded_input_size = self.embedding_size * embed_dim_multiplier\n self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size)\n\n 
self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, :seq_length]\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n if self.trigram_input:\n # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited\n # Devices (https://arxiv.org/abs/2004.02984)\n #\n # The embedding table in BERT models accounts for a substantial proportion of model size. To compress\n # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.\n # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512\n # dimensional output.\n inputs_embeds = torch.cat(\n [\n nn.functional.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0),\n inputs_embeds,\n nn.functional.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0),\n ],\n dim=2,\n )\n if self.trigram_input or self.embedding_size != self.hidden_size:\n inputs_embeds = self.embedding_transformation(inputs_embeds)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n embeddings = inputs_embeds + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass MobileBertSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.true_hidden_size, self.all_head_size)\n self.key = nn.Linear(config.true_hidden_size, self.all_head_size)\n self.value = nn.Linear(\n config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size\n )\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n query_tensor,\n key_tensor,\n value_tensor,\n attention_mask=None,\n head_mask=None,\n output_attentions=None,\n ):\n mixed_query_layer = self.query(query_tensor)\n mixed_key_layer = self.key(key_tensor)\n mixed_value_layer = self.value(value_tensor)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n 
attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n # Normalize the attention scores to probabilities.\n attention_probs = nn.functional.softmax(attention_scores, dim=-1)\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n return outputs\n\n\nclass MobileBertSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_bottleneck = config.use_bottleneck\n self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)\n if not self.use_bottleneck:\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, residual_tensor):\n layer_outputs = self.dense(hidden_states)\n if not self.use_bottleneck:\n layer_outputs = self.dropout(layer_outputs)\n layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)\n return layer_outputs\n\n\nclass MobileBertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = MobileBertSelfAttention(config)\n self.output = MobileBertSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n query_tensor,\n key_tensor,\n value_tensor,\n layer_input,\n attention_mask=None,\n head_mask=None,\n output_attentions=None,\n ):\n self_outputs = self.self(\n query_tensor,\n key_tensor,\n value_tensor,\n attention_mask,\n head_mask,\n output_attentions,\n )\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n attention_output = self.output(self_outputs[0], layer_input)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass MobileBertIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = 
config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass OutputBottleneck(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.true_hidden_size, config.hidden_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, residual_tensor):\n layer_outputs = self.dense(hidden_states)\n layer_outputs = self.dropout(layer_outputs)\n layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)\n return layer_outputs\n\n\nclass MobileBertOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_bottleneck = config.use_bottleneck\n self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size)\n if not self.use_bottleneck:\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n else:\n self.bottleneck = OutputBottleneck(config)\n\n def forward(self, intermediate_states, residual_tensor_1, residual_tensor_2):\n layer_output = self.dense(intermediate_states)\n if not self.use_bottleneck:\n layer_output = self.dropout(layer_output)\n layer_output = self.LayerNorm(layer_output + residual_tensor_1)\n else:\n layer_output = self.LayerNorm(layer_output + residual_tensor_1)\n layer_output = self.bottleneck(layer_output, residual_tensor_2)\n return layer_output\n\n\nclass BottleneckLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n layer_input = self.dense(hidden_states)\n layer_input = self.LayerNorm(layer_input)\n return layer_input\n\n\nclass Bottleneck(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.key_query_shared_bottleneck = config.key_query_shared_bottleneck\n self.use_bottleneck_attention = config.use_bottleneck_attention\n self.input = BottleneckLayer(config)\n if self.key_query_shared_bottleneck:\n self.attention = BottleneckLayer(config)\n\n def forward(self, hidden_states):\n # This method can return three different tuples of values. These different values make use of bottlenecks,\n # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory\n # usage. These linear layer have weights that are learned during training.\n #\n # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the\n # key, query, value, and \"layer input\" to be used by the attention layer.\n # This bottleneck is used to project the hidden. 
This last layer input will be used as a residual tensor\n # in the attention self output, after the attention scores have been computed.\n #\n # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return\n # four values, three of which have been passed through a bottleneck: the query and key, passed through the same\n # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck.\n #\n # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck,\n # and the residual layer will be this value passed through a bottleneck.\n\n bottlenecked_hidden_states = self.input(hidden_states)\n if self.use_bottleneck_attention:\n return (bottlenecked_hidden_states,) * 4\n elif self.key_query_shared_bottleneck:\n shared_attention_input = self.attention(hidden_states)\n return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)\n else:\n return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)\n\n\nclass FFNOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, residual_tensor):\n layer_outputs = self.dense(hidden_states)\n layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)\n return layer_outputs\n\n\nclass FFNLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.intermediate = MobileBertIntermediate(config)\n self.output = FFNOutput(config)\n\n def forward(self, hidden_states):\n intermediate_output = self.intermediate(hidden_states)\n layer_outputs = self.output(intermediate_output, hidden_states)\n return layer_outputs\n\n\nclass MobileBertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_bottleneck = config.use_bottleneck\n self.num_feedforward_networks = config.num_feedforward_networks\n\n self.attention = MobileBertAttention(config)\n self.intermediate = MobileBertIntermediate(config)\n self.output = MobileBertOutput(config)\n if self.use_bottleneck:\n self.bottleneck = Bottleneck(config)\n if config.num_feedforward_networks > 1:\n self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=None,\n ):\n if self.use_bottleneck:\n query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)\n else:\n query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4\n\n self_attention_outputs = self.attention(\n query_tensor,\n key_tensor,\n value_tensor,\n layer_input,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n )\n attention_output = self_attention_outputs[0]\n s = (attention_output,)\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n if self.num_feedforward_networks != 1:\n for i, ffn_module in enumerate(self.ffn):\n attention_output = ffn_module(attention_output)\n s += (attention_output,)\n\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output, hidden_states)\n outputs = (\n (layer_output,)\n + outputs\n + (\n torch.tensor(1000),\n query_tensor,\n key_tensor,\n value_tensor,\n layer_input,\n 
attention_output,\n intermediate_output,\n )\n + s\n )\n return outputs\n\n\nclass MobileBertEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n head_mask[i],\n output_attentions,\n )\n hidden_states = layer_outputs[0]\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions\n )\n\n\nclass MobileBertPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.do_activate = config.classifier_activation\n if self.do_activate:\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n if not self.do_activate:\n return first_token_tensor\n else:\n pooled_output = self.dense(first_token_tensor)\n pooled_output = torch.tanh(pooled_output)\n return pooled_output\n\n\nclass MobileBertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = NORM2FN[\"layer_norm\"](config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass MobileBertLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = MobileBertPredictionHeadTransform(config)\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False)\n self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0))\n hidden_states += self.decoder.bias\n return hidden_states\n\n\nclass MobileBertOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = MobileBertLMPredictionHead(config)\n\n def forward(self, sequence_output):\n 
prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass MobileBertPreTrainingHeads(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = MobileBertLMPredictionHead(config)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass MobileBertPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = MobileBertConfig\n pretrained_model_archive_map = MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST\n load_tf_weights = load_tf_weights_in_mobilebert\n base_model_prefix = \"mobilebert\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, (nn.LayerNorm, NoNorm)):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\n@dataclass\nclass MobileBertForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of :class:`~transformers.MobileBertForPreTraining`.\n\n Args:\n loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):\n Total loss as the sum of the masked language modeling loss and the next sequence prediction\n (classification) loss.\n prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation\n before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n prediction_logits: torch.FloatTensor = None\n seq_relationship_logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: 
Optional[Tuple[torch.FloatTensor]] = None\n\n\nMOBILEBERT_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.MobileBertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nMOBILEBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.BertTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertModel(MobileBertPreTrainedModel):\n \"\"\"\n https://arxiv.org/pdf/2004.02984.pdf\n \"\"\"\n\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n self.embeddings = MobileBertEmbeddings(config)\n self.encoder = MobileBertEncoder(config)\n\n self.pooler = MobileBertPooler(config) if add_pooling_layer else None\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPooling,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_hidden_states=None,\n output_attentions=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n attention_mask, input_shape, self.device\n )\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids, 
position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a\n `next sentence prediction (classification)` head.\n \"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForPreTraining(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.mobilebert = MobileBertModel(config)\n self.cls = MobileBertPreTrainingHeads(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddigs):\n self.cls.predictions.decoder = new_embeddigs\n\n def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:\n # resize dense output embedings at first\n self.cls.predictions.dense = self._get_resized_lm_head(\n self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True\n )\n\n return super().resize_token_embeddings(new_num_tokens=new_num_tokens)\n\n @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=MobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n next_sentence_label=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):\n Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair\n (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:\n\n - 0 indicates sequence B is a continuation of sequence A,\n - 1 indicates sequence B is a random sequence.\n\n Returns:\n\n Examples::\n\n >>> from transformers import MobileBertTokenizer, MobileBertForPreTraining\n >>> import torch\n\n >>> tokenizer = MobileBertTokenizer.from_pretrained(\"google/mobilebert-uncased\")\n >>> model = MobileBertForPreTraining.from_pretrained(\"google/mobilebert-uncased\")\n\n >>> input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n >>> outputs = model(input_ids)\n\n >>> prediction_logits = outputs.prediction_logits\n >>> seq_relationship_logits = outputs.seq_relationship_logits\n\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output, pooled_output = outputs[:2]\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n\n total_loss = None\n if labels is not None and next_sentence_label is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = masked_lm_loss + next_sentence_loss\n\n if not return_dict:\n output = (prediction_scores, seq_relationship_score) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return MobileBertForPreTrainingOutput(\n loss=total_loss,\n prediction_logits=prediction_scores,\n seq_relationship_logits=seq_relationship_score,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\"\"\"MobileBert Model with a `language modeling` head on top. 
\"\"\", MOBILEBERT_START_DOCSTRING)\nclass MobileBertForMaskedLM(MobileBertPreTrainedModel):\n\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.mobilebert = MobileBertModel(config, add_pooling_layer=False)\n self.cls = MobileBertOnlyMLMHead(config)\n self.config = config\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddigs):\n self.cls.predictions.decoder = new_embeddigs\n\n def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:\n # resize dense output embedings at first\n self.cls.predictions.dense = self._get_resized_lm_head(\n self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True\n )\n return super().resize_token_embeddings(new_num_tokens=new_num_tokens)\n\n @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass MobileBertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\n@add_start_docstrings(\n \"\"\"MobileBert Model with a `next sentence prediction (classification)` head on top. 
\"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForNextSentencePrediction(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.mobilebert = MobileBertModel(config)\n self.cls = MobileBertOnlyNSPHead(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair\n (see ``input_ids`` docstring) Indices should be in ``[0, 1]``.\n\n - 0 indicates sequence B is a continuation of sequence A,\n - 1 indicates sequence B is a random sequence.\n\n Returns:\n\n Examples::\n\n >>> from transformers import MobileBertTokenizer, MobileBertForNextSentencePrediction\n >>> import torch\n\n >>> tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased')\n >>> model = MobileBertForNextSentencePrediction.from_pretrained('google/mobilebert-uncased')\n\n >>> prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n >>> next_sentence = \"The sky is blue due to the shorter wavelength of blue light.\"\n >>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')\n\n >>> outputs = model(**encoding, labels=torch.LongTensor([1]))\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n \"\"\"\n\n if \"next_sentence_label\" in kwargs:\n warnings.warn(\n \"The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"next_sentence_label\")\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n seq_relationship_score = self.cls(pooled_output)\n\n next_sentence_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), labels.view(-1))\n\n if not return_dict:\n output = (seq_relationship_score,) + outputs[2:]\n return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output\n\n return NextSentencePredictorOutput(\n loss=next_sentence_loss,\n logits=seq_relationship_score,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. 
for GLUE tasks.\n \"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\n# Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing\nclass MobileBertForSequenceClassification(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n\n self.mobilebert = MobileBertModel(config)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a\n linear layers on top of the hidden-states output to compute `span start logits` and `span end 
logits`).\n \"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\n# Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering with Bert->MobileBert all-casing\nclass MobileBertForQuestionAnswering(MobileBertPreTrainedModel):\n\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.mobilebert = MobileBertModel(config, add_pooling_layer=False)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and\n a softmax) e.g. for RocStories/SWAG tasks.\n \"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\n# Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice with Bert->MobileBert all-casing\nclass MobileBertForMultipleChoice(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.mobilebert = MobileBertModel(config)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(\n MOBILEBERT_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\")\n )\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
(See\n :obj:`input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.\n for Named-Entity-Recognition (NER) tasks.\n \"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\n# Copied from transformers.models.bert.modeling_bert.BertForTokenClassification with Bert->MobileBert all-casing\nclass MobileBertForTokenClassification(MobileBertPreTrainedModel):\n\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.mobilebert = MobileBertModel(config, add_pooling_layer=False)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n",
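The following is a minimal usage sketch (not part of the upstream file above) for the MobileBertForMaskedLM head defined in that file; it assumes the public google/mobilebert-uncased checkpoint already referenced in the file's own docstring examples.

import torch
from transformers import MobileBertForMaskedLM, MobileBertTokenizer

# Assumption: the "google/mobilebert-uncased" checkpoint used in the docstrings above.
tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
model = MobileBertForMaskedLM.from_pretrained("google/mobilebert-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (batch_size, sequence_length, vocab_size)

# Locate the [MASK] position(s) and take the highest-scoring vocabulary id at each one.
mask_positions = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))

The same pattern (tokenize, forward pass, read the task-specific logits from the returned output object) applies to the other heads defined in the file, such as MobileBertForSequenceClassification or MobileBertForTokenClassification.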
"# coding=utf-8\n# Copyright 2020-present Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch Funnel Transformer model. \"\"\"\n\nimport os\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutput,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import logging\nfrom .configuration_funnel import FunnelConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"FunnelConfig\"\n_TOKENIZER_FOR_DOC = \"FunnelTokenizer\"\n_CHECKPOINT_FOR_DOC = \"funnel-transformer/small\"\n\nFUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"funnel-transformer/small\", # B4-4-4H768\n \"funnel-transformer/small-base\", # B4-4-4H768, no decoder\n \"funnel-transformer/medium\", # B6-3x2-3x2H768\n \"funnel-transformer/medium-base\", # B6-3x2-3x2H768, no decoder\n \"funnel-transformer/intermediate\", # B6-6-6H768\n \"funnel-transformer/intermediate-base\", # B6-6-6H768, no decoder\n \"funnel-transformer/large\", # B8-8-8H1024\n \"funnel-transformer/large-base\", # B8-8-8H1024, no decoder\n \"funnel-transformer/xlarge-base\", # B10-10-10H1024\n \"funnel-transformer/xlarge\", # B10-10-10H1024, no decoder\n]\n\nINF = 1e6\n\n\ndef load_tf_weights_in_funnel(model, config, tf_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(f\"Loading TF weight {name} with shape {shape}\")\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n _layer_map = {\n \"k\": \"k_head\",\n \"q\": \"q_head\",\n \"v\": \"v_head\",\n \"o\": \"post_proj\",\n \"layer_1\": \"linear_1\",\n \"layer_2\": \"linear_2\",\n \"rel_attn\": \"attention\",\n \"ff\": \"ffn\",\n \"kernel\": \"weight\",\n \"gamma\": \"weight\",\n \"beta\": \"bias\",\n \"lookup_table\": \"weight\",\n \"word_embedding\": \"word_embeddings\",\n \"input\": \"embeddings\",\n }\n\n for name, array in zip(names, arrays):\n name = name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n if name[0] == \"generator\":\n continue\n pointer = model\n skipped = False\n for m_name in name[1:]:\n if not isinstance(pointer, FunnelPositionwiseFFN) and re.fullmatch(r\"layer_\\d+\", m_name):\n layer_index = int(re.search(r\"layer_(\\d+)\", m_name).groups()[0])\n if layer_index < config.num_hidden_layers:\n block_idx = 0\n while layer_index >= config.block_sizes[block_idx]:\n layer_index -= config.block_sizes[block_idx]\n block_idx += 1\n pointer = pointer.blocks[block_idx][layer_index]\n else:\n layer_index -= config.num_hidden_layers\n pointer = pointer.layers[layer_index]\n elif m_name == \"r\" and isinstance(pointer, FunnelRelMultiheadAttention):\n pointer = pointer.r_kernel\n break\n elif m_name in _layer_map:\n pointer = getattr(pointer, _layer_map[m_name])\n else:\n try:\n pointer = getattr(pointer, m_name)\n except AttributeError:\n print(f\"Skipping {'/'.join(name)}\", array.shape)\n skipped = True\n break\n if not skipped:\n if len(pointer.shape) != len(array.shape):\n array = array.reshape(pointer.shape)\n if m_name == \"kernel\":\n array = np.transpose(array)\n pointer.data = torch.from_numpy(array)\n\n return model\n\n\nclass FunnelEmbeddings(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n\n def forward(self, input_ids=None, inputs_embeds=None):\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n embeddings = self.layer_norm(inputs_embeds)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass FunnelAttentionStructure(nn.Module):\n \"\"\"\n Contains helpers for `FunnelRelMultiheadAttention `.\n \"\"\"\n\n cls_token_type_id: int = 2\n\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.sin_dropout = nn.Dropout(config.hidden_dropout)\n self.cos_dropout = nn.Dropout(config.hidden_dropout)\n # Track where we are at in terms of pooling from the original input, e.g., by how much the sequence length was\n # divided.\n self.pooling_mult = None\n\n def 
init_attention_inputs(self, inputs_embeds, attention_mask=None, token_type_ids=None):\n \"\"\"Returns the attention inputs associated to the inputs of the model.\"\"\"\n # inputs_embeds has shape batch_size x seq_len x d_model\n # attention_mask and token_type_ids have shape batch_size x seq_len\n self.pooling_mult = 1\n self.seq_len = seq_len = inputs_embeds.size(1)\n position_embeds = self.get_position_embeds(seq_len, inputs_embeds.dtype, inputs_embeds.device)\n token_type_mat = self.token_type_ids_to_mat(token_type_ids) if token_type_ids is not None else None\n cls_mask = (\n nn.functional.pad(inputs_embeds.new_ones([seq_len - 1, seq_len - 1]), (1, 0, 1, 0))\n if self.config.separate_cls\n else None\n )\n return (position_embeds, token_type_mat, attention_mask, cls_mask)\n\n def token_type_ids_to_mat(self, token_type_ids):\n \"\"\"Convert `token_type_ids` to `token_type_mat`.\"\"\"\n token_type_mat = token_type_ids[:, :, None] == token_type_ids[:, None]\n # Treat <cls> as in the same segment as both A & B\n cls_ids = token_type_ids == self.cls_token_type_id\n cls_mat = cls_ids[:, :, None] | cls_ids[:, None]\n return cls_mat | token_type_mat\n\n def get_position_embeds(self, seq_len, dtype, device):\n \"\"\"\n Create and cache inputs related to relative position encoding. Those are very different depending on whether we\n are using the factorized or the relative shift attention:\n\n For the factorized attention, it returns the matrices (phi, pi, psi, omega) used in the paper, appendix A.2.2,\n final formula.\n\n For the relative shift attention, it returns all possible vectors R used in the paper, appendix A.2.1, final\n formula.\n\n Paper link: https://arxiv.org/abs/2006.03236\n \"\"\"\n d_model = self.config.d_model\n if self.config.attention_type == \"factorized\":\n # Notations from the paper, appending A.2.2, final formula.\n # We need to create and return the matrices phi, psi, pi and omega.\n pos_seq = torch.arange(0, seq_len, 1.0, dtype=dtype, device=device)\n freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=dtype, device=device)\n inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2)))\n sinusoid = pos_seq[:, None] * inv_freq[None]\n sin_embed = torch.sin(sinusoid)\n sin_embed_d = self.sin_dropout(sin_embed)\n cos_embed = torch.cos(sinusoid)\n cos_embed_d = self.cos_dropout(cos_embed)\n # This is different from the formula on the paper...\n phi = torch.cat([sin_embed_d, sin_embed_d], dim=-1)\n psi = torch.cat([cos_embed, sin_embed], dim=-1)\n pi = torch.cat([cos_embed_d, cos_embed_d], dim=-1)\n omega = torch.cat([-sin_embed, cos_embed], dim=-1)\n return (phi, pi, psi, omega)\n else:\n # Notations from the paper, appending A.2.1, final formula.\n # We need to create and return all the possible vectors R for all blocks and shifts.\n freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=dtype, device=device)\n inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2)))\n # Maximum relative positions for the first input\n rel_pos_id = torch.arange(-seq_len * 2, seq_len * 2, 1.0, dtype=dtype, device=device)\n zero_offset = seq_len * 2\n sinusoid = rel_pos_id[:, None] * inv_freq[None]\n sin_embed = self.sin_dropout(torch.sin(sinusoid))\n cos_embed = self.cos_dropout(torch.cos(sinusoid))\n pos_embed = torch.cat([sin_embed, cos_embed], dim=-1)\n\n pos = torch.arange(0, seq_len, dtype=dtype, device=device)\n pooled_pos = pos\n position_embeds_list = []\n for block_index in range(0, self.config.num_blocks):\n # For each block with block_index > 0, we need two types position embeddings:\n # - 
Attention(pooled-q, unpooled-kv)\n # - Attention(pooled-q, pooled-kv)\n # For block_index = 0 we only need the second one and leave the first one as None.\n\n # First type\n if block_index == 0:\n position_embeds_pooling = None\n else:\n pooled_pos = self.stride_pool_pos(pos, block_index)\n\n # construct rel_pos_id\n stride = 2 ** (block_index - 1)\n rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2)\n rel_pos = rel_pos[:, None] + zero_offset\n rel_pos = rel_pos.expand(rel_pos.size(0), d_model)\n position_embeds_pooling = torch.gather(pos_embed, 0, rel_pos)\n\n # Second type\n pos = pooled_pos\n stride = 2 ** block_index\n rel_pos = self.relative_pos(pos, stride)\n\n rel_pos = rel_pos[:, None] + zero_offset\n rel_pos = rel_pos.expand(rel_pos.size(0), d_model)\n position_embeds_no_pooling = torch.gather(pos_embed, 0, rel_pos)\n\n position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling])\n return position_embeds_list\n\n def stride_pool_pos(self, pos_id, block_index):\n \"\"\"\n Pool `pos_id` while keeping the cls token separate (if `config.separate_cls=True`).\n \"\"\"\n if self.config.separate_cls:\n # Under separate <cls>, we treat the <cls> as the first token in\n # the previous block of the 1st real block. Since the 1st real\n # block always has position 1, the position of the previous block\n # will be at `1 - 2 ** block_index`.\n cls_pos = pos_id.new_tensor([-(2 ** block_index) + 1])\n pooled_pos_id = pos_id[1:-1] if self.config.truncate_seq else pos_id[1:]\n return torch.cat([cls_pos, pooled_pos_id[::2]], 0)\n else:\n return pos_id[::2]\n\n def relative_pos(self, pos, stride, pooled_pos=None, shift=1):\n \"\"\"\n Build the relative positional vector between `pos` and `pooled_pos`.\n \"\"\"\n if pooled_pos is None:\n pooled_pos = pos\n\n ref_point = pooled_pos[0] - pos[0]\n num_remove = shift * len(pooled_pos)\n max_dist = ref_point + num_remove * stride\n min_dist = pooled_pos[0] - pos[-1]\n\n return torch.arange(max_dist, min_dist - 1, -stride, dtype=torch.long, device=pos.device)\n\n def stride_pool(self, tensor, axis):\n \"\"\"\n Perform pooling by stride slicing the tensor along the given axis.\n \"\"\"\n if tensor is None:\n return None\n\n # Do the stride pool recursively if axis is a list or a tuple of ints.\n if isinstance(axis, (list, tuple)):\n for ax in axis:\n tensor = self.stride_pool(tensor, ax)\n return tensor\n\n # Do the stride pool recursively if tensor is a list or tuple of tensors.\n if isinstance(tensor, (tuple, list)):\n return type(tensor)(self.stride_pool(x, axis) for x in tensor)\n\n # Deal with negative axis\n axis %= tensor.ndim\n\n axis_slice = (\n slice(None, -1, 2) if self.config.separate_cls and self.config.truncate_seq else slice(None, None, 2)\n )\n enc_slice = [slice(None)] * axis + [axis_slice]\n if self.config.separate_cls:\n cls_slice = [slice(None)] * axis + [slice(None, 1)]\n tensor = torch.cat([tensor[cls_slice], tensor], axis=axis)\n return tensor[enc_slice]\n\n def pool_tensor(self, tensor, mode=\"mean\", stride=2):\n \"\"\"Apply 1D pooling to a tensor of size [B x T (x H)].\"\"\"\n if tensor is None:\n return None\n\n # Do the pool recursively if tensor is a list or tuple of tensors.\n if isinstance(tensor, (tuple, list)):\n return type(tensor)(self.pool_tensor(tensor, mode=mode, stride=stride) for x in tensor)\n\n if self.config.separate_cls:\n suffix = tensor[:, :-1] if self.config.truncate_seq else tensor\n tensor = torch.cat([tensor[:, :1], suffix], dim=1)\n\n ndim = tensor.ndim\n if ndim == 2:\n 
tensor = tensor[:, None, :, None]\n elif ndim == 3:\n tensor = tensor[:, None, :, :]\n # Stride is applied on the second-to-last dimension.\n stride = (stride, 1)\n\n if mode == \"mean\":\n tensor = nn.functional.avg_pool2d(tensor, stride, stride=stride, ceil_mode=True)\n elif mode == \"max\":\n tensor = nn.functional.max_pool2d(tensor, stride, stride=stride, ceil_mode=True)\n elif mode == \"min\":\n tensor = -nn.functional.max_pool2d(-tensor, stride, stride=stride, ceil_mode=True)\n else:\n raise NotImplementedError(\"The supported modes are 'mean', 'max' and 'min'.\")\n\n if ndim == 2:\n return tensor[:, 0, :, 0]\n elif ndim == 3:\n return tensor[:, 0]\n return tensor\n\n def pre_attention_pooling(self, output, attention_inputs):\n \"\"\"Pool `output` and the proper parts of `attention_inputs` before the attention layer.\"\"\"\n position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs\n if self.config.pool_q_only:\n if self.config.attention_type == \"factorized\":\n position_embeds = self.stride_pool(position_embeds[:2], 0) + position_embeds[2:]\n token_type_mat = self.stride_pool(token_type_mat, 1)\n cls_mask = self.stride_pool(cls_mask, 0)\n output = self.pool_tensor(output, mode=self.config.pooling_type)\n else:\n self.pooling_mult *= 2\n if self.config.attention_type == \"factorized\":\n position_embeds = self.stride_pool(position_embeds, 0)\n token_type_mat = self.stride_pool(token_type_mat, [1, 2])\n cls_mask = self.stride_pool(cls_mask, [1, 2])\n attention_mask = self.pool_tensor(attention_mask, mode=\"min\")\n output = self.pool_tensor(output, mode=self.config.pooling_type)\n attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)\n return output, attention_inputs\n\n def post_attention_pooling(self, attention_inputs):\n \"\"\"Pool the proper parts of `attention_inputs` after the attention layer.\"\"\"\n position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs\n if self.config.pool_q_only:\n self.pooling_mult *= 2\n if self.config.attention_type == \"factorized\":\n position_embeds = position_embeds[:2] + self.stride_pool(position_embeds[2:], 0)\n token_type_mat = self.stride_pool(token_type_mat, 2)\n cls_mask = self.stride_pool(cls_mask, 1)\n attention_mask = self.pool_tensor(attention_mask, mode=\"min\")\n attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)\n return attention_inputs\n\n\ndef _relative_shift_gather(positional_attn, context_len, shift):\n batch_size, n_head, seq_len, max_rel_len = positional_attn.shape\n # max_rel_len = 2 * context_len + shift -1 is the numbers of possible relative positions i-j\n\n # What's next is the same as doing the following gather, which might be clearer code but less efficient.\n # idxs = context_len + torch.arange(0, context_len).unsqueeze(0) - torch.arange(0, seq_len).unsqueeze(1)\n # # matrix of context_len + i-j\n # return positional_attn.gather(3, idxs.expand([batch_size, n_head, context_len, context_len]))\n\n positional_attn = torch.reshape(positional_attn, [batch_size, n_head, max_rel_len, seq_len])\n positional_attn = positional_attn[:, :, shift:, :]\n positional_attn = torch.reshape(positional_attn, [batch_size, n_head, seq_len, max_rel_len - shift])\n positional_attn = positional_attn[..., :context_len]\n return positional_attn\n\n\nclass FunnelRelMultiheadAttention(nn.Module):\n def __init__(self, config, block_index):\n super().__init__()\n self.config = config\n self.block_index = block_index\n d_model, n_head, d_head = config.d_model, 
config.n_head, config.d_head\n\n self.hidden_dropout = nn.Dropout(config.hidden_dropout)\n self.attention_dropout = nn.Dropout(config.attention_dropout)\n\n self.q_head = nn.Linear(d_model, n_head * d_head, bias=False)\n self.k_head = nn.Linear(d_model, n_head * d_head)\n self.v_head = nn.Linear(d_model, n_head * d_head)\n\n self.r_w_bias = nn.Parameter(torch.zeros([n_head, d_head]))\n self.r_r_bias = nn.Parameter(torch.zeros([n_head, d_head]))\n self.r_kernel = nn.Parameter(torch.zeros([d_model, n_head, d_head]))\n self.r_s_bias = nn.Parameter(torch.zeros([n_head, d_head]))\n self.seg_embed = nn.Parameter(torch.zeros([2, n_head, d_head]))\n\n self.post_proj = nn.Linear(n_head * d_head, d_model)\n self.layer_norm = nn.LayerNorm(d_model, eps=config.layer_norm_eps)\n self.scale = 1.0 / (d_head ** 0.5)\n\n def relative_positional_attention(self, position_embeds, q_head, context_len, cls_mask=None):\n \"\"\"Relative attention score for the positional encodings\"\"\"\n # q_head has shape batch_size x sea_len x n_head x d_head\n if self.config.attention_type == \"factorized\":\n # Notations from the paper, appending A.2.2, final formula (https://arxiv.org/abs/2006.03236)\n # phi and pi have shape seq_len x d_model, psi and omega have shape context_len x d_model\n phi, pi, psi, omega = position_embeds\n # Shape n_head x d_head\n u = self.r_r_bias * self.scale\n # Shape d_model x n_head x d_head\n w_r = self.r_kernel\n\n # Shape batch_size x sea_len x n_head x d_model\n q_r_attention = torch.einsum(\"binh,dnh->bind\", q_head + u, w_r)\n q_r_attention_1 = q_r_attention * phi[:, None]\n q_r_attention_2 = q_r_attention * pi[:, None]\n\n # Shape batch_size x n_head x seq_len x context_len\n positional_attn = torch.einsum(\"bind,jd->bnij\", q_r_attention_1, psi) + torch.einsum(\n \"bind,jd->bnij\", q_r_attention_2, omega\n )\n else:\n shift = 2 if q_head.shape[1] != context_len else 1\n # Notations from the paper, appending A.2.1, final formula (https://arxiv.org/abs/2006.03236)\n # Grab the proper positional encoding, shape max_rel_len x d_model\n r = position_embeds[self.block_index][shift - 1]\n # Shape n_head x d_head\n v = self.r_r_bias * self.scale\n # Shape d_model x n_head x d_head\n w_r = self.r_kernel\n\n # Shape max_rel_len x n_head x d_model\n r_head = torch.einsum(\"td,dnh->tnh\", r, w_r)\n # Shape batch_size x n_head x seq_len x max_rel_len\n positional_attn = torch.einsum(\"binh,tnh->bnit\", q_head + v, r_head)\n # Shape batch_size x n_head x seq_len x context_len\n positional_attn = _relative_shift_gather(positional_attn, context_len, shift)\n\n if cls_mask is not None:\n positional_attn *= cls_mask\n return positional_attn\n\n def relative_token_type_attention(self, token_type_mat, q_head, cls_mask=None):\n \"\"\"Relative attention score for the token_type_ids\"\"\"\n if token_type_mat is None:\n return 0\n batch_size, seq_len, context_len = token_type_mat.shape\n # q_head has shape batch_size x seq_len x n_head x d_head\n # Shape n_head x d_head\n r_s_bias = self.r_s_bias * self.scale\n\n # Shape batch_size x n_head x seq_len x 2\n token_type_bias = torch.einsum(\"bind,snd->bnis\", q_head + r_s_bias, self.seg_embed)\n # Shape batch_size x n_head x seq_len x context_len\n token_type_mat = token_type_mat[:, None].expand([batch_size, q_head.shape[2], seq_len, context_len])\n # Shapes batch_size x n_head x seq_len\n diff_token_type, same_token_type = torch.split(token_type_bias, 1, dim=-1)\n # Shape batch_size x n_head x seq_len x context_len\n token_type_attn = torch.where(\n 
token_type_mat, same_token_type.expand(token_type_mat.shape), diff_token_type.expand(token_type_mat.shape)\n )\n\n if cls_mask is not None:\n token_type_attn *= cls_mask\n return token_type_attn\n\n def forward(self, query, key, value, attention_inputs, output_attentions=False):\n # query has shape batch_size x seq_len x d_model\n # key and value have shapes batch_size x context_len x d_model\n position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs\n\n batch_size, seq_len, _ = query.shape\n context_len = key.shape[1]\n n_head, d_head = self.config.n_head, self.config.d_head\n\n # Shape batch_size x seq_len x n_head x d_head\n q_head = self.q_head(query).view(batch_size, seq_len, n_head, d_head)\n # Shapes batch_size x context_len x n_head x d_head\n k_head = self.k_head(key).view(batch_size, context_len, n_head, d_head)\n v_head = self.v_head(value).view(batch_size, context_len, n_head, d_head)\n\n q_head = q_head * self.scale\n # Shape n_head x d_head\n r_w_bias = self.r_w_bias * self.scale\n # Shapes batch_size x n_head x seq_len x context_len\n content_score = torch.einsum(\"bind,bjnd->bnij\", q_head + r_w_bias, k_head)\n positional_attn = self.relative_positional_attention(position_embeds, q_head, context_len, cls_mask)\n token_type_attn = self.relative_token_type_attention(token_type_mat, q_head, cls_mask)\n\n # merge attention scores\n attn_score = content_score + positional_attn + token_type_attn\n\n # precision safe in case of mixed precision training\n dtype = attn_score.dtype\n attn_score = attn_score.float()\n # perform masking\n if attention_mask is not None:\n attn_score = attn_score - INF * (1 - attention_mask[:, None, None].float())\n # attention probability\n attn_prob = torch.softmax(attn_score, dim=-1, dtype=dtype)\n attn_prob = self.attention_dropout(attn_prob)\n\n # attention output, shape batch_size x seq_len x n_head x d_head\n attn_vec = torch.einsum(\"bnij,bjnd->bind\", attn_prob, v_head)\n\n # Shape shape batch_size x seq_len x d_model\n attn_out = self.post_proj(attn_vec.reshape(batch_size, seq_len, n_head * d_head))\n attn_out = self.hidden_dropout(attn_out)\n\n output = self.layer_norm(query + attn_out)\n return (output, attn_prob) if output_attentions else (output,)\n\n\nclass FunnelPositionwiseFFN(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.linear_1 = nn.Linear(config.d_model, config.d_inner)\n self.activation_function = ACT2FN[config.hidden_act]\n self.activation_dropout = nn.Dropout(config.activation_dropout)\n self.linear_2 = nn.Linear(config.d_inner, config.d_model)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layer_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps)\n\n def forward(self, hidden):\n h = self.linear_1(hidden)\n h = self.activation_function(h)\n h = self.activation_dropout(h)\n h = self.linear_2(h)\n h = self.dropout(h)\n return self.layer_norm(hidden + h)\n\n\nclass FunnelLayer(nn.Module):\n def __init__(self, config, block_index):\n super().__init__()\n self.attention = FunnelRelMultiheadAttention(config, block_index)\n self.ffn = FunnelPositionwiseFFN(config)\n\n def forward(self, query, key, value, attention_inputs, output_attentions=False):\n attn = self.attention(query, key, value, attention_inputs, output_attentions=output_attentions)\n output = self.ffn(attn[0])\n return (output, attn[1]) if output_attentions else (output,)\n\n\nclass FunnelEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.attention_structure = 
FunnelAttentionStructure(config)\n self.blocks = nn.ModuleList(\n [\n nn.ModuleList([FunnelLayer(config, block_index) for _ in range(block_size)])\n for block_index, block_size in enumerate(config.block_sizes)\n ]\n )\n\n def forward(\n self,\n inputs_embeds,\n attention_mask=None,\n token_type_ids=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n # The pooling is not implemented on long tensors, so we convert this mask.\n attention_mask = attention_mask.type_as(inputs_embeds)\n attention_inputs = self.attention_structure.init_attention_inputs(\n inputs_embeds,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n )\n hidden = inputs_embeds\n\n all_hidden_states = (inputs_embeds,) if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n for block_index, block in enumerate(self.blocks):\n pooling_flag = hidden.size(1) > (2 if self.config.separate_cls else 1)\n pooling_flag = pooling_flag and block_index > 0\n if pooling_flag:\n pooled_hidden, attention_inputs = self.attention_structure.pre_attention_pooling(\n hidden, attention_inputs\n )\n for (layer_index, layer) in enumerate(block):\n for repeat_index in range(self.config.block_repeats[block_index]):\n do_pooling = (repeat_index == 0) and (layer_index == 0) and pooling_flag\n if do_pooling:\n query = pooled_hidden\n key = value = hidden if self.config.pool_q_only else pooled_hidden\n else:\n query = key = value = hidden\n layer_output = layer(query, key, value, attention_inputs, output_attentions=output_attentions)\n hidden = layer_output[0]\n if do_pooling:\n attention_inputs = self.attention_structure.post_attention_pooling(attention_inputs)\n\n if output_attentions:\n all_attentions = all_attentions + layer_output[1:]\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden,)\n\n if not return_dict:\n return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)\n return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)\n\n\ndef upsample(x, stride, target_len, separate_cls=True, truncate_seq=False):\n \"\"\"\n Upsample tensor `x` to match `target_len` by repeating the tokens `stride` time on the sequence length dimension.\n \"\"\"\n if stride == 1:\n return x\n if separate_cls:\n cls = x[:, :1]\n x = x[:, 1:]\n output = torch.repeat_interleave(x, repeats=stride, dim=1)\n if separate_cls:\n if truncate_seq:\n output = nn.functional.pad(output, (0, 0, 0, stride - 1, 0, 0))\n output = output[:, : target_len - 1]\n output = torch.cat([cls, output], dim=1)\n else:\n output = output[:, :target_len]\n return output\n\n\nclass FunnelDecoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.attention_structure = FunnelAttentionStructure(config)\n self.layers = nn.ModuleList([FunnelLayer(config, 0) for _ in range(config.num_decoder_layers)])\n\n def forward(\n self,\n final_hidden,\n first_block_hidden,\n attention_mask=None,\n token_type_ids=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n upsampled_hidden = upsample(\n final_hidden,\n stride=2 ** (len(self.config.block_sizes) - 1),\n target_len=first_block_hidden.shape[1],\n separate_cls=self.config.separate_cls,\n truncate_seq=self.config.truncate_seq,\n )\n\n hidden = upsampled_hidden + first_block_hidden\n all_hidden_states = (hidden,) if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n 
attention_inputs = self.attention_structure.init_attention_inputs(\n hidden,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n )\n\n for layer in self.layers:\n layer_output = layer(hidden, hidden, hidden, attention_inputs, output_attentions=output_attentions)\n hidden = layer_output[0]\n\n if output_attentions:\n all_attentions = all_attentions + layer_output[1:]\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden,)\n\n if not return_dict:\n return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)\n return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)\n\n\nclass FunnelDiscriminatorPredictions(nn.Module):\n \"\"\"Prediction module for the discriminator, made up of two dense layers.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.dense = nn.Linear(config.d_model, config.d_model)\n self.dense_prediction = nn.Linear(config.d_model, 1)\n\n def forward(self, discriminator_hidden_states):\n hidden_states = self.dense(discriminator_hidden_states)\n hidden_states = ACT2FN[self.config.hidden_act](hidden_states)\n logits = self.dense_prediction(hidden_states).squeeze()\n return logits\n\n\nclass FunnelPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = FunnelConfig\n load_tf_weights = load_tf_weights_in_funnel\n base_model_prefix = \"funnel\"\n\n def _init_weights(self, module):\n classname = module.__class__.__name__\n if classname.find(\"Linear\") != -1:\n if getattr(module, \"weight\", None) is not None:\n if self.config.initializer_std is None:\n fan_out, fan_in = module.weight.shape\n std = np.sqrt(1.0 / float(fan_in + fan_out))\n else:\n std = self.config.initializer_std\n nn.init.normal_(module.weight, std=std)\n if getattr(module, \"bias\", None) is not None:\n nn.init.constant_(module.bias, 0.0)\n elif classname == \"FunnelRelMultiheadAttention\":\n nn.init.uniform_(module.r_w_bias, b=self.config.initializer_range)\n nn.init.uniform_(module.r_r_bias, b=self.config.initializer_range)\n nn.init.uniform_(module.r_kernel, b=self.config.initializer_range)\n nn.init.uniform_(module.r_s_bias, b=self.config.initializer_range)\n nn.init.uniform_(module.seg_embed, b=self.config.initializer_range)\n elif classname == \"FunnelEmbeddings\":\n std = 1.0 if self.config.initializer_std is None else self.config.initializer_std\n nn.init.normal_(module.word_embeddings.weight, std=std)\n if module.word_embeddings.padding_idx is not None:\n module.word_embeddings.weight.data[module.padding_idx].zero_()\n\n\nclass FunnelClassificationHead(nn.Module):\n def __init__(self, config, n_labels):\n super().__init__()\n self.linear_hidden = nn.Linear(config.d_model, config.d_model)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.linear_out = nn.Linear(config.d_model, n_labels)\n\n def forward(self, hidden):\n hidden = self.linear_hidden(hidden)\n hidden = torch.tanh(hidden)\n hidden = self.dropout(hidden)\n return self.linear_out(hidden)\n\n\n@dataclass\nclass FunnelForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of :class:`~transformers.FunnelForPreTraining`.\n\n Args:\n loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):\n Total loss of the ELECTRA-style objective.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 
sequence_length)`):\n Prediction scores of the head (scores for each token before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nFUNNEL_START_DOCSTRING = r\"\"\"\n\n The Funnel Transformer model was proposed in `Funnel-Transformer: Filtering out Sequential Redundancy for Efficient\n Language Processing <https://arxiv.org/abs/2006.03236>`__ by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.FunnelConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nFUNNEL_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.BertTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? 
<../glossary.html#token-type-ids>`_\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"\"\"\n The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called\n decoder) or any task-specific head on top.\n \"\"\",\n FUNNEL_START_DOCSTRING,\n)\nclass FunnelBaseModel(FunnelPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.embeddings = FunnelEmbeddings(config)\n self.encoder = FunnelEncoder(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings.word_embeddings = new_embeddings\n\n @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"funnel-transformer/small-base\",\n output_type=BaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # TODO: deal with head_mask\n if inputs_embeds is None:\n inputs_embeds = self.embeddings(input_ids)\n\n encoder_outputs = self.encoder(\n inputs_embeds,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n return encoder_outputs\n\n\n@add_start_docstrings(\n \"The bare Funnel Transformer Model transformer outputting raw hidden-states 
without any specific head on top.\",\n FUNNEL_START_DOCSTRING,\n)\nclass FunnelModel(FunnelPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n self.embeddings = FunnelEmbeddings(config)\n self.encoder = FunnelEncoder(config)\n self.decoder = FunnelDecoder(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings.word_embeddings = new_embeddings\n\n @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # TODO: deal with head_mask\n if inputs_embeds is None:\n inputs_embeds = self.embeddings(input_ids)\n\n encoder_outputs = self.encoder(\n inputs_embeds,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n output_attentions=output_attentions,\n output_hidden_states=True,\n return_dict=return_dict,\n )\n\n decoder_outputs = self.decoder(\n final_hidden=encoder_outputs[0],\n first_block_hidden=encoder_outputs[1][self.config.block_sizes[0]],\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n idx = 0\n outputs = (decoder_outputs[0],)\n if output_hidden_states:\n idx += 1\n outputs = outputs + (encoder_outputs[1] + decoder_outputs[idx],)\n if output_attentions:\n idx += 1\n outputs = outputs + (encoder_outputs[2] + decoder_outputs[idx],)\n return outputs\n\n return BaseModelOutput(\n last_hidden_state=decoder_outputs[0],\n hidden_states=(encoder_outputs.hidden_states + decoder_outputs.hidden_states)\n if output_hidden_states\n else None,\n attentions=(encoder_outputs.attentions + decoder_outputs.attentions) if output_attentions else None,\n )\n\n\nadd_start_docstrings(\n \"\"\"\n Funnel Transformer model with a binary classification head on top as used during pretraining for identifying\n generated tokens.\n \"\"\",\n FUNNEL_START_DOCSTRING,\n)\n\n\nclass FunnelForPreTraining(FunnelPreTrainedModel):\n def __init__(self, config):\n 
super().__init__(config)\n\n self.funnel = FunnelModel(config)\n self.discriminator_predictions = FunnelDiscriminatorPredictions(config)\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=FunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):\n Labels for computing the ELECTRA-style loss. Input should be a sequence of tokens (see :obj:`input_ids`\n docstring) Indices should be in ``[0, 1]``:\n\n - 0 indicates the token is an original token,\n - 1 indicates the token was replaced.\n\n Returns:\n\n Examples::\n\n >>> from transformers import FunnelTokenizer, FunnelForPreTraining\n >>> import torch\n\n >>> tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small')\n >>> model = FunnelForPreTraining.from_pretrained('funnel-transformer/small')\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors= \"pt\")\n >>> logits = model(**inputs).logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n discriminator_hidden_states = self.funnel(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n discriminator_sequence_output = discriminator_hidden_states[0]\n\n logits = self.discriminator_predictions(discriminator_sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = nn.BCEWithLogitsLoss()\n if attention_mask is not None:\n active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1\n active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]\n active_labels = labels[active_loss]\n loss = loss_fct(active_logits, active_labels.float())\n else:\n loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float())\n\n if not return_dict:\n output = (logits,) + discriminator_hidden_states[1:]\n return ((loss,) + output) if loss is not None else output\n\n return FunnelForPreTrainingOutput(\n loss=loss,\n logits=logits,\n hidden_states=discriminator_hidden_states.hidden_states,\n attentions=discriminator_hidden_states.attentions,\n )\n\n\n@add_start_docstrings(\"\"\"Funnel Transformer Model with a `language modeling` head on top. 
\"\"\", FUNNEL_START_DOCSTRING)\nclass FunnelForMaskedLM(FunnelPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.funnel = FunnelModel(config)\n self.lm_head = nn.Linear(config.d_model, config.vocab_size)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n mask=\"<mask>\",\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.funnel(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n last_hidden_state = outputs[0]\n prediction_logits = self.lm_head(last_hidden_state)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Funnel Transformer Model with a sequence classification/regression head on top (two linear layer on top of the\n first timestep of the last hidden state) e.g. 
for GLUE tasks.\n \"\"\",\n FUNNEL_START_DOCSTRING,\n)\nclass FunnelForSequenceClassification(FunnelPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n\n self.funnel = FunnelBaseModel(config)\n self.classifier = FunnelClassificationHead(config, config.num_labels)\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"funnel-transformer/small-base\",\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.funnel(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n last_hidden_state = outputs[0]\n pooled_output = last_hidden_state[:, 0]\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Funnel Transformer Model with a multiple choice classification head on top (two linear layer on top of the first\n timestep of the last hidden state, and a softmax) e.g. 
for RocStories/SWAG tasks.\n \"\"\",\n FUNNEL_START_DOCSTRING,\n)\nclass FunnelForMultipleChoice(FunnelPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.funnel = FunnelBaseModel(config)\n self.classifier = FunnelClassificationHead(config, 1)\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"funnel-transformer/small-base\",\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See\n :obj:`input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.funnel(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n last_hidden_state = outputs[0]\n pooled_output = last_hidden_state[:, 0]\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Funnel Transformer Model with a token classification head on top (a linear layer on top of the hidden-states\n output) e.g. 
for Named-Entity-Recognition (NER) tasks.\n \"\"\",\n FUNNEL_START_DOCSTRING,\n)\nclass FunnelForTokenClassification(FunnelPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.funnel = FunnelModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.funnel(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n last_hidden_state = outputs[0]\n last_hidden_state = self.dropout(last_hidden_state)\n logits = self.classifier(last_hidden_state)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Funnel Transformer Model with a span classification head on top for extractive question-answering tasks like SQuAD\n (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n FUNNEL_START_DOCSTRING,\n)\nclass FunnelForQuestionAnswering(FunnelPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.funnel = FunnelModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n 
):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.funnel(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n last_hidden_state = outputs[0]\n\n logits = self.qa_outputs(last_hidden_state)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[1:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n",
"# coding=utf-8\n# Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 LayoutLM model. \"\"\"\n\nimport math\nimport warnings\nfrom typing import Dict, Optional, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ...activations_tf import get_tf_activation\nfrom ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings\nfrom ...modeling_tf_outputs import (\n TFBaseModelOutputWithPastAndCrossAttentions,\n TFBaseModelOutputWithPoolingAndCrossAttentions,\n TFMaskedLMOutput,\n TFSequenceClassifierOutput,\n TFTokenClassifierOutput,\n)\nfrom ...modeling_tf_utils import (\n TFMaskedLanguageModelingLoss,\n TFModelInputType,\n TFPreTrainedModel,\n TFSequenceClassificationLoss,\n TFTokenClassificationLoss,\n get_initializer,\n input_processing,\n keras_serializable,\n shape_list,\n)\nfrom ...utils import logging\nfrom .configuration_layoutlm import LayoutLMConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"LayoutLMConfig\"\n_TOKENIZER_FOR_DOC = \"LayoutLMTokenizer\"\n\nTF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"microsoft/layoutlm-base-uncased\",\n \"microsoft/layoutlm-large-uncased\",\n]\n\n\nclass TFLayoutLMEmbeddings(tf.keras.layers.Layer):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config: LayoutLMConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.vocab_size = config.vocab_size\n self.type_vocab_size = config.type_vocab_size\n self.hidden_size = config.hidden_size\n self.max_position_embeddings = config.max_position_embeddings\n self.max_2d_position_embeddings = config.max_2d_position_embeddings\n self.initializer_range = config.initializer_range\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n\n def build(self, input_shape: tf.TensorShape):\n with tf.name_scope(\"word_embeddings\"):\n self.weight = self.add_weight(\n name=\"weight\",\n shape=[self.vocab_size, self.hidden_size],\n initializer=get_initializer(self.initializer_range),\n )\n\n with tf.name_scope(\"token_type_embeddings\"):\n self.token_type_embeddings = self.add_weight(\n name=\"embeddings\",\n shape=[self.type_vocab_size, self.hidden_size],\n initializer=get_initializer(self.initializer_range),\n )\n\n with tf.name_scope(\"position_embeddings\"):\n self.position_embeddings = self.add_weight(\n name=\"embeddings\",\n shape=[self.max_position_embeddings, self.hidden_size],\n initializer=get_initializer(self.initializer_range),\n )\n\n with tf.name_scope(\"x_position_embeddings\"):\n self.x_position_embeddings = self.add_weight(\n name=\"embeddings\",\n shape=[self.max_2d_position_embeddings, self.hidden_size],\n initializer=get_initializer(self.initializer_range),\n )\n\n with tf.name_scope(\"y_position_embeddings\"):\n 
self.y_position_embeddings = self.add_weight(\n name=\"embeddings\",\n shape=[self.max_2d_position_embeddings, self.hidden_size],\n initializer=get_initializer(self.initializer_range),\n )\n\n with tf.name_scope(\"h_position_embeddings\"):\n self.h_position_embeddings = self.add_weight(\n name=\"embeddings\",\n shape=[self.max_2d_position_embeddings, self.hidden_size],\n initializer=get_initializer(self.initializer_range),\n )\n\n with tf.name_scope(\"w_position_embeddings\"):\n self.w_position_embeddings = self.add_weight(\n name=\"embeddings\",\n shape=[self.max_2d_position_embeddings, self.hidden_size],\n initializer=get_initializer(self.initializer_range),\n )\n\n super().build(input_shape)\n\n def call(\n self,\n input_ids: tf.Tensor = None,\n bbox: tf.Tensor = None,\n position_ids: tf.Tensor = None,\n token_type_ids: tf.Tensor = None,\n inputs_embeds: tf.Tensor = None,\n training: bool = False,\n ) -> tf.Tensor:\n \"\"\"\n Applies embedding based on inputs tensor.\n\n Returns:\n final_embeddings (:obj:`tf.Tensor`): output embedding tensor.\n \"\"\"\n assert not (input_ids is None and inputs_embeds is None)\n\n if input_ids is not None:\n inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n\n input_shape = shape_list(inputs_embeds)[:-1]\n\n if token_type_ids is None:\n token_type_ids = tf.fill(dims=input_shape, value=0)\n\n if position_ids is None:\n position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)\n\n if position_ids is None:\n position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)\n\n if bbox is None:\n bbox = bbox = tf.fill(input_shape + [4], value=0)\n try:\n left_position_embeddings = tf.gather(self.x_position_embeddings, bbox[:, :, 0])\n upper_position_embeddings = tf.gather(self.y_position_embeddings, bbox[:, :, 1])\n right_position_embeddings = tf.gather(self.x_position_embeddings, bbox[:, :, 2])\n lower_position_embeddings = tf.gather(self.y_position_embeddings, bbox[:, :, 3])\n except IndexError as e:\n raise IndexError(\"The :obj:`bbox`coordinate values should be within 0-1000 range.\") from e\n h_position_embeddings = tf.gather(self.h_position_embeddings, bbox[:, :, 3] - bbox[:, :, 1])\n w_position_embeddings = tf.gather(self.w_position_embeddings, bbox[:, :, 2] - bbox[:, :, 0])\n\n position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n final_embeddings = (\n inputs_embeds\n + position_embeds\n + token_type_embeds\n + left_position_embeddings\n + upper_position_embeddings\n + right_position_embeddings\n + lower_position_embeddings\n + h_position_embeddings\n + w_position_embeddings\n )\n final_embeddings = self.LayerNorm(inputs=final_embeddings)\n final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n\n return final_embeddings\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->LayoutLM\nclass TFLayoutLMSelfAttention(tf.keras.layers.Layer):\n def __init__(self, config: LayoutLMConfig, **kwargs):\n super().__init__(**kwargs)\n\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number \"\n f\"of attention heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = 
self.num_attention_heads * self.attention_head_size\n self.sqrt_att_head_size = math.sqrt(self.attention_head_size)\n\n self.query = tf.keras.layers.Dense(\n units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name=\"query\"\n )\n self.key = tf.keras.layers.Dense(\n units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name=\"key\"\n )\n self.value = tf.keras.layers.Dense(\n units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name=\"value\"\n )\n self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)\n\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:\n # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]\n tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))\n\n # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]\n return tf.transpose(tensor, perm=[0, 2, 1, 3])\n\n def call(\n self,\n hidden_states: tf.Tensor,\n attention_mask: tf.Tensor,\n head_mask: tf.Tensor,\n encoder_hidden_states: tf.Tensor,\n encoder_attention_mask: tf.Tensor,\n past_key_value: Tuple[tf.Tensor],\n output_attentions: bool,\n training: bool = False,\n ) -> Tuple[tf.Tensor]:\n batch_size = shape_list(hidden_states)[0]\n mixed_query_layer = self.query(inputs=hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = encoder_hidden_states is not None\n\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_layer = past_key_value[0]\n value_layer = past_key_value[1]\n attention_mask = encoder_attention_mask\n elif is_cross_attention:\n key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)\n value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)\n value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)\n key_layer = tf.concatenate([past_key_value[0], key_layer], dim=2)\n value_layer = tf.concatenate([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)\n value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)\n\n query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)\n\n if self.is_decoder:\n # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of\n # all previous decoder key/value_states. 
Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n # (batch size, num_heads, seq_len_q, seq_len_k)\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)\n attention_scores = tf.divide(attention_scores, dk)\n\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in TFLayoutLMModel call() function)\n attention_scores = tf.add(attention_scores, attention_mask)\n\n # Normalize the attention scores to probabilities.\n attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(inputs=attention_probs, training=training)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = tf.multiply(attention_probs, head_mask)\n\n attention_output = tf.matmul(attention_probs, value_layer)\n attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])\n\n # (batch_size, seq_len_q, all_head_size)\n attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))\n outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)\n\n if self.is_decoder:\n outputs = outputs + (past_key_value,)\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->LayoutLM\nclass TFLayoutLMSelfOutput(tf.keras.layers.Layer):\n def __init__(self, config: LayoutLMConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n\n def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.dropout(inputs=hidden_states, training=training)\n hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)\n\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->LayoutLM\nclass TFLayoutLMAttention(tf.keras.layers.Layer):\n def __init__(self, config: LayoutLMConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.self_attention = TFLayoutLMSelfAttention(config, name=\"self\")\n self.dense_output = TFLayoutLMSelfOutput(config, name=\"output\")\n\n def prune_heads(self, heads):\n raise NotImplementedError\n\n def call(\n self,\n input_tensor: tf.Tensor,\n attention_mask: tf.Tensor,\n head_mask: tf.Tensor,\n encoder_hidden_states: tf.Tensor,\n encoder_attention_mask: tf.Tensor,\n past_key_value: Tuple[tf.Tensor],\n output_attentions: bool,\n training: bool = False,\n ) -> Tuple[tf.Tensor]:\n self_outputs = self.self_attention(\n hidden_states=input_tensor,\n attention_mask=attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n 
encoder_attention_mask=encoder_attention_mask,\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n training=training,\n )\n attention_output = self.dense_output(\n hidden_states=self_outputs[0], input_tensor=input_tensor, training=training\n )\n # add attentions (possibly with past_key_value) if we output them\n outputs = (attention_output,) + self_outputs[1:]\n\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->LayoutLM\nclass TFLayoutLMIntermediate(tf.keras.layers.Layer):\n def __init__(self, config: LayoutLMConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = get_tf_activation(config.hidden_act)\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def call(self, hidden_states: tf.Tensor) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->LayoutLM\nclass TFLayoutLMOutput(tf.keras.layers.Layer):\n def __init__(self, config: LayoutLMConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n\n def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.dropout(inputs=hidden_states, training=training)\n hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)\n\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->LayoutLM\nclass TFLayoutLMLayer(tf.keras.layers.Layer):\n def __init__(self, config: LayoutLMConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.attention = TFLayoutLMAttention(config, name=\"attention\")\n self.is_decoder = config.is_decoder\n self.add_cross_attention = config.add_cross_attention\n if self.add_cross_attention:\n if not self.is_decoder:\n raise ValueError(f\"{self} should be used as a decoder model if cross attention is added\")\n self.crossattention = TFLayoutLMAttention(config, name=\"crossattention\")\n self.intermediate = TFLayoutLMIntermediate(config, name=\"intermediate\")\n self.bert_output = TFLayoutLMOutput(config, name=\"output\")\n\n def call(\n self,\n hidden_states: tf.Tensor,\n attention_mask: tf.Tensor,\n head_mask: tf.Tensor,\n encoder_hidden_states: Optional[tf.Tensor],\n encoder_attention_mask: Optional[tf.Tensor],\n past_key_value: Optional[Tuple[tf.Tensor]],\n output_attentions: bool,\n training: bool = False,\n ) -> Tuple[tf.Tensor]:\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n self_attention_outputs = self.attention(\n input_tensor=hidden_states,\n attention_mask=attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=self_attn_past_key_value,\n 
output_attentions=output_attentions,\n training=training,\n )\n attention_output = self_attention_outputs[0]\n\n # if decoder, the last output is tuple of self-attn cache\n if self.is_decoder:\n outputs = self_attention_outputs[1:-1]\n present_key_value = self_attention_outputs[-1]\n else:\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n cross_attn_present_key_value = None\n if self.is_decoder and encoder_hidden_states is not None:\n if not hasattr(self, \"crossattention\"):\n raise ValueError(\n f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers \"\n \"by setting `config.add_cross_attention=True`\"\n )\n\n # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n cross_attention_outputs = self.crossattention(\n input_tensor=attention_output,\n attention_mask=attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_value=cross_attn_past_key_value,\n output_attentions=output_attentions,\n training=training,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights\n\n # add cross-attn cache to positions 3,4 of present_key_value tuple\n cross_attn_present_key_value = cross_attention_outputs[-1]\n present_key_value = present_key_value + cross_attn_present_key_value\n\n intermediate_output = self.intermediate(hidden_states=attention_output)\n layer_output = self.bert_output(\n hidden_states=intermediate_output, input_tensor=attention_output, training=training\n )\n outputs = (layer_output,) + outputs # add attentions if we output them\n\n # if decoder, return the attn key/values as the last output\n if self.is_decoder:\n outputs = outputs + (present_key_value,)\n\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->LayoutLM\nclass TFLayoutLMEncoder(tf.keras.layers.Layer):\n def __init__(self, config: LayoutLMConfig, **kwargs):\n super().__init__(**kwargs)\n self.config = config\n self.layer = [TFLayoutLMLayer(config, name=f\"layer_._{i}\") for i in range(config.num_hidden_layers)]\n\n def call(\n self,\n hidden_states: tf.Tensor,\n attention_mask: tf.Tensor,\n head_mask: tf.Tensor,\n encoder_hidden_states: Optional[tf.Tensor],\n encoder_attention_mask: Optional[tf.Tensor],\n past_key_values: Optional[Tuple[Tuple[tf.Tensor]]],\n use_cache: Optional[bool],\n output_attentions: bool,\n output_hidden_states: bool,\n return_dict: bool,\n training: bool = False,\n ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n\n next_decoder_cache = () if use_cache else None\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n past_key_value = past_key_values[i] if past_key_values is not None else None\n\n layer_outputs = layer_module(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n head_mask=head_mask[i],\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_value=past_key_value,\n 
output_attentions=output_attentions,\n training=training,\n )\n hidden_states = layer_outputs[0]\n\n if use_cache:\n next_decoder_cache += (layer_outputs[-1],)\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if self.config.add_cross_attention and encoder_hidden_states is not None:\n all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n\n # Add last layer\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None\n )\n\n return TFBaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_decoder_cache,\n hidden_states=all_hidden_states,\n attentions=all_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->LayoutLM\nclass TFLayoutLMPooler(tf.keras.layers.Layer):\n def __init__(self, config: LayoutLMConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n activation=\"tanh\",\n name=\"dense\",\n )\n\n def call(self, hidden_states: tf.Tensor) -> tf.Tensor:\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(inputs=first_token_tensor)\n\n return pooled_output\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->LayoutLM\nclass TFLayoutLMPredictionHeadTransform(tf.keras.layers.Layer):\n def __init__(self, config: LayoutLMConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"dense\",\n )\n\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = get_tf_activation(config.hidden_act)\n else:\n self.transform_act_fn = config.hidden_act\n\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n\n def call(self, hidden_states: tf.Tensor) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(inputs=hidden_states)\n\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->LayoutLM\nclass TFLayoutLMLMPredictionHead(tf.keras.layers.Layer):\n def __init__(self, config: LayoutLMConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):\n super().__init__(**kwargs)\n\n self.vocab_size = config.vocab_size\n self.hidden_size = config.hidden_size\n\n self.transform = TFLayoutLMPredictionHeadTransform(config, name=\"transform\")\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.input_embeddings = input_embeddings\n\n def build(self, input_shape: tf.TensorShape):\n self.bias = self.add_weight(shape=(self.vocab_size,), initializer=\"zeros\", trainable=True, name=\"bias\")\n\n super().build(input_shape)\n\n def get_output_embeddings(self) -> tf.keras.layers.Layer:\n return self.input_embeddings\n\n def set_output_embeddings(self, value: tf.Variable):\n self.input_embeddings.weight = value\n self.input_embeddings.vocab_size = shape_list(value)[0]\n\n def 
get_bias(self) -> Dict[str, tf.Variable]:\n return {\"bias\": self.bias}\n\n def set_bias(self, value: tf.Variable):\n self.bias = value[\"bias\"]\n self.vocab_size = shape_list(value[\"bias\"])[0]\n\n def call(self, hidden_states: tf.Tensor) -> tf.Tensor:\n hidden_states = self.transform(hidden_states=hidden_states)\n seq_length = shape_list(hidden_states)[1]\n hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])\n hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)\n hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])\n hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)\n\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->LayoutLM\nclass TFLayoutLMMLMHead(tf.keras.layers.Layer):\n def __init__(self, config: LayoutLMConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):\n super().__init__(**kwargs)\n\n self.predictions = TFLayoutLMLMPredictionHead(config, input_embeddings, name=\"predictions\")\n\n def call(self, sequence_output: tf.Tensor) -> tf.Tensor:\n prediction_scores = self.predictions(hidden_states=sequence_output)\n\n return prediction_scores\n\n\n@keras_serializable\nclass TFLayoutLMMainLayer(tf.keras.layers.Layer):\n config_class = LayoutLMConfig\n\n def __init__(self, config: LayoutLMConfig, add_pooling_layer: bool = True, **kwargs):\n super().__init__(**kwargs)\n\n self.config = config\n\n self.embeddings = TFLayoutLMEmbeddings(config, name=\"embeddings\")\n self.encoder = TFLayoutLMEncoder(config, name=\"encoder\")\n self.pooler = TFLayoutLMPooler(config, name=\"pooler\") if add_pooling_layer else None\n\n def get_input_embeddings(self) -> tf.keras.layers.Layer:\n return self.embeddings\n\n def set_input_embeddings(self, value: tf.Variable):\n self.embeddings.weight = value\n self.embeddings.vocab_size = shape_list(value)[0]\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n raise NotImplementedError\n\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n bbox: Optional[Union[np.ndarray, tf.Tensor]] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None,\n encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: bool = False,\n **kwargs,\n ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n bbox=bbox,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"input_ids\"] is not None and inputs[\"inputs_embeds\"] is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif inputs[\"input_ids\"] is not None:\n input_shape = shape_list(inputs[\"input_ids\"])\n elif inputs[\"inputs_embeds\"] is not None:\n input_shape = shape_list(inputs[\"inputs_embeds\"])[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if inputs[\"attention_mask\"] is None:\n inputs[\"attention_mask\"] = tf.fill(dims=input_shape, value=1)\n\n if inputs[\"token_type_ids\"] is None:\n inputs[\"token_type_ids\"] = tf.fill(dims=input_shape, value=0)\n if inputs[\"bbox\"] is None:\n inputs[\"bbox\"] = tf.fill(dims=input_shape + [4], value=0)\n\n embedding_output = self.embeddings(\n input_ids=inputs[\"input_ids\"],\n bbox=inputs[\"bbox\"],\n position_ids=inputs[\"position_ids\"],\n token_type_ids=inputs[\"token_type_ids\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n training=inputs[\"training\"],\n )\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = tf.reshape(inputs[\"attention_mask\"], (input_shape[0], 1, 1, input_shape[1]))\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)\n one_cst = tf.constant(1.0, dtype=embedding_output.dtype)\n ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)\n extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), 
ten_thousand_cst)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n if inputs[\"head_mask\"] is not None:\n raise NotImplementedError\n else:\n inputs[\"head_mask\"] = [None] * self.config.num_hidden_layers\n\n encoder_outputs = self.encoder(\n hidden_states=embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=inputs[\"head_mask\"],\n # Need to pass these required positional arguments to `Encoder`\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=False,\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None\n\n if not inputs[\"return_dict\"]:\n return (\n sequence_output,\n pooled_output,\n ) + encoder_outputs[1:]\n\n return TFBaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\nclass TFLayoutLMPreTrainedModel(TFPreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = LayoutLMConfig\n base_model_prefix = \"layoutlm\"\n\n\nLAYOUTLM_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the\n generic methods the library implements for all its model (such as downloading or saving, resizing the input\n embeddings, pruning heads etc.)\n\n This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use\n it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage\n and behavior.\n\n .. 
note::\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all\n the tensors in the first argument of the model call function: :obj:`model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors in\n the first positional argument :\n\n - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n :obj:`model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\n\n Args:\n config (:class:`~transformers.LayoutLMConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.TFPreTrainedModel.from_pretrained` method to load the\n model weights.\n\"\"\"\n\nLAYOUTLM_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.LayoutLMTokenizer`. See\n :func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n bbox (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0}, 4)`, `optional`):\n Bounding Boxes of each input sequence tokens. Selected in the range ``[0,\n config.max_2d_position_embeddings- 1]``.\n attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`__\n position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`__\n head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. 
Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n training (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare LayoutLM Model transformer outputting raw hidden-states without any specific head on top.\",\n LAYOUTLM_START_DOCSTRING,\n)\nclass TFLayoutLMModel(TFLayoutLMPreTrainedModel):\n def __init__(self, config: LayoutLMConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.layoutlm = TFLayoutLMMainLayer(config, name=\"layoutlm\")\n\n @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(\n output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC\n )\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n bbox: Optional[Union[np.ndarray, tf.Tensor]] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None,\n encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:\n r\"\"\"\n Returns:\n\n Examples::\n\n >>> from transformers import LayoutLMTokenizer, TFLayoutLMModel\n >>> import tensorflow as tf\n\n >>> tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased')\n >>> model = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased')\n\n >>> words = [\"Hello\", \"world\"]\n >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]\n\n >>> token_boxes = []\n >>> for word, box in zip(words, normalized_word_boxes):\n ... word_tokens = tokenizer.tokenize(word)\n ... 
token_boxes.extend([box] * len(word_tokens))\n >>> # add bounding boxes of cls + sep tokens\n >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]\n\n >>> encoding = tokenizer(' '.join(words), return_tensors=\"tf\")\n >>> input_ids = encoding[\"input_ids\"]\n >>> attention_mask = encoding[\"attention_mask\"]\n >>> token_type_ids = encoding[\"token_type_ids\"]\n >>> bbox = tf.convert_to_tensor([token_boxes])\n\n >>> outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)\n\n >>> last_hidden_states = outputs.last_hidden_state\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n bbox=bbox,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.layoutlm(\n input_ids=inputs[\"input_ids\"],\n bbox=inputs[\"bbox\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n position_ids=inputs[\"position_ids\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n\n return outputs\n\n # Copied from transformers.models.bert.modeling_tf_bert.TFBertModel.serving_output\n def serving_output(\n self, output: TFBaseModelOutputWithPoolingAndCrossAttentions\n ) -> TFBaseModelOutputWithPoolingAndCrossAttentions:\n output_cache = self.config.use_cache and self.config.is_decoder\n pkv = tf.convert_to_tensor(output.past_key_values) if output_cache else None\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n cross_attns = tf.convert_to_tensor(output.cross_attentions) if output.cross_attentions is not None else None\n if not (self.config.output_attentions and self.config.add_cross_attention):\n cross_attns = None\n\n return TFBaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=output.last_hidden_state,\n pooler_output=output.pooler_output,\n past_key_values=pkv,\n hidden_states=hs,\n attentions=attns,\n cross_attentions=cross_attns,\n )\n\n\n@add_start_docstrings(\"\"\"LayoutLM Model with a `language modeling` head on top. \"\"\", LAYOUTLM_START_DOCSTRING)\nclass TFLayoutLMForMaskedLM(TFLayoutLMPreTrainedModel, TFMaskedLanguageModelingLoss):\n # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model\n _keys_to_ignore_on_load_unexpected = [\n r\"pooler\",\n r\"cls.seq_relationship\",\n r\"cls.predictions.decoder.weight\",\n r\"nsp___cls\",\n ]\n\n def __init__(self, config: LayoutLMConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `TFLayoutLMForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.layoutlm = TFLayoutLMMainLayer(config, add_pooling_layer=True, name=\"layoutlm\")\n self.mlm = TFLayoutLMMLMHead(config, input_embeddings=self.layoutlm.embeddings, name=\"mlm___cls\")\n\n def get_lm_head(self) -> tf.keras.layers.Layer:\n return self.mlm.predictions\n\n def get_prefix_bias_name(self) -> str:\n warnings.warn(\"The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.\", FutureWarning)\n return self.name + \"/\" + self.mlm.name + \"/\" + self.mlm.predictions.name\n\n @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n bbox: Optional[Union[np.ndarray, tf.Tensor]] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[Union[np.ndarray, tf.Tensor]] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n\n Returns:\n\n Examples::\n\n >>> from transformers import LayoutLMTokenizer, TFLayoutLMForMaskedLM\n >>> import tensorflow as tf\n\n >>> tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased')\n >>> model = TFLayoutLMForMaskedLM.from_pretrained('microsoft/layoutlm-base-uncased')\n\n >>> words = [\"Hello\", \"[MASK]\"]\n >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]\n\n >>> token_boxes = []\n >>> for word, box in zip(words, normalized_word_boxes):\n ... word_tokens = tokenizer.tokenize(word)\n ... 
token_boxes.extend([box] * len(word_tokens))\n >>> # add bounding boxes of cls + sep tokens\n >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]\n\n >>> encoding = tokenizer(' '.join(words), return_tensors=\"tf\")\n >>> input_ids = encoding[\"input_ids\"]\n >>> attention_mask = encoding[\"attention_mask\"]\n >>> token_type_ids = encoding[\"token_type_ids\"]\n >>> bbox = tf.convert_to_tensor([token_boxes])\n\n >>> labels = tokenizer(\"Hello world\", return_tensors=\"tf\")[\"input_ids\"]\n\n >>> outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids,\n ... labels=labels)\n\n >>> loss = outputs.loss\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n bbox=bbox,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.layoutlm(\n input_ids=inputs[\"input_ids\"],\n bbox=inputs[\"bbox\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n position_ids=inputs[\"position_ids\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n sequence_output = outputs[0]\n prediction_scores = self.mlm(sequence_output=sequence_output, training=inputs[\"training\"])\n loss = (\n None if inputs[\"labels\"] is None else self.compute_loss(labels=inputs[\"labels\"], logits=prediction_scores)\n )\n\n if not inputs[\"return_dict\"]:\n output = (prediction_scores,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TFMaskedLMOutput(\n loss=loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\n \"\"\"\n LayoutLM Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n \"\"\",\n LAYOUTLM_START_DOCSTRING,\n)\nclass TFLayoutLMForSequenceClassification(TFLayoutLMPreTrainedModel, TFSequenceClassificationLoss):\n # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model\n _keys_to_ignore_on_load_unexpected = [r\"mlm___cls\", r\"nsp___cls\", r\"cls.predictions\", r\"cls.seq_relationship\"]\n _keys_to_ignore_on_load_missing = [r\"dropout\"]\n\n def __init__(self, config: LayoutLMConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.num_labels = config.num_labels\n\n self.layoutlm = TFLayoutLMMainLayer(config, name=\"layoutlm\")\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n self.classifier = tf.keras.layers.Dense(\n units=config.num_labels,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"classifier\",\n )\n\n @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n bbox: Optional[Union[np.ndarray, tf.Tensor]] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[Union[np.ndarray, tf.Tensor]] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n\n Returns:\n\n Examples::\n\n >>> from transformers import LayoutLMTokenizer, TFLayoutLMForSequenceClassification\n >>> import tensorflow as tf\n\n >>> tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased')\n >>> model = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased')\n\n >>> words = [\"Hello\", \"world\"]\n >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]\n\n >>> token_boxes = []\n >>> for word, box in zip(words, normalized_word_boxes):\n ... word_tokens = tokenizer.tokenize(word)\n ... token_boxes.extend([box] * len(word_tokens))\n >>> # add bounding boxes of cls + sep tokens\n >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]\n\n >>> encoding = tokenizer(' '.join(words), return_tensors=\"tf\")\n >>> input_ids = encoding[\"input_ids\"]\n >>> attention_mask = encoding[\"attention_mask\"]\n >>> token_type_ids = encoding[\"token_type_ids\"]\n >>> bbox = tf.convert_to_tensor([token_boxes])\n >>> sequence_label = tf.convert_to_tensor([1])\n\n >>> outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids,\n ... 
labels=sequence_label)\n\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n bbox=bbox,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.layoutlm(\n input_ids=inputs[\"input_ids\"],\n bbox=inputs[\"bbox\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n position_ids=inputs[\"position_ids\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n pooled_output = outputs[1]\n pooled_output = self.dropout(inputs=pooled_output, training=inputs[\"training\"])\n logits = self.classifier(inputs=pooled_output)\n loss = None if inputs[\"labels\"] is None else self.compute_loss(labels=inputs[\"labels\"], logits=logits)\n\n if not inputs[\"return_dict\"]:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TFSequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\n \"\"\"\n LayoutLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n LAYOUTLM_START_DOCSTRING,\n)\nclass TFLayoutLMForTokenClassification(TFLayoutLMPreTrainedModel, TFTokenClassificationLoss):\n # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model\n _keys_to_ignore_on_load_unexpected = [\n r\"pooler\",\n r\"mlm___cls\",\n r\"nsp___cls\",\n r\"cls.predictions\",\n r\"cls.seq_relationship\",\n ]\n _keys_to_ignore_on_load_missing = [r\"dropout\"]\n\n def __init__(self, config: LayoutLMConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.num_labels = config.num_labels\n\n self.layoutlm = TFLayoutLMMainLayer(config, add_pooling_layer=True, name=\"layoutlm\")\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n self.classifier = tf.keras.layers.Dense(\n units=config.num_labels,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"classifier\",\n )\n\n @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC)\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n bbox: Optional[Union[np.ndarray, tf.Tensor]] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[Union[np.ndarray, tf.Tensor]] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n\n Returns:\n\n Examples::\n\n >>> from transformers import LayoutLMTokenizer, TFLayoutLMForTokenClassification\n >>> import torch\n\n >>> tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased')\n >>> model = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased')\n\n >>> words = [\"Hello\", \"world\"]\n >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]\n\n >>> token_boxes = []\n >>> for word, box in zip(words, normalized_word_boxes):\n ... word_tokens = tokenizer.tokenize(word)\n ... token_boxes.extend([box] * len(word_tokens))\n >>> # add bounding boxes of cls + sep tokens\n >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]\n\n >>> encoding = tokenizer(' '.join(words), return_tensors=\"tf\")\n >>> input_ids = encoding[\"input_ids\"]\n >>> attention_mask = encoding[\"attention_mask\"]\n >>> token_type_ids = encoding[\"token_type_ids\"]\n >>> bbox = tf.convert_to_tensor([token_boxes])\n >>> token_labels = tf.convert_to_tensor([1,1,0,0])\n\n >>> outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids,\n ... 
labels=token_labels)\n\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n bbox=bbox,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.layoutlm(\n input_ids=inputs[\"input_ids\"],\n bbox=inputs[\"bbox\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n position_ids=inputs[\"position_ids\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n sequence_output = outputs[0]\n sequence_output = self.dropout(inputs=sequence_output, training=inputs[\"training\"])\n logits = self.classifier(inputs=sequence_output)\n loss = None if inputs[\"labels\"] is None else self.compute_loss(labels=inputs[\"labels\"], logits=logits)\n\n if not inputs[\"return_dict\"]:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TFTokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n",
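A minimal consolidated sketch of the token-classification usage illustrated in the LayoutLM docstrings above, kept separate from the source listings. It aligns word-level bounding boxes and labels to wordpieces before calling TFLayoutLMForTokenClassification and uses TensorFlow throughout (the TF model expects tf tensors). The words, boxes, `word_labels`, and `num_labels=2` are illustrative assumptions, not values from the files above, and labeling only the first wordpiece while marking the rest with `-100` assumes the shared TF token-classification loss ignores `-100` positions.

import tensorflow as tf

from transformers import LayoutLMTokenizer, TFLayoutLMForTokenClassification

tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
# num_labels=2 is an illustrative assumption for a binary tagging task
model = TFLayoutLMForTokenClassification.from_pretrained(
    "microsoft/layoutlm-base-uncased", num_labels=2
)

words = ["Hello", "world"]
normalized_word_boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]
word_labels = [1, 0]  # illustrative: one label per word, expanded to wordpieces below

token_boxes, token_labels = [], []
for word, box, label in zip(words, normalized_word_boxes, word_labels):
    word_tokens = tokenizer.tokenize(word)
    token_boxes.extend([box] * len(word_tokens))
    # label the first wordpiece only; -100 marks positions assumed to be ignored by the loss
    token_labels.extend([label] + [-100] * (len(word_tokens) - 1))

# account for the [CLS] and [SEP] tokens added by the tokenizer
token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
token_labels = [-100] + token_labels + [-100]

encoding = tokenizer(" ".join(words), return_tensors="tf")
outputs = model(
    input_ids=encoding["input_ids"],
    bbox=tf.convert_to_tensor([token_boxes]),
    attention_mask=encoding["attention_mask"],
    token_type_ids=encoding["token_type_ids"],
    labels=tf.convert_to_tensor([token_labels]),
)
print(outputs.loss, outputs.logits.shape)  # logits: (1, sequence_length, num_labels)

Expanding the boxes and labels per wordpiece before tokenizing the joined string keeps the bbox and label tensors the same length as input_ids, which is what the bbox argument of the models above expects.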
"# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 RoFormer model. \"\"\"\n\n\nimport math\nfrom typing import Dict, Optional, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ...activations_tf import get_tf_activation\nfrom ...file_utils import (\n MULTIPLE_CHOICE_DUMMY_INPUTS,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n)\nfrom ...modeling_tf_outputs import (\n TFBaseModelOutput,\n TFBaseModelOutputWithPooling,\n TFCausalLMOutput,\n TFMaskedLMOutput,\n TFMultipleChoiceModelOutput,\n TFQuestionAnsweringModelOutput,\n TFSequenceClassifierOutput,\n TFTokenClassifierOutput,\n)\nfrom ...modeling_tf_utils import (\n TFCausalLanguageModelingLoss,\n TFMaskedLanguageModelingLoss,\n TFModelInputType,\n TFMultipleChoiceLoss,\n TFPreTrainedModel,\n TFQuestionAnsweringLoss,\n TFSequenceClassificationLoss,\n TFSequenceSummary,\n TFTokenClassificationLoss,\n get_initializer,\n input_processing,\n keras_serializable,\n shape_list,\n)\nfrom ...utils import logging\nfrom .configuration_roformer import RoFormerConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"junnyu/roformer_chinese_base\"\n_CONFIG_FOR_DOC = \"RoFormerConfig\"\n_TOKENIZER_FOR_DOC = \"RoFormerTokenizer\"\n\nTF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"junnyu/roformer_chinese_small\",\n \"junnyu/roformer_chinese_base\",\n \"junnyu/roformer_chinese_char_small\",\n \"junnyu/roformer_chinese_char_base\",\n \"junnyu/roformer_small_discriminator\",\n \"junnyu/roformer_small_generator\"\n # See all RoFormer models at https://huggingface.co/models?filter=roformer\n]\n\n\nclass TFRoFormerSinusoidalPositionalEmbedding(tf.keras.layers.Layer):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\"\"\"\n\n def __init__(self, num_positions: int, embedding_dim: int, **kwargs):\n super().__init__(**kwargs)\n\n if embedding_dim % 2 != 0:\n raise NotImplementedError(f\"odd embedding_dim {embedding_dim} not supported\")\n\n self.embedding_dim = embedding_dim\n self.num_positions = num_positions\n\n def build(self, input_shape: tf.TensorShape):\n \"\"\"\n Build shared token embedding layer Shared weights logic adapted from\n https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24\n \"\"\"\n\n weight = self._init_weight(self.num_positions, self.embedding_dim)\n\n self.weight = self.add_weight(\n name=\"embeddings\",\n shape=[self.num_positions, self.embedding_dim],\n )\n weight = tf.cast(weight, dtype=self.weight.dtype)\n\n self.weight.assign(weight)\n\n super().build(input_shape)\n\n @staticmethod\n def _init_weight(n_pos: int, dim: int):\n \"\"\"\n Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in\n the 2nd half of the vector. 
[dim // 2:]\n \"\"\"\n position_enc = np.array(\n [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]\n )\n table = np.zeros_like(position_enc)\n # index 0 is all zero\n table[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])\n table[:, dim // 2 :] = np.cos(position_enc[:, 1::2])\n # convert to tensor\n table = tf.convert_to_tensor(table)\n tf.stop_gradient(table)\n return table\n\n def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = input_shape[:2]\n\n positions = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name=\"range\")\n return tf.gather(self.weight, positions)\n\n\nclass TFRoFormerEmbeddings(tf.keras.layers.Layer):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config: RoFormerConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.vocab_size = config.vocab_size\n self.type_vocab_size = config.type_vocab_size\n self.embedding_size = config.embedding_size\n self.initializer_range = config.initializer_range\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n\n def build(self, input_shape: tf.TensorShape):\n with tf.name_scope(\"word_embeddings\"):\n self.weight = self.add_weight(\n name=\"weight\",\n shape=[self.vocab_size, self.embedding_size],\n initializer=get_initializer(self.initializer_range),\n )\n\n with tf.name_scope(\"token_type_embeddings\"):\n self.token_type_embeddings = self.add_weight(\n name=\"embeddings\",\n shape=[self.type_vocab_size, self.embedding_size],\n initializer=get_initializer(self.initializer_range),\n )\n\n super().build(input_shape)\n\n def call(\n self,\n input_ids: tf.Tensor = None,\n token_type_ids: tf.Tensor = None,\n inputs_embeds: tf.Tensor = None,\n training: bool = False,\n ) -> tf.Tensor:\n \"\"\"\n Applies embedding based on inputs tensor.\n\n\n Returns:\n final_embeddings (:obj:`tf.Tensor`): output embedding tensor.\n \"\"\"\n assert not (input_ids is None and inputs_embeds is None)\n\n if input_ids is not None:\n inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n\n input_shape = shape_list(inputs_embeds)[:-1]\n\n if token_type_ids is None:\n token_type_ids = tf.fill(dims=input_shape, value=0)\n\n token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n final_embeddings = inputs_embeds + token_type_embeds\n final_embeddings = self.LayerNorm(inputs=final_embeddings)\n final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n\n return final_embeddings\n\n\nclass TFRoFormerSelfAttention(tf.keras.layers.Layer):\n def __init__(self, config: RoFormerConfig, **kwargs):\n super().__init__(**kwargs)\n\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number \"\n f\"of attention heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n self.sqrt_att_head_size = math.sqrt(self.attention_head_size)\n\n self.query = tf.keras.layers.Dense(\n units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), 
name=\"query\"\n )\n self.key = tf.keras.layers.Dense(\n units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name=\"key\"\n )\n self.value = tf.keras.layers.Dense(\n units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name=\"value\"\n )\n self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)\n self.rotary_value = config.rotary_value\n\n def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:\n # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]\n tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))\n\n # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]\n return tf.transpose(tensor, perm=[0, 2, 1, 3])\n\n def call(\n self,\n hidden_states: tf.Tensor,\n attention_mask: tf.Tensor,\n sinusoidal_pos: tf.Tensor,\n head_mask: tf.Tensor,\n output_attentions: bool,\n training: bool = False,\n ) -> Tuple[tf.Tensor]:\n batch_size = shape_list(hidden_states)[0]\n mixed_query_layer = self.query(inputs=hidden_states)\n mixed_key_layer = self.key(inputs=hidden_states)\n mixed_value_layer = self.value(inputs=hidden_states)\n query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)\n key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)\n value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)\n\n if sinusoidal_pos is not None:\n if self.rotary_value:\n query_layer, key_layer, value_layer = self.apply_rotary_position_embeddings(\n sinusoidal_pos, query_layer, key_layer, value_layer\n )\n else:\n query_layer, key_layer = self.apply_rotary_position_embeddings(sinusoidal_pos, query_layer, key_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n # (batch size, num_heads, seq_len_q, seq_len_k)\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)\n attention_scores = tf.divide(attention_scores, dk)\n\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in TFRoFormerModel call() function)\n attention_scores = tf.add(attention_scores, attention_mask)\n\n # Normalize the attention scores to probabilities.\n attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(inputs=attention_probs, training=training)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = tf.multiply(attention_probs, head_mask)\n\n attention_output = tf.matmul(attention_probs, value_layer)\n attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])\n\n # (batch_size, seq_len_q, all_head_size)\n attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))\n outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)\n\n return outputs\n\n @staticmethod\n def apply_rotary_position_embeddings(sinusoidal_pos, query_layer, key_layer, value_layer=None):\n # https://kexue.fm/archives/8265\n # sin [batch_size, num_heads, sequence_length, embed_size_per_head//2]\n # cos [batch_size, 
num_heads, sequence_length, embed_size_per_head//2]\n sin, cos = tf.split(sinusoidal_pos, num_or_size_splits=2, axis=-1)\n # sin [θ0,θ1,θ2......θd/2-1]-> sin_pos [θ0,θ0,θ1,θ1,θ2,θ2......θd/2-1,θd/2-1]\n # cos [θ0,θ1,θ2......θd/2-1]-> cos_pos [θ0,θ0,θ1,θ1,θ2,θ2......θd/2-1,θd/2-1]\n sin_pos = tf.repeat(sin, 2, axis=-1)\n cos_pos = tf.repeat(cos, 2, axis=-1)\n # rotate_half_query_layer [-q1,q0,-q3,q2......,-qd-1,qd-2]\n rotate_half_query_layer = tf.stack([-query_layer[..., 1::2], query_layer[..., ::2]], axis=-1)\n rotate_half_query_layer = tf.reshape(rotate_half_query_layer, shape_list(query_layer))\n query_layer = query_layer * cos_pos + rotate_half_query_layer * sin_pos\n # rotate_half_key_layer [-k1,k0,-k3,k2......,-kd-1,kd-2]\n rotate_half_key_layer = tf.stack([-key_layer[..., 1::2], key_layer[..., ::2]], axis=-1)\n rotate_half_key_layer = tf.reshape(rotate_half_key_layer, shape_list(key_layer))\n key_layer = key_layer * cos_pos + rotate_half_key_layer * sin_pos\n if value_layer is not None:\n # rotate_half_value_layer [-v1,v0,-v3,v2......,-vd-1,vd-2]\n rotate_half_value_layer = tf.stack([-value_layer[..., 1::2], value_layer[..., ::2]], axis=-1)\n rotate_half_value_layer = tf.reshape(rotate_half_value_layer, shape_list(value_layer))\n value_layer = value_layer * cos_pos + rotate_half_value_layer * sin_pos\n return query_layer, key_layer, value_layer\n return query_layer, key_layer\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->RoFormer\nclass TFRoFormerSelfOutput(tf.keras.layers.Layer):\n def __init__(self, config: RoFormerConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n\n def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.dropout(inputs=hidden_states, training=training)\n hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)\n\n return hidden_states\n\n\nclass TFRoFormerAttention(tf.keras.layers.Layer):\n def __init__(self, config: RoFormerConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.self_attention = TFRoFormerSelfAttention(config, name=\"self\")\n self.dense_output = TFRoFormerSelfOutput(config, name=\"output\")\n\n def prune_heads(self, heads):\n raise NotImplementedError\n\n def call(\n self,\n input_tensor: tf.Tensor,\n attention_mask: tf.Tensor,\n sinusoidal_pos: tf.Tensor,\n head_mask: tf.Tensor,\n output_attentions: bool,\n training: bool = False,\n ) -> Tuple[tf.Tensor]:\n self_outputs = self.self_attention(\n hidden_states=input_tensor,\n attention_mask=attention_mask,\n sinusoidal_pos=sinusoidal_pos,\n head_mask=head_mask,\n output_attentions=output_attentions,\n training=training,\n )\n attention_output = self.dense_output(\n hidden_states=self_outputs[0], input_tensor=input_tensor, training=training\n )\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->RoFormer\nclass TFRoFormerIntermediate(tf.keras.layers.Layer):\n def __init__(self, config: RoFormerConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = 
tf.keras.layers.Dense(\n units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = get_tf_activation(config.hidden_act)\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def call(self, hidden_states: tf.Tensor) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->RoFormer\nclass TFRoFormerOutput(tf.keras.layers.Layer):\n def __init__(self, config: RoFormerConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n\n def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.dropout(inputs=hidden_states, training=training)\n hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)\n\n return hidden_states\n\n\nclass TFRoFormerLayer(tf.keras.layers.Layer):\n def __init__(self, config: RoFormerConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.attention = TFRoFormerAttention(config, name=\"attention\")\n self.intermediate = TFRoFormerIntermediate(config, name=\"intermediate\")\n self.roformer_output = TFRoFormerOutput(config, name=\"output\")\n\n def call(\n self,\n hidden_states: tf.Tensor,\n attention_mask: tf.Tensor,\n sinusoidal_pos: tf.Tensor,\n head_mask: tf.Tensor,\n output_attentions: bool,\n training: bool = False,\n ) -> Tuple[tf.Tensor]:\n attention_outputs = self.attention(\n input_tensor=hidden_states,\n attention_mask=attention_mask,\n sinusoidal_pos=sinusoidal_pos,\n head_mask=head_mask,\n output_attentions=output_attentions,\n training=training,\n )\n attention_output = attention_outputs[0]\n intermediate_output = self.intermediate(hidden_states=attention_output)\n layer_output = self.roformer_output(\n hidden_states=intermediate_output, input_tensor=attention_output, training=training\n )\n outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them\n\n return outputs\n\n\nclass TFRoFormerEncoder(tf.keras.layers.Layer):\n def __init__(self, config: RoFormerConfig, **kwargs):\n super().__init__(**kwargs)\n self.embed_positions = TFRoFormerSinusoidalPositionalEmbedding(\n config.max_position_embeddings,\n config.hidden_size // config.num_attention_heads,\n name=\"embed_positions\",\n )\n self.layer = [TFRoFormerLayer(config, name=f\"layer_._{i}\") for i in range(config.num_hidden_layers)]\n\n def call(\n self,\n hidden_states: tf.Tensor,\n attention_mask: tf.Tensor,\n head_mask: tf.Tensor,\n output_attentions: bool,\n output_hidden_states: bool,\n return_dict: bool,\n training: bool = False,\n ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n # [sequence_length, embed_size_per_head] -> [batch_size, num_heads, sequence_length, embed_size_per_head]\n sinusoidal_pos = self.embed_positions(shape_list(hidden_states)[:-1])[None, None, :, :]\n\n for i, layer_module in enumerate(self.layer):\n 
if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n sinusoidal_pos=sinusoidal_pos,\n head_mask=head_mask[i],\n output_attentions=output_attentions,\n training=training,\n )\n hidden_states = layer_outputs[0]\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\n\n return TFBaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions\n )\n\n\nclass TFRoFormerPredictionHeadTransform(tf.keras.layers.Layer):\n def __init__(self, config: RoFormerConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.embedding_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"dense\",\n )\n\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = get_tf_activation(config.hidden_act)\n else:\n self.transform_act_fn = config.hidden_act\n\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n\n def call(self, hidden_states: tf.Tensor) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(inputs=hidden_states)\n\n return hidden_states\n\n\nclass TFRoFormerLMPredictionHead(tf.keras.layers.Layer):\n def __init__(self, config: RoFormerConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):\n super().__init__(**kwargs)\n\n self.vocab_size = config.vocab_size\n self.embedding_size = config.embedding_size\n\n self.transform = TFRoFormerPredictionHeadTransform(config, name=\"transform\")\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.input_embeddings = input_embeddings\n\n def build(self, input_shape: tf.TensorShape):\n self.bias = self.add_weight(shape=(self.vocab_size,), initializer=\"zeros\", trainable=True, name=\"bias\")\n\n super().build(input_shape)\n\n def get_output_embeddings(self) -> tf.keras.layers.Layer:\n return self.input_embeddings\n\n def set_output_embeddings(self, value: tf.Variable):\n self.input_embeddings.weight = value\n self.input_embeddings.vocab_size = shape_list(value)[0]\n\n def get_bias(self) -> Dict[str, tf.Variable]:\n return {\"bias\": self.bias}\n\n def set_bias(self, value: tf.Variable):\n self.bias = value[\"bias\"]\n self.vocab_size = shape_list(value[\"bias\"])[0]\n\n def call(self, hidden_states: tf.Tensor) -> tf.Tensor:\n hidden_states = self.transform(hidden_states=hidden_states)\n seq_length = shape_list(hidden_states)[1]\n hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])\n hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)\n hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])\n hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)\n\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->RoFormer\nclass TFRoFormerMLMHead(tf.keras.layers.Layer):\n def __init__(self, config: RoFormerConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):\n 
super().__init__(**kwargs)\n\n self.predictions = TFRoFormerLMPredictionHead(config, input_embeddings, name=\"predictions\")\n\n def call(self, sequence_output: tf.Tensor) -> tf.Tensor:\n prediction_scores = self.predictions(hidden_states=sequence_output)\n\n return prediction_scores\n\n\n@keras_serializable\nclass TFRoFormerMainLayer(tf.keras.layers.Layer):\n config_class = RoFormerConfig\n\n def __init__(self, config: RoFormerConfig, add_pooling_layer: bool = True, **kwargs):\n super().__init__(**kwargs)\n\n self.config = config\n\n self.embeddings = TFRoFormerEmbeddings(config, name=\"embeddings\")\n if config.embedding_size != config.hidden_size:\n self.embeddings_project = tf.keras.layers.Dense(config.hidden_size, name=\"embeddings_project\")\n\n self.encoder = TFRoFormerEncoder(config, name=\"encoder\")\n\n def get_input_embeddings(self) -> tf.keras.layers.Layer:\n return self.embeddings\n\n def set_input_embeddings(self, value: tf.Variable):\n self.embeddings.weight = value\n self.embeddings.vocab_size = shape_list(value)[0]\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n raise NotImplementedError\n\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: bool = False,\n **kwargs,\n ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"input_ids\"] is not None and inputs[\"inputs_embeds\"] is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif inputs[\"input_ids\"] is not None:\n input_shape = shape_list(inputs[\"input_ids\"])\n elif inputs[\"inputs_embeds\"] is not None:\n input_shape = shape_list(inputs[\"inputs_embeds\"])[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if inputs[\"attention_mask\"] is None:\n inputs[\"attention_mask\"] = tf.fill(dims=input_shape, value=1)\n\n if inputs[\"token_type_ids\"] is None:\n inputs[\"token_type_ids\"] = tf.fill(dims=input_shape, value=0)\n\n embedding_output = self.embeddings(\n input_ids=inputs[\"input_ids\"],\n token_type_ids=inputs[\"token_type_ids\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n training=inputs[\"training\"],\n )\n if hasattr(self, \"embeddings_project\"):\n embedding_output = self.embeddings_project(embedding_output, training=inputs[\"training\"])\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension 
here.\n extended_attention_mask = tf.reshape(inputs[\"attention_mask\"], (input_shape[0], 1, 1, input_shape[1]))\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)\n one_cst = tf.constant(1.0, dtype=embedding_output.dtype)\n ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)\n extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n if inputs[\"head_mask\"] is not None:\n raise NotImplementedError\n else:\n inputs[\"head_mask\"] = [None] * self.config.num_hidden_layers\n\n encoder_outputs = self.encoder(\n hidden_states=embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=inputs[\"head_mask\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n\n sequence_output = encoder_outputs[0]\n\n if not inputs[\"return_dict\"]:\n return (sequence_output,) + encoder_outputs[1:]\n\n return TFBaseModelOutput(\n last_hidden_state=sequence_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\nclass TFRoFormerPreTrainedModel(TFPreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = RoFormerConfig\n base_model_prefix = \"roformer\"\n\n\nROFORMER_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the\n generic methods the library implements for all its model (such as downloading or saving, resizing the input\n embeddings, pruning heads etc.)\n\n This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use\n it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage\n and behavior.\n\n .. 
note::\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all\n the tensors in the first argument of the model call function: :obj:`model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors in\n the first positional argument :\n\n - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n :obj:`model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\n\n Args:\n config (:class:`~transformers.RoFormerConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nROFORMER_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`np.ndarray`, :obj:`tf.Tensor`, :obj:`List[tf.Tensor]` :obj:`Dict[str, tf.Tensor]` or :obj:`Dict[str, np.ndarray]` and each example must have the shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.RoFormerTokenizer`. See\n :func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`__\n head_mask (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This\n argument can be used in eager mode, in graph mode the value will always be set to True.\n training (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare RoFormer Model transformer outputing raw hidden-states without any specific head on top.\",\n ROFORMER_START_DOCSTRING,\n)\nclass TFRoFormerModel(TFRoFormerPreTrainedModel):\n def __init__(self, config: RoFormerConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.roformer = TFRoFormerMainLayer(config, name=\"roformer\")\n\n @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFBaseModelOutputWithPooling,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.roformer(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n\n return outputs\n\n def serving_output(self, output: TFBaseModelOutput) -> TFBaseModelOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\"\"\"RoFormer Model with a `language modeling` head on top. 
\"\"\", ROFORMER_START_DOCSTRING)\nclass TFRoFormerForMaskedLM(TFRoFormerPreTrainedModel, TFMaskedLanguageModelingLoss):\n def __init__(self, config: RoFormerConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `TFRoFormerForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.roformer = TFRoFormerMainLayer(config, name=\"roformer\")\n self.mlm = TFRoFormerMLMHead(config, input_embeddings=self.roformer.embeddings, name=\"mlm___cls\")\n\n def get_lm_head(self) -> tf.keras.layers.Layer:\n return self.mlm.predictions\n\n @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFMaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[Union[np.ndarray, tf.Tensor]] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.roformer(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n sequence_output = outputs[0]\n prediction_scores = self.mlm(sequence_output=sequence_output, training=inputs[\"training\"])\n loss = (\n None if inputs[\"labels\"] is None else self.compute_loss(labels=inputs[\"labels\"], logits=prediction_scores)\n )\n\n if not inputs[\"return_dict\"]:\n output = (prediction_scores,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TFMaskedLMOutput(\n loss=loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if 
self.config.output_attentions else None\n\n return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\n \"\"\"RoFormer Model with a `language modeling` head on top for CLM fine-tuning. \"\"\", ROFORMER_START_DOCSTRING\n)\nclass TFRoFormerForCausalLM(TFRoFormerPreTrainedModel, TFCausalLanguageModelingLoss):\n def __init__(self, config: RoFormerConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n if not config.is_decoder:\n logger.warning(\"If you want to use `TFRoFormerForCausalLM` as a standalone, add `is_decoder=True.`\")\n\n self.roformer = TFRoFormerMainLayer(config, name=\"roformer\")\n self.mlm = TFRoFormerMLMHead(config, input_embeddings=self.roformer.embeddings, name=\"mlm___cls\")\n\n def get_lm_head(self) -> tf.keras.layers.Layer:\n return self.mlm.predictions\n\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFCausalLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[Union[np.ndarray, tf.Tensor]] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the cross entropy classification loss. 
Indices should be in ``[0, ...,\n config.vocab_size - 1]``.\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.roformer(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n sequence_output = outputs[0]\n logits = self.mlm(sequence_output=sequence_output, training=inputs[\"training\"])\n loss = None\n\n if inputs[\"labels\"] is not None:\n # shift labels to the left and cut last logit token\n logits = logits[:, :-1]\n labels = inputs[\"labels\"][:, 1:]\n loss = self.compute_loss(labels=labels, logits=logits)\n\n if not inputs[\"return_dict\"]:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TFCausalLMOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def serving_output(self, output: TFCausalLMOutput) -> TFCausalLMOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFCausalLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n\n\nclass TFRoFormerClassificationHead(tf.keras.layers.Layer):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, config: RoFormerConfig, *inputs, **kwargs):\n super().__init__(*inputs, **kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n self.out_proj = tf.keras.layers.Dense(\n units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"out_proj\"\n )\n\n if isinstance(config.hidden_act, str):\n self.classifier_act_fn = get_tf_activation(config.hidden_act)\n else:\n self.classifier_act_fn = config.hidden_act\n\n def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:\n hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. 
to [CLS])\n hidden_states = self.dropout(inputs=hidden_states, training=training)\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.classifier_act_fn(hidden_states)\n hidden_states = self.dropout(inputs=hidden_states, training=training)\n hidden_states = self.out_proj(hidden_states)\n\n return hidden_states\n\n\n@add_start_docstrings(\n \"\"\"\n RoFormer Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.\n \"\"\",\n ROFORMER_START_DOCSTRING,\n)\nclass TFRoFormerForSequenceClassification(TFRoFormerPreTrainedModel, TFSequenceClassificationLoss):\n def __init__(self, config: RoFormerConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.num_labels = config.num_labels\n\n self.roformer = TFRoFormerMainLayer(config, name=\"roformer\")\n self.classifier = TFRoFormerClassificationHead(config, name=\"classifier\")\n\n @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFSequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[Union[np.ndarray, tf.Tensor]] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.roformer(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n logits = self.classifier(hidden_states=outputs[0], training=inputs[\"training\"])\n loss = None if inputs[\"labels\"] is None else self.compute_loss(labels=inputs[\"labels\"], logits=logits)\n\n if not inputs[\"return_dict\"]:\n output = (logits,) + outputs[1:]\n\n return ((loss,) + output) if loss is not None else output\n\n return TFSequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\n \"\"\"\n RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n \"\"\",\n ROFORMER_START_DOCSTRING,\n)\nclass TFRoFormerForMultipleChoice(TFRoFormerPreTrainedModel, TFMultipleChoiceLoss):\n def __init__(self, config: RoFormerConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.roformer = TFRoFormerMainLayer(config, name=\"roformer\")\n self.sequence_summary = TFSequenceSummary(config, config.initializer_range, name=\"sequence_summary\")\n self.classifier = tf.keras.layers.Dense(\n units=1, kernel_initializer=get_initializer(config.initializer_range), name=\"classifier\"\n )\n\n @property\n def dummy_inputs(self) -> Dict[str, tf.Tensor]:\n \"\"\"\n Dummy inputs to build the network.\n\n\n Returns:\n tf.Tensor with dummy inputs\n \"\"\"\n return {\"input_ids\": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}\n\n @add_start_docstrings_to_model_forward(\n ROFORMER_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\")\n )\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFMultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[Union[np.ndarray, tf.Tensor]] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
(See\n :obj:`input_ids` above)\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"input_ids\"] is not None:\n num_choices = shape_list(inputs[\"input_ids\"])[1]\n seq_length = shape_list(inputs[\"input_ids\"])[2]\n else:\n num_choices = shape_list(inputs[\"inputs_embeds\"])[1]\n seq_length = shape_list(inputs[\"inputs_embeds\"])[2]\n\n flat_input_ids = (\n tf.reshape(tensor=inputs[\"input_ids\"], shape=(-1, seq_length)) if inputs[\"input_ids\"] is not None else None\n )\n flat_attention_mask = (\n tf.reshape(tensor=inputs[\"attention_mask\"], shape=(-1, seq_length))\n if inputs[\"attention_mask\"] is not None\n else None\n )\n flat_token_type_ids = (\n tf.reshape(tensor=inputs[\"token_type_ids\"], shape=(-1, seq_length))\n if inputs[\"token_type_ids\"] is not None\n else None\n )\n flat_inputs_embeds = (\n tf.reshape(tensor=inputs[\"inputs_embeds\"], shape=(-1, seq_length, shape_list(inputs[\"inputs_embeds\"])[3]))\n if inputs[\"inputs_embeds\"] is not None\n else None\n )\n outputs = self.roformer(\n input_ids=flat_input_ids,\n attention_mask=flat_attention_mask,\n token_type_ids=flat_token_type_ids,\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=flat_inputs_embeds,\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n logits = self.sequence_summary(inputs=outputs[0], training=inputs[\"training\"])\n logits = self.classifier(inputs=logits)\n reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))\n loss = None if inputs[\"labels\"] is None else self.compute_loss(labels=inputs[\"labels\"], logits=reshaped_logits)\n\n if not inputs[\"return_dict\"]:\n output = (reshaped_logits,) + outputs[1:]\n\n return ((loss,) + output) if loss is not None else output\n\n return TFMultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n @tf.function(\n input_signature=[\n {\n \"input_ids\": tf.TensorSpec((None, None, None), tf.int32, name=\"input_ids\"),\n \"attention_mask\": tf.TensorSpec((None, None, None), tf.int32, name=\"attention_mask\"),\n \"token_type_ids\": tf.TensorSpec((None, None, None), tf.int32, name=\"token_type_ids\"),\n }\n ]\n )\n def serving(self, inputs: Dict[str, tf.Tensor]) -> TFMultipleChoiceModelOutput:\n output = self.call(input_ids=inputs)\n\n return self.serving_output(output)\n\n def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\n \"\"\"\n RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n ROFORMER_START_DOCSTRING,\n)\nclass TFRoFormerForTokenClassification(TFRoFormerPreTrainedModel, TFTokenClassificationLoss):\n def __init__(self, config: RoFormerConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.num_labels = config.num_labels\n\n self.roformer = TFRoFormerMainLayer(config, name=\"roformer\")\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n self.classifier = tf.keras.layers.Dense(\n units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"classifier\"\n )\n\n @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFTokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[Union[np.ndarray, tf.Tensor]] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.roformer(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n sequence_output = outputs[0]\n sequence_output = self.dropout(inputs=sequence_output, training=inputs[\"training\"])\n logits = self.classifier(inputs=sequence_output)\n loss = None if inputs[\"labels\"] is None else self.compute_loss(labels=inputs[\"labels\"], logits=logits)\n\n if not inputs[\"return_dict\"]:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TFTokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\n \"\"\"\n RoFormer Model with 
a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n ROFORMER_START_DOCSTRING,\n)\nclass TFRoFormerForQuestionAnswering(TFRoFormerPreTrainedModel, TFQuestionAnsweringLoss):\n def __init__(self, config: RoFormerConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.num_labels = config.num_labels\n\n self.roformer = TFRoFormerMainLayer(config, name=\"roformer\")\n self.qa_outputs = tf.keras.layers.Dense(\n units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"qa_outputs\"\n )\n\n @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFQuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,\n end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n start_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n start_positions=start_positions,\n end_positions=end_positions,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.roformer(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n sequence_output = outputs[0]\n logits = self.qa_outputs(inputs=sequence_output)\n start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)\n start_logits = tf.squeeze(input=start_logits, axis=-1)\n end_logits = tf.squeeze(input=end_logits, axis=-1)\n loss = None\n\n if inputs[\"start_positions\"] is not None and inputs[\"end_positions\"] is not None:\n labels = {\"start_position\": inputs[\"start_positions\"]}\n labels[\"end_position\"] = inputs[\"end_positions\"]\n loss = self.compute_loss(labels=labels, logits=(start_logits, end_logits))\n\n if not inputs[\"return_dict\"]:\n output = (start_logits, end_logits) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TFQuestionAnsweringModelOutput(\n loss=loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFQuestionAnsweringModelOutput(\n start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns\n )\n"
] |
[
[
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.ragged.constant",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.data.Options",
"numpy.array"
],
[
"torch.nn.Dropout",
"torch.nn.functional.softmax",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.zeros",
"torch.from_numpy",
"torch.nn.Embedding",
"numpy.transpose",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"torch.matmul",
"torch.tanh",
"torch.arange",
"torch.tensor",
"torch.nn.BCEWithLogitsLoss",
"tensorflow.train.list_variables",
"torch.nn.MSELoss",
"torch.nn.functional.pad"
],
[
"torch.nn.init.uniform_",
"torch.zeros",
"torch.cat",
"torch.sin",
"torch.nn.Embedding",
"torch.tanh",
"torch.repeat_interleave",
"torch.nn.BCEWithLogitsLoss",
"torch.split",
"torch.nn.Dropout",
"torch.softmax",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.einsum",
"torch.reshape",
"torch.from_numpy",
"torch.tensor",
"torch.arange",
"tensorflow.train.list_variables",
"torch.nn.functional.max_pool2d",
"torch.cos",
"torch.nn.functional.pad",
"torch.nn.init.constant_",
"torch.nn.functional.avg_pool2d",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"torch.nn.init.normal_",
"numpy.transpose",
"torch.nn.LayerNorm",
"torch.gather",
"torch.nn.MSELoss"
],
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.convert_to_tensor",
"tensorflow.cast",
"tensorflow.divide",
"tensorflow.subtract",
"tensorflow.gather",
"tensorflow.add",
"tensorflow.name_scope",
"tensorflow.matmul",
"tensorflow.fill",
"tensorflow.nn.bias_add",
"tensorflow.concatenate",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.multiply",
"tensorflow.range",
"tensorflow.reshape",
"tensorflow.keras.layers.Dropout"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.stack",
"tensorflow.cast",
"numpy.zeros_like",
"tensorflow.squeeze",
"numpy.sin",
"tensorflow.stop_gradient",
"tensorflow.gather",
"tensorflow.divide",
"tensorflow.subtract",
"tensorflow.add",
"tensorflow.name_scope",
"tensorflow.matmul",
"tensorflow.fill",
"numpy.power",
"tensorflow.keras.layers.Dense",
"tensorflow.split",
"tensorflow.nn.bias_add",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.multiply",
"tensorflow.reshape",
"numpy.cos",
"tensorflow.repeat",
"tensorflow.keras.layers.Dropout",
"tensorflow.TensorSpec"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
4kubo/rllib
|
[
"4f9f5f49916c7681675305b6c9a276b9e88c5e22"
] |
[
"rllib/util/tests/test_value_estimation.py"
] |
[
"import numpy as np\nimport pytest\nimport scipy\nimport torch\nimport torch.testing\n\nfrom rllib.dataset.datatypes import Observation\nfrom rllib.dataset.utilities import stack_list_of_tuples\nfrom rllib.util.value_estimation import discount_cumsum, discount_sum, mc_return\n\n\nclass TestDiscountedCumSum(object):\n @pytest.fixture(params=[1, 0.99, 0.9, 0], scope=\"class\")\n def gamma(self, request):\n return request.param\n\n @pytest.fixture(\n params=[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 2, 1, 0.2, 0.4]], scope=\"class\"\n )\n def rewards(self, request):\n return request.param\n\n @pytest.fixture(params=[True, False], scope=\"class\")\n def batch(self, request):\n return request.param\n\n def test_correctness(self, gamma, batch):\n rewards = np.array([[1, 2], [0.5, 0.3], [2, -1.2], [-0.2, 0.5]])\n\n cum_rewards = np.array(\n [\n [\n 1 + 0.5 * gamma + 2 * gamma ** 2 - 0.2 * gamma ** 3,\n 2 + 0.3 * gamma - 1.2 * gamma ** 2 + 0.5 * gamma ** 3,\n ],\n [\n 0.5 + 2 * gamma - 0.2 * gamma ** 2,\n 0.3 - 1.2 * gamma + 0.5 * gamma ** 2,\n ],\n [2 - 0.2 * gamma, -1.2 + 0.5 * gamma],\n [-0.2, 0.5],\n ]\n )\n if batch:\n rewards = np.tile(np.array(rewards), (5, 1, 1))\n cum_rewards = np.tile(np.array(cum_rewards), (5, 1, 1))\n assert scipy.allclose(cum_rewards, discount_cumsum(np.array(rewards), gamma))\n\n torch.testing.assert_allclose(\n torch.tensor(cum_rewards), discount_cumsum(torch.tensor(rewards), gamma)\n )\n torch.testing.assert_allclose(\n torch.tensor(cum_rewards[..., 0, :], dtype=torch.get_default_dtype()),\n discount_sum(torch.tensor(rewards, dtype=torch.get_default_dtype()), gamma),\n )\n\n for i in range(rewards.shape[-1]):\n torch.testing.assert_allclose(\n torch.tensor(cum_rewards)[..., [i]],\n discount_cumsum(torch.tensor(rewards)[..., [i]], gamma),\n )\n torch.testing.assert_allclose(\n torch.tensor(cum_rewards[..., 0, [i]], dtype=torch.get_default_dtype()),\n discount_sum(\n torch.tensor(rewards[..., [i]], dtype=torch.get_default_dtype()),\n gamma,\n ),\n )\n\n def test_shape_and_type(self, rewards, gamma):\n np_returns = discount_cumsum(np.atleast_2d(np.array(rewards)).T, gamma)\n assert np_returns.shape == (len(rewards), 1)\n assert type(np_returns) is np.ndarray\n\n t_returns = discount_cumsum(\n torch.tensor(rewards, dtype=torch.get_default_dtype()).unsqueeze(-1), gamma\n )\n assert t_returns.shape == torch.Size((len(rewards), 1))\n assert type(t_returns) is torch.Tensor\n\n torch.testing.assert_allclose(t_returns, np_returns)\n\n\nclass TestMCReturn(object):\n @pytest.fixture(params=[1, 0.99, 0.9, 0.5, 0], scope=\"class\")\n def gamma(self, request):\n return request.param\n\n @pytest.fixture(params=[True, False], scope=\"class\")\n def value_function(self, request):\n if request:\n return lambda x: torch.tensor([0.01])\n else:\n return None\n\n @pytest.fixture(params=[1, 0.1, 0], scope=\"class\")\n def entropy_reg(self, request):\n return request.param\n\n def test_correctness(self, gamma, value_function, entropy_reg):\n trajectory = [\n Observation(0, 0, reward=np.array([1]), done=False, entropy=0.2).to_torch(),\n Observation(\n 0, 0, reward=np.array([0.5]), done=False, entropy=0.3\n ).to_torch(),\n Observation(0, 0, reward=np.array([2]), done=False, entropy=0.5).to_torch(),\n Observation(\n 0, 0, reward=np.array([-0.2]), done=False, entropy=-0.2\n ).to_torch(),\n ]\n\n r0 = 1 + entropy_reg * 0.2\n r1 = 0.5 + entropy_reg * 0.3\n r2 = 2 + entropy_reg * 0.5\n r3 = -0.2 - entropy_reg * 0.2\n\n v = 0.01 if value_function is not None else 0\n\n reward = mc_return(\n 
stack_list_of_tuples(trajectory, 0),\n gamma,\n value_function=value_function,\n entropy_regularization=entropy_reg,\n reduction=\"min\",\n )\n\n torch.testing.assert_allclose(\n reward,\n torch.tensor(\n [r0 + r1 * gamma + r2 * gamma ** 2 + r3 * gamma ** 3 + v * gamma ** 4]\n ),\n )\n torch.testing.assert_allclose(\n mc_return(\n observation=Observation(state=0, reward=np.array([0])).to_torch(),\n gamma=gamma,\n value_function=value_function,\n entropy_regularization=entropy_reg,\n ),\n torch.tensor([0]),\n )\n"
] |
[
[
"torch.get_default_dtype",
"numpy.array",
"torch.testing.assert_allclose",
"torch.tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
juhyeonkim95/PyOptiX
|
[
"c7510ee9d967fe6c22fddcdcdd3b0127e075c8ba"
] |
[
"pyoptix/enums.py"
] |
[
"import numpy\nfrom pyoptix._driver import RTobjecttype, RTformat, RTfiltermode, RTwrapmode, RTtexturereadmode, \\\n RTtextureindexmode, RTbuffertype, RTbufferflag, RTexception\n\n\nclass Format:\n unknown = RTformat.RT_FORMAT_UNKNOWN\n float = RTformat.RT_FORMAT_FLOAT\n float2 = RTformat.RT_FORMAT_FLOAT2\n float3 = RTformat.RT_FORMAT_FLOAT3\n float4 = RTformat.RT_FORMAT_FLOAT4\n byte = RTformat.RT_FORMAT_BYTE\n byte2 = RTformat.RT_FORMAT_BYTE2\n byte3 = RTformat.RT_FORMAT_BYTE3\n byte4 = RTformat.RT_FORMAT_BYTE4\n unsigned_byte = RTformat.RT_FORMAT_UNSIGNED_BYTE\n unsigned_byte2 = RTformat.RT_FORMAT_UNSIGNED_BYTE2\n unsigned_byte3 = RTformat.RT_FORMAT_UNSIGNED_BYTE3\n unsigned_byte4 = RTformat.RT_FORMAT_UNSIGNED_BYTE4\n short = RTformat.RT_FORMAT_SHORT\n short2 = RTformat.RT_FORMAT_SHORT2\n short3 = RTformat.RT_FORMAT_SHORT3\n short4 = RTformat.RT_FORMAT_SHORT4\n unsigned_short = RTformat.RT_FORMAT_UNSIGNED_SHORT\n unsigned_short2 = RTformat.RT_FORMAT_UNSIGNED_SHORT2\n unsigned_short3 = RTformat.RT_FORMAT_UNSIGNED_SHORT3\n unsigned_short4 = RTformat.RT_FORMAT_UNSIGNED_SHORT4\n int = RTformat.RT_FORMAT_INT\n int2 = RTformat.RT_FORMAT_INT2\n int3 = RTformat.RT_FORMAT_INT3\n int4 = RTformat.RT_FORMAT_INT4\n unsigned_int = RTformat.RT_FORMAT_UNSIGNED_INT\n unsigned_int2 = RTformat.RT_FORMAT_UNSIGNED_INT2\n unsigned_int3 = RTformat.RT_FORMAT_UNSIGNED_INT3\n unsigned_int4 = RTformat.RT_FORMAT_UNSIGNED_INT4\n user = RTformat.RT_FORMAT_USER\n buffer_id = RTformat.RT_FORMAT_BUFFER_ID\n program_id = RTformat.RT_FORMAT_PROGRAM_ID\n\n\nclass BufferType:\n input = RTbuffertype.RT_BUFFER_INPUT\n output = RTbuffertype.RT_BUFFER_OUTPUT\n input_output = RTbuffertype.RT_BUFFER_INPUT_OUTPUT\n\n\nclass BufferFlag:\n gpu_local = RTbufferflag.RT_BUFFER_GPU_LOCAL\n copy_on_dirty = RTbufferflag.RT_BUFFER_COPY_ON_DIRTY\n\n\nclass WrapMode:\n repeat = RTwrapmode.RT_WRAP_REPEAT\n clamp_to_edge = RTwrapmode.RT_WRAP_CLAMP_TO_EDGE\n mirror = RTwrapmode.RT_WRAP_MIRROR\n clamp_to_border = RTwrapmode.RT_WRAP_CLAMP_TO_BORDER\n\n\nclass FilterMode:\n nearest = RTfiltermode.RT_FILTER_NEAREST\n linear = RTfiltermode.RT_FILTER_LINEAR\n none = RTfiltermode.RT_FILTER_NONE\n\n\nclass TextureReadMode:\n element_type = RTtexturereadmode.RT_TEXTURE_READ_ELEMENT_TYPE\n normalized_float = RTtexturereadmode.RT_TEXTURE_READ_NORMALIZED_FLOAT\n\n\nclass TextureIndexMode:\n normalized_coordinates = RTtextureindexmode.RT_TEXTURE_INDEX_NORMALIZED_COORDINATES\n array_index = RTtextureindexmode.RT_TEXTURE_INDEX_ARRAY_INDEX\n\n\nclass ExceptionType:\n program_id_invalid = RTexception.RT_EXCEPTION_PROGRAM_ID_INVALID\n texture_id_invalid = RTexception.RT_EXCEPTION_TEXTURE_ID_INVALID\n buffer_id_invalid = RTexception.RT_EXCEPTION_BUFFER_ID_INVALID\n index_out_of_bounds = RTexception.RT_EXCEPTION_INDEX_OUT_OF_BOUNDS\n stack_overflow = RTexception.RT_EXCEPTION_STACK_OVERFLOW\n buffer_index_out_of_bounds = RTexception.RT_EXCEPTION_BUFFER_INDEX_OUT_OF_BOUNDS\n invalid_ray = RTexception.RT_EXCEPTION_INVALID_RAY\n internal_error = RTexception.RT_EXCEPTION_INTERNAL_ERROR\n user = RTexception.RT_EXCEPTION_USER\n all = RTexception.RT_EXCEPTION_ALL\n\n\nclass ObjectType:\n unknown = RTobjecttype.RT_OBJECTTYPE_UNKNOWN\n group = RTobjecttype.RT_OBJECTTYPE_GROUP\n geometry_group = RTobjecttype.RT_OBJECTTYPE_GEOMETRY_GROUP\n transform = RTobjecttype.RT_OBJECTTYPE_TRANSFORM\n selector = RTobjecttype.RT_OBJECTTYPE_SELECTOR\n geometry_instance = RTobjecttype.RT_OBJECTTYPE_GEOMETRY_INSTANCE\n buffer = RTobjecttype.RT_OBJECTTYPE_BUFFER\n texture_sampler = 
RTobjecttype.RT_OBJECTTYPE_TEXTURE_SAMPLER\n object = RTobjecttype.RT_OBJECTTYPE_OBJECT\n matrix2x2 = RTobjecttype.RT_OBJECTTYPE_MATRIX_FLOAT2x2\n matrix2x3 = RTobjecttype.RT_OBJECTTYPE_MATRIX_FLOAT2x3\n matrix2x4 = RTobjecttype.RT_OBJECTTYPE_MATRIX_FLOAT2x4\n matrix3x2 = RTobjecttype.RT_OBJECTTYPE_MATRIX_FLOAT3x2\n matrix3x3 = RTobjecttype.RT_OBJECTTYPE_MATRIX_FLOAT3x3\n matrix3x4 = RTobjecttype.RT_OBJECTTYPE_MATRIX_FLOAT3x4\n matrix4x2 = RTobjecttype.RT_OBJECTTYPE_MATRIX_FLOAT4x2\n matrix4x3 = RTobjecttype.RT_OBJECTTYPE_MATRIX_FLOAT4x3\n matrix4x4 = RTobjecttype.RT_OBJECTTYPE_MATRIX_FLOAT4x4\n float = RTobjecttype.RT_OBJECTTYPE_FLOAT\n float2 = RTobjecttype.RT_OBJECTTYPE_FLOAT2\n float3 = RTobjecttype.RT_OBJECTTYPE_FLOAT3\n float4 = RTobjecttype.RT_OBJECTTYPE_FLOAT4\n int = RTobjecttype.RT_OBJECTTYPE_INT\n int2 = RTobjecttype.RT_OBJECTTYPE_INT2\n int3 = RTobjecttype.RT_OBJECTTYPE_INT3\n int4 = RTobjecttype.RT_OBJECTTYPE_INT4\n unsigned_int = RTobjecttype.RT_OBJECTTYPE_UNSIGNED_INT\n unsigned_int2 = RTobjecttype.RT_OBJECTTYPE_UNSIGNED_INT2\n unsigned_int3 = RTobjecttype.RT_OBJECTTYPE_UNSIGNED_INT3\n unsigned_int4 = RTobjecttype.RT_OBJECTTYPE_UNSIGNED_INT4\n user = RTobjecttype.RT_OBJECTTYPE_USER\n program = RTobjecttype.RT_OBJECTTYPE_PROGRAM\n\nOBJECT_TYPE_TO_DTYPE_SHAPE = {\n ObjectType.float: (numpy.float32, (1, )),\n ObjectType.float2: (numpy.float32, (2, )),\n ObjectType.float3: (numpy.float32, (3, )),\n ObjectType.float4: (numpy.float32, (4, )),\n\n ObjectType.int: (numpy.int32, (1, )),\n ObjectType.int2: (numpy.int32, (2, )),\n ObjectType.int3: (numpy.int32, (3, )),\n ObjectType.int4: (numpy.int32, (4, )),\n\n ObjectType.unsigned_int: (numpy.uint32, (1, )),\n ObjectType.unsigned_int2: (numpy.uint32, (2, )),\n ObjectType.unsigned_int3: (numpy.uint32, (3, )),\n ObjectType.unsigned_int4: (numpy.uint32, (4, )),\n\n ObjectType.matrix2x2: (numpy.float32, (2, 2)),\n ObjectType.matrix2x3: (numpy.float32, (2, 3)),\n ObjectType.matrix2x4: (numpy.float32, (2, 4)),\n ObjectType.matrix3x2: (numpy.float32, (3, 2)),\n ObjectType.matrix3x3: (numpy.float32, (3, 3)),\n ObjectType.matrix3x4: (numpy.float32, (3, 4)),\n ObjectType.matrix4x2: (numpy.float32, (4, 2)),\n ObjectType.matrix4x3: (numpy.float32, (4, 3)),\n ObjectType.matrix4x4: (numpy.float32, (4, 4)),\n}\n\nDTYPE_SHAPE_TO_OBJECT_TYPE = {\n numpy.dtype(numpy.float32): {\n (1, ): ObjectType.float,\n (2, ): ObjectType.float2,\n (3, ): ObjectType.float3,\n (4, ): ObjectType.float4,\n\n (2, 2): ObjectType.matrix2x2,\n (2, 3): ObjectType.matrix2x3,\n (2, 4): ObjectType.matrix2x4,\n (3, 2): ObjectType.matrix3x2,\n (3, 3): ObjectType.matrix3x3,\n (3, 4): ObjectType.matrix3x4,\n (4, 2): ObjectType.matrix4x2,\n (4, 3): ObjectType.matrix4x3,\n (4, 4): ObjectType.matrix4x4,\n },\n\n numpy.dtype(numpy.int32): {\n (1, ): ObjectType.int,\n (2, ): ObjectType.int2,\n (3, ): ObjectType.int3,\n (4, ): ObjectType.int4,\n },\n\n numpy.dtype(numpy.uint32): {\n (1, ): ObjectType.unsigned_int,\n (2, ): ObjectType.unsigned_int2,\n (3, ): ObjectType.unsigned_int3,\n (4, ): ObjectType.unsigned_int4,\n },\n\n 'default': ObjectType.user,\n}\n\nFORMAT_TO_DTYPE = {\n Format.float: (numpy.float32, 1),\n Format.float2: (numpy.float32, 2),\n Format.float3: (numpy.float32, 3),\n Format.float4: (numpy.float32, 4),\n\n Format.int: (numpy.int32, 1),\n Format.int2: (numpy.int32, 2),\n Format.int3: (numpy.int32, 3),\n Format.int4: (numpy.int32, 4),\n\n Format.unsigned_int: (numpy.uint32, 1),\n Format.unsigned_int2: (numpy.uint32, 2),\n Format.unsigned_int3: (numpy.uint32, 3),\n 
Format.unsigned_int4: (numpy.uint32, 4),\n\n Format.short: (numpy.int16, 1),\n Format.short2: (numpy.int16, 2),\n Format.short3: (numpy.int16, 3),\n Format.short4: (numpy.int16, 4),\n\n Format.unsigned_short: (numpy.uint16, 1),\n Format.unsigned_short2: (numpy.uint16, 2),\n Format.unsigned_short3: (numpy.uint16, 3),\n Format.unsigned_short4: (numpy.uint16, 4),\n\n Format.byte: (numpy.int8, 1),\n Format.byte2: (numpy.int8, 2),\n Format.byte3: (numpy.int8, 3),\n Format.byte4: (numpy.int8, 4),\n\n Format.unsigned_byte: (numpy.uint8, 1),\n Format.unsigned_byte2: (numpy.uint8, 2),\n Format.unsigned_byte3: (numpy.uint8, 3),\n Format.unsigned_byte4: (numpy.uint8, 4),\n}\n\nDTYPE_TO_FORMAT = {\n numpy.dtype(numpy.float32): {\n 1: Format.float,\n 2: Format.float2,\n 3: Format.float3,\n 4: Format.float4\n },\n\n numpy.dtype(numpy.int32): {\n 1: Format.int,\n 2: Format.int2,\n 3: Format.int3,\n 4: Format.int4\n },\n\n numpy.dtype(numpy.uint32): {\n 1: Format.unsigned_int,\n 2: Format.unsigned_int2,\n 3: Format.unsigned_int3,\n 4: Format.unsigned_int4\n },\n\n numpy.dtype(numpy.int16): {\n 1: Format.short,\n 2: Format.short2,\n 3: Format.short3,\n 4: Format.short4\n },\n\n numpy.dtype(numpy.uint16): {\n 1: Format.unsigned_short,\n 2: Format.unsigned_short2,\n 3: Format.unsigned_short3,\n 4: Format.unsigned_short4\n },\n\n numpy.dtype(numpy.int8): {\n 1: Format.byte,\n 2: Format.byte2,\n 3: Format.byte3,\n 4: Format.byte4\n },\n\n numpy.dtype(numpy.uint8): {\n 1: Format.unsigned_byte,\n 2: Format.unsigned_byte2,\n 3: Format.unsigned_byte3,\n 4: Format.unsigned_byte4\n },\n\n 'default': Format.user\n}\n\n\nPYOPTIX_CLASS_TO_OBJECT_TYPE = {\n 'Buffer': ObjectType.buffer,\n 'TextureSampler': ObjectType.texture_sampler,\n 'Program': ObjectType.program,\n 'Group': ObjectType.group,\n 'GeometryGroup': ObjectType.geometry_group,\n 'GeometryInstance': ObjectType.geometry_instance,\n 'Selector': ObjectType.selector,\n 'Transform': ObjectType.transform,\n\n 'default': None,\n}\n\n\nWRAP_STRING_TO_OPTIX_ENUM = {\n 'repeat': WrapMode.repeat,\n 'clamp_to_edge': WrapMode.clamp_to_edge,\n 'mirror': WrapMode.mirror,\n 'clamp_to_border': WrapMode.clamp_to_border,\n}\n\nFILTERING_STRING_TO_OPTIX_ENUM = {\n 'nearest': FilterMode.nearest,\n 'linear': FilterMode.linear,\n 'none': FilterMode.none,\n}\n\nREAD_STRING_TO_OPTIX_ENUM = {\n 'element_type': TextureReadMode.element_type,\n 'normalized_float': TextureReadMode.normalized_float,\n}\n\nINDEXING_STRING_TO_OPTIX_ENUM = {\n 'normalized_coordinates': TextureIndexMode.normalized_coordinates,\n 'array_index': TextureIndexMode.array_index,\n}\n\nBUFFER_STRING_TO_OPTIX_ENUM = {\n 'io': BufferType.input_output,\n 'i': BufferType.input,\n 'o': BufferType.output,\n}\n\n\ndef get_dtype_from_object_type(object_type):\n if object_type in OBJECT_TYPE_TO_DTYPE_SHAPE:\n return OBJECT_TYPE_TO_DTYPE_SHAPE[object_type]\n else:\n return None, None\n\n\ndef get_object_type_from_dtype(dtype, shape):\n if dtype in DTYPE_SHAPE_TO_OBJECT_TYPE and shape in DTYPE_SHAPE_TO_OBJECT_TYPE[dtype]:\n return DTYPE_SHAPE_TO_OBJECT_TYPE[dtype][shape]\n else:\n return DTYPE_SHAPE_TO_OBJECT_TYPE['default']\n\n\ndef get_format_from_dtype(dtype, type_size):\n if dtype in DTYPE_TO_FORMAT and type_size in DTYPE_TO_FORMAT[dtype]:\n return DTYPE_TO_FORMAT[dtype][type_size]\n else:\n return DTYPE_TO_FORMAT['default']\n\n\ndef get_object_type_from_pyoptix_class(instance):\n try:\n if instance.__class__.__name__ in PYOPTIX_CLASS_TO_OBJECT_TYPE:\n return PYOPTIX_CLASS_TO_OBJECT_TYPE[instance.__class__.__name__]\n 
else:\n for base in instance.__class__.__bases__:\n if base.__name__ in PYOPTIX_CLASS_TO_OBJECT_TYPE:\n return PYOPTIX_CLASS_TO_OBJECT_TYPE[base.__name__]\n return PYOPTIX_CLASS_TO_OBJECT_TYPE['default']\n except Exception:\n return None\n\n\ndef convert_wrap_mode(string):\n if isinstance(string, str) and string.lower() in WRAP_STRING_TO_OPTIX_ENUM:\n return WRAP_STRING_TO_OPTIX_ENUM[string]\n else:\n return string\n\n\ndef convert_filtering_mode(string):\n if isinstance(string, str) and string.lower() in FILTERING_STRING_TO_OPTIX_ENUM:\n return FILTERING_STRING_TO_OPTIX_ENUM[string]\n else:\n return string\n\n\ndef convert_read_mode(string):\n if isinstance(string, str) and string.lower() in READ_STRING_TO_OPTIX_ENUM:\n return READ_STRING_TO_OPTIX_ENUM[string]\n else:\n return string\n\n\ndef convert_indexing_mode(string):\n if isinstance(string, str) and string.lower() in INDEXING_STRING_TO_OPTIX_ENUM:\n return INDEXING_STRING_TO_OPTIX_ENUM[string]\n else:\n return string\n\n\ndef convert_buffer_type(string):\n if isinstance(string, str) and string.lower() in BUFFER_STRING_TO_OPTIX_ENUM:\n return BUFFER_STRING_TO_OPTIX_ENUM[string]\n else:\n raise string\n"
] |
[
[
"numpy.dtype"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shashank-srikant/reckless-minimax
|
[
"840745614be59d75535a81ef5ca79f4abdfcfaa6",
"840745614be59d75535a81ef5ca79f4abdfcfaa6"
] |
[
"test_suite/toy_problem.py",
"experiments/scale_experiment.py"
] |
[
"\"\"\"\nPython implementation of the toy problem\nBy:\n - Gidel et al., Frank-Wolfe Algorithms for Saddle Point Problems (2016)\n\"\"\"\nfrom __future__ import print_function, division\nimport numpy as np\nfrom saddle_point_problem import SaddlePointProblem\nimport warnings\n\nclass ToyProblem(SaddlePointProblem):\n \"\"\"A simple saddle point\n problem over the unit cube in dimension D_x + D_y\n The problem comes with a matrix that is initialized randomly, to ensure\n reproducible results, set your seed before creating the object\n i.e., np.random.seed(seed)\n \"\"\"\n def __init__(self, D_x=5, D_y=5, mu=0.5):\n \"\"\"\n Set the parameters of the problem\n The problem comes with a matrix that is initialized randomly, to ensure\n reproducible results, set your seed before creating the object\n i.e., np.random.seed(seed)\n :param D_x:\n :param D_y:\n :param mu:\n \"\"\"\n super(ToyProblem, self).__init__(D_x, D_y)\n self._x_opt = (0.75 - 0.25) * np.random.random(self._D_x) + 0.25\n self._y_opt = (0.75 - 0.25) * np.random.random(self._D_y) + 0.25\n self._M = (0.1 + 0.1) * np.random.random((self._D_x, self._D_y)) - 0.1\n self._half_mu = 0.5 * mu\n\n def _fct(self, x, y):\n return self._half_mu * np.sum(np.square(x - self._x_opt)) + \\\n np.dot((x - self._x_opt).T, np.dot(self._M, y - self._y_opt)) - self._half_mu * np.sum(np.square(y - self._y_opt))\n\n\n def _call_fct(self, x, y):\n return self._fct(x, y)\n\n\n\n\n\nif __name__ == \"__main__\":\n print(\"I'm just a module to be called by others, testing here\")\n tp = ToyProblem(D_x=2, D_y=2)\n x = np.random.random(2)\n print (\"Objective value at a random point:\" ,tp.evaluate(x, np.random.random(2)))\n print(\"Fixing x maximizing y:\", tp.evaluate(x, tp.get_y_opt()))\n print(\"Objective value at saddle point:\", tp.evaluate(tp.get_x_opt(), tp.get_y_opt()))\n\n",
"\"\"\"\nExperiments on the Robust DE problems\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom test_suite.robust_de_problems import RobustDEProblem\nfrom methods.reckless import Reckless\nfrom methods.coev import CoevAlternating, CoevParallel\nfrom methods.mmde_2017 import MMDE\nfrom utils.latex_tbl import df_2_tex\nfrom utils.plot_curves import plot_curves\nimport json\nimport os\n\nRESULTS_DIR = os.path.join(os.path.dirname(__file__), 'results')\n\n\ndef run_one_fun(fun_num):\n if not os.path.exists(RESULTS_DIR):\n os.mkdir(RESULTS_DIR)\n\n np.random.seed(0)\n num_runs = 60\n max_fevals = int(1e4)\n\n print(\"Running Scalability experiment for fun num: {}\".format(fun_num))\n\n dims = [2, 5, 10, 15, 20, 40, 50]\n algs = ['MMDE','Reckless', 'CoevAlternating', 'CoevParallel']\n #algs = ['Reckless', 'CoevAlternating']\n\n regret_curves = {\n 'metadata': {\n 'xlabel': '$n=n_x=n_y$',\n 'ylabel': '$r(\\mathbf{x}_*)$',\n 'title': '',\n 'filepath': None, # 'plot.pdf'\n 'plt_type': ''\n },\n 'data': []\n }\n\n regret_file = os.path.join(RESULTS_DIR, \"scale_regret_curves_%d\" % fun_num)\n regret_curves['data'] = []\n regret_curves['metadata']['filepath'] = regret_file + \".pdf\"\n regret_curves['metadata']['title'] = '$\\mathcal{L}_%d$' % fun_num\n for alg in algs:\n xs = []\n regret_fevals = []\n regret_errs_fevals = []\n regrets = []\n for dim in dims:\n regret_runs = []\n for run in range(num_runs):\n np.random.seed(run)\n prob = RobustDEProblem(D_x=dim, D_y=dim, fun_num=fun_num)\n x_opt, y_opt, _ = eval(alg)(prob.evaluate, D_x=dim, D_y=dim, max_fevals=int(max_fevals * dim), seed=run).run()\n\n if run == 0:\n xs.append(dim)\n\n regret = prob.regret(x_opt, y_opt)\n\n regret_runs.append(regret)\n\n regret_fevals.append(np.mean(regret_runs))\n regret_errs_fevals.append(np.std(regret_runs))\n\n regrets.append(regret_runs)\n\n regret_curves['data'].append(\n {\n 'name': alg,\n 'ys': regrets,\n 'm_ys': regret_fevals,\n 'std_ys': regret_errs_fevals,\n 'xs': xs\n })\n\n assert len(regret_curves['data']) == len(algs)\n\n #plot_curves(regret_curves)\n\n with open(regret_file + \".json\", \"w\") as f:\n json.dump(regret_curves, f)\n\n\ndef main():\n from multiprocessing import Pool\n\n fun_nums = [1, 2]#, 4] # fct 1 & 2 are scalable\n\n p = Pool(2)\n p.map(run_one_fun, fun_nums)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.square",
"numpy.dot",
"numpy.random.random"
],
[
"numpy.std",
"numpy.mean",
"numpy.random.seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
trungnt13/odin-ai
|
[
"9c6986a854e62da39637ea463667841378b7dd84",
"9c6986a854e62da39637ea463667841378b7dd84",
"9c6986a854e62da39637ea463667841378b7dd84"
] |
[
"odin/fuel/nlp_data/_base.py",
"odin/visual/scatter_plot.py",
"odin/ml/gmm_tmat.py"
] |
[
"import os\nimport pickle\nimport re\nfrom abc import ABCMeta, abstractproperty\nfrom itertools import chain\nfrom numbers import Number\nfrom types import MethodType\nfrom typing import Dict, Generator, Iterable, List, Optional, Tuple, Union\nfrom typing_extensions import Literal\nfrom urllib.request import urlretrieve\nfrom odin.backend.types_helpers import DataType, LabelType\n\nimport numpy as np\nimport tensorflow as tf\nfrom numpy import ndarray\nfrom odin.fuel.dataset_base import IterableDataset, get_partition\nfrom odin.utils import one_hot\nfrom scipy import sparse\nfrom scipy.sparse import csr_matrix, spmatrix\nfrom six import add_metaclass, string_types\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.utils.validation import check_is_fitted\n\ntry:\n from tokenizers import Encoding\n from tokenizers.implementations import BaseTokenizer\nexcept ImportError:\n Encoding = \"Encoding\"\n BaseTokenizer = object\n\n# ===========================================================================\n# Helpers\n# ===========================================================================\n_token_pattern = re.compile(r\"(?u)\\b[a-fA-F]\\w+\\b\")\n\n\ndef _simple_tokenizer(doc: str) -> List[str]:\n return _token_pattern.findall(doc)\n\n\ndef _simple_preprocess(doc: str) -> str:\n doc = doc.lower().strip()\n doc = re.sub(r\"'\", \"\", doc)\n doc = re.sub(r\"\\W\", \" \", doc)\n doc = re.sub(r\"\\s+\", \" \", doc)\n return doc\n\n\n# ===========================================================================\n# Base dataset\n# ===========================================================================\n@add_metaclass(ABCMeta)\nclass NLPDataset(IterableDataset):\n r\"\"\"\n Arguments:\n algorithm: {'tf', 'tfidf', 'bert'}\n Which algorithm used for tokenizing\n 'tf' - term frequency or bag-of-words\n 'tfidf' - term count and inverse document frequency\n 'count' - count vectorizer\n 'bert' - BERT tokenizer\n vocab_size: int\n The size of the final vocabulary, including all tokens and alphabet.\n min_frequency: int\n When building the vocabulary ignore terms that have a document\n frequency strictly lower than the given threshold. This value is also\n called cut-off in the literature.\n If float in range of [0.0, 1.0], the parameter represents a proportion\n of documents, integer absolute counts.\n This parameter is ignored if vocabulary is not None.\n max_frequency : float or int, default=1.0\n When building the vocabulary ignore terms that have a document\n frequency strictly higher than the given threshold (corpus-specific\n stop words).\n If float in range [0.0, 1.0], the parameter represents a proportion of\n documents, integer absolute counts.\n This parameter is ignored if vocabulary is not None.\n limit_alphabet: int\n The maximum different characters to keep in the alphabet.\n max_length : int\n longest document length\n ngram_range : tuple (min_n, max_n), default=(1, 1)\n The lower and upper boundary of the range of n-values for different\n n-grams to be extracted. All values of n such that min_n <= n <= max_n\n will be used. 
For example an ``ngram_range`` of ``(1, 1)`` means only\n only bigrams.\n Only applies if ``analyzer is not callable``.\n \"\"\"\n\n def __init__(self,\n algorithm: str = 'tf',\n vocab_size: int = 1000,\n min_frequency: int = 2,\n max_frequency: float = 0.98,\n limit_alphabet: int = 1000,\n max_length: Optional[int] = 1000,\n ngram_range: Tuple[int, int] = (1, 1),\n vocabulary: Dict[str, int] = None,\n retrain_tokenizer: bool = False,\n cache_path: str = \"~/nlp_data\"):\n self._cache_path = os.path.abspath(os.path.expanduser(cache_path))\n self._labels = []\n #\n self._vocabulary = None\n if vocabulary is not None:\n vocab_size = len(vocabulary)\n with open(os.path.join(self.cache_path, \"bert_vocab.txt\"), 'w') as f:\n for special_token in (\"[SEP]\", \"[UNK]\", \"[CLS]\", \"[PAD]\", \"[MASK]\"):\n f.write(f\"{special_token}\\n\")\n for term, idx in sorted(vocabulary.items(), key=lambda x: x[-1]):\n f.write(term + '\\n')\n self._init_vocabulary = vocabulary\n self.max_length = max_length\n self.vocab_size = int(vocab_size)\n self.min_frequency = int(min_frequency)\n self.max_frequency = float(max_frequency)\n self.limit_alphabet = int(limit_alphabet)\n self.ngram_range = tuple(ngram_range)\n self.retrain_tokenizer = bool(retrain_tokenizer)\n # load exists tokenizer\n algorithm = str(algorithm).lower().strip()\n assert algorithm in ('tf', 'tfidf', 'bert', 'count'), \\\n f\"Support algorithm: tf, tfidf, count and bert; but given:{algorithm}\"\n self.algorithm = algorithm\n self._tokenizer = None\n\n @property\n def data_type(self) -> DataType:\n return 'text'\n\n @property\n def label_type(self) -> LabelType:\n raise NotImplementedError\n\n @property\n def shape(self) -> List[int]:\n return self.transform('train').shape[1:]\n\n @property\n def labels(self) -> List[str]:\n return np.array(self._labels)\n\n @abstractproperty\n def train_text(self) -> Iterable[str]:\n raise NotImplementedError\n\n @abstractproperty\n def valid_text(self) -> Iterable[str]:\n raise NotImplementedError\n\n @abstractproperty\n def test_text(self) -> Iterable[str]:\n raise NotImplementedError\n\n @property\n def train_labels(self) -> Union[ndarray, spmatrix]:\n return np.asarray([])\n\n @property\n def valid_labels(self) -> Union[ndarray, spmatrix]:\n return np.asarray([])\n\n @property\n def test_labels(self) -> Union[ndarray, spmatrix]:\n return np.asarray([])\n\n def filter_by_length(\n self,\n inputs: Union[int, List[str], List[Encoding]],\n iqr_multiplier: float = 1.5,\n length_range: Optional[Tuple[int, int]] = None\n ) -> Tuple[List[bool], int, int]:\n r\"\"\" Using inter-quartile to filter out outlier documents by their\n tokenized lengths. 
\"\"\"\n lengths = np.asarray(\n [\n len(i.split(\" \")) if isinstance(i, string_types) else\n (int(i) if isinstance(i, Number) else len(i)) for i in inputs\n ],\n dtype=np.int32,\n )\n if length_range is None:\n q1 = np.quantile(lengths, 0.25)\n q3 = np.quantile(lengths, 0.75)\n iqr = q3 - q1\n lmin = q1 - iqr_multiplier * iqr\n lmax = q3 + iqr_multiplier * iqr\n else:\n lmin, lmax = length_range\n mask = np.logical_and(lengths > lmin, lengths < lmax)\n return mask, lmin, lmax\n\n def transform(self,\n documents: Optional[Union[str, List[str]]] = None) -> spmatrix:\n r\"\"\" Vectorize the input documents \"\"\"\n # cached transformed dataset\n if isinstance(documents, string_types) and \\\n documents in ('train', 'valid', 'test'):\n attr_name = f'_x_{documents}'\n if hasattr(self, attr_name):\n return getattr(self, attr_name)\n x = self.transform(\n get_partition(documents,\n train=self.train_text,\n valid=self.valid_text,\n test=self.test_text))\n setattr(self, attr_name, x)\n return x\n # other data\n if self.algorithm in ('tf', 'tfidf', 'count'):\n x = self.tokenizer.transform(documents)\n # sorted ensure right ordering for Tensorflow SparseTensor\n else:\n if isinstance(documents, Generator):\n documents = [i for i in documents]\n x = sparse.csr_matrix(\n [i.ids for i in self.encode(documents, post_process=True)])\n return x\n\n @property\n def cache_path(self) -> str:\n if not os.path.exists(self._cache_path):\n os.makedirs(self._cache_path)\n return self._cache_path\n\n @property\n def tokenizer(self) -> Union[BaseTokenizer, CountVectorizer, TfidfVectorizer]:\n pkl_path = os.path.join(self.tokenizer_path, \"model.pkl\")\n if self._tokenizer is not None:\n return self._tokenizer\n ### get pickled tokenizer\n if os.path.exists(pkl_path) and not self.retrain_tokenizer:\n with open(pkl_path, 'rb') as f:\n tokenizer = pickle.load(f)\n ### train new tokenizer\n else:\n self.retrain_tokenizer = False\n if self.algorithm == 'bert':\n from tokenizers import BertWordPieceTokenizer\n tokenizer = BertWordPieceTokenizer(\n vocab_file=None if self._init_vocabulary is None else os.path.\n join(self.cache_path, \"bert_vocab.txt\"))\n tokenizer.enable_truncation(max_length=self.max_length)\n tokenizer.enable_padding(length=self.max_length)\n # train the tokenizer\n if self._init_vocabulary is None:\n path = os.path.join(self.cache_path, 'train.txt')\n with open(path, 'w') as f:\n for i in chain(self.train_text, self.valid_text, self.test_text):\n if len(i) == 0:\n continue\n f.write(i + \"\\n\" if i[-1] != \"\\n\" else i)\n tokenizer.train(files=path,\n vocab_size=self.vocab_size,\n min_frequency=self.min_frequency,\n limit_alphabet=self.limit_alphabet,\n show_progress=True)\n tokenizer.save_model(self.tokenizer_path)\n elif self.algorithm in ('count', 'tf', 'tfidf'):\n if self.algorithm == 'count':\n tokenizer = CountVectorizer(input='content',\n ngram_range=self.ngram_range,\n min_df=self.min_frequency,\n max_df=self.max_frequency,\n max_features=self.vocab_size,\n vocabulary=self._init_vocabulary,\n tokenizer=_simple_tokenizer,\n stop_words='english')\n elif self.algorithm in ('tf', 'tfidf'):\n tokenizer = TfidfVectorizer(\n input='content',\n ngram_range=self.ngram_range,\n min_df=self.min_frequency,\n max_df=self.max_frequency,\n max_features=self.vocab_size,\n stop_words='english',\n vocabulary=self._init_vocabulary,\n tokenizer=_simple_tokenizer,\n use_idf=False if self.algorithm == 'tf' else True)\n tokenizer.fit(\n (_simple_preprocess(i)\n for i in chain(self.train_text, self.valid_text, 
self.test_text)))\n else:\n raise NotImplementedError\n # save the pickled model\n with open(pkl_path, \"wb\") as f:\n pickle.dump(tokenizer, f)\n ### assign and return\n self._tokenizer = tokenizer\n return self._tokenizer\n\n @property\n def tokenizer_path(self) -> str:\n p = os.path.join(\n self.cache_path, f\"tokenizer_{self.algorithm}_{self.vocab_size}_\"\n f\"{self.min_frequency}_{self.max_frequency}_\"\n f\"{self.limit_alphabet}\")\n if not os.path.exists(p):\n os.makedirs(p)\n return p\n\n @property\n def vocabulary(self) -> Dict[int, str]:\n if self._vocabulary is None:\n if self.algorithm in ('tf', 'tfidf', 'count'):\n vocab = self.tokenizer.vocabulary_\n else:\n vocab = self.tokenizer.get_vocab()\n self._vocabulary = {\n v: k for k, v in sorted(vocab.items(), key=lambda x: x[-1])\n }\n return self._vocabulary\n\n @property\n def vocabulary_size(self) -> int:\n return len(self.vocabulary)\n\n def encode(self,\n inputs: Union[str, List[str]],\n add_special_tokens: bool = True,\n post_process: bool = False) -> List[Encoding]:\n r\"\"\" Encode sequence of text string \"\"\"\n is_batch = True\n if isinstance(inputs, string_types):\n inputs = [inputs]\n is_batch = False\n outputs = self.tokenizer.encode_batch(inputs, add_special_tokens=True)\n if post_process:\n outputs = [\n self.tokenizer.post_process(i, add_special_tokens=add_special_tokens)\n for i in outputs\n ]\n return outputs if is_batch else outputs[0]\n\n def post_process(self,\n encoding,\n add_special_tokens: bool = True) -> List[Encoding]:\n r\"\"\" Apply all the post-processing steps to the given encodings.\n\n The various steps are:\n 1. Truncate according to global params (provided to `enable_truncation`)\n 2. Apply the PostProcessor\n 3. Pad according to global params. (provided to `enable_padding`)\n \"\"\"\n is_batch = True\n if isinstance(encoding, Encoding):\n encoding = [encoding]\n is_batch = False\n outputs = [\n self.tokenizer.post_process(i, add_special_tokens=add_special_tokens)\n for i in encoding\n ]\n return outputs if is_batch else outputs[0]\n\n def decode(self,\n ids: List[int],\n skip_special_tokens: Optional[bool] = True) -> List[str]:\n r\"\"\" Decode sequence of integer indices and return original sequence \"\"\"\n is_batch = True\n if not isinstance(ids[0], (tuple, list, ndarray)):\n ids = [ids]\n is_batch = False\n outputs = self.tokenizer.decode_batch(\n ids, skip_special_tokens=skip_special_tokens)\n return outputs if is_batch else outputs[0]\n\n def create_dataset(self,\n partition: Literal['train', 'valid', 'test'] = 'train',\n *,\n batch_size: Optional[int] = 32,\n drop_remainder: bool = False,\n shuffle: int = 1000,\n cache: Optional[str] = '',\n prefetch: Optional[int] = tf.data.experimental.AUTOTUNE,\n parallel: Optional[int] = tf.data.experimental.AUTOTUNE,\n label_percent: Union[bool, float] = False,\n seed: int = 1) -> tf.data.Dataset:\n r\"\"\"\n Arguments:\n partition : {'train', 'valid', 'test'}\n label_percent : a Boolean or Scalar. If True, return both image and label,\n otherwise, only image is returned.\n If a scalar is provided, it indicate the percent of labelled data\n in the mask.\n\n Return :\n tensorflow.data.Dataset :\n image - `(tf.float32, (None, 64, 64, 1))`\n label - `(tf.float32, (None, 5))`\n mask - `(tf.bool, (None, 1))` if 0. 
< label_percent < 1.\n where, `mask=1` mean labelled data, and `mask=0` for unlabelled data\n \"\"\"\n label_percent = float(label_percent)\n gen = tf.random.experimental.Generator.from_seed(seed=seed)\n x = self.transform(partition)\n y = get_partition(partition,\n train=self.train_labels,\n valid=self.valid_labels,\n test=self.test_labels)\n # remove empty docs\n indices = np.array(np.sum(x, axis=-1) > 0).ravel()\n x = x[indices]\n if len(y) > 0:\n y = y[indices]\n # convert to one-hot\n if label_percent > 0 and len(y) > 0 and y.ndim == 1:\n y = one_hot(y, self.n_labels)\n\n def _process(*data):\n data = tuple([\n tf.cast(\n tf.sparse.to_dense(i) if isinstance(i, tf.SparseTensor) else i,\n tf.float32) for i in data\n ])\n if label_percent:\n if 0. < label_percent < 1.: # semi-supervised mask\n mask = gen.uniform(shape=(1,)) < label_percent\n return dict(inputs=tuple(data), mask=mask)\n return data\n return data[0]\n\n # prepare the sparse matrices\n if isinstance(x, spmatrix):\n x = tf.SparseTensor(indices=sorted(zip(*x.nonzero())),\n values=x.data,\n dense_shape=x.shape)\n ds = tf.data.Dataset.from_tensor_slices(x)\n if label_percent > 0:\n if isinstance(y, spmatrix):\n y = tf.SparseTensor(indices=sorted(zip(*y.nonzero())),\n values=y.data,\n dense_shape=y.shape)\n y = tf.data.Dataset.from_tensor_slices(y)\n ds = tf.data.Dataset.zip((ds, y))\n # configurate dataset\n if cache is not None:\n ds = ds.cache(str(cache))\n ds = ds.map(_process, parallel)\n # shuffle must be called after cache\n if shuffle is not None and shuffle > 0:\n ds = ds.shuffle(int(shuffle), seed=seed, reshuffle_each_iteration=True)\n if batch_size is not None:\n ds = ds.batch(batch_size, drop_remainder)\n if prefetch is not None:\n ds = ds.prefetch(prefetch)\n return ds\n\n\n# ===========================================================================\n# Others\n# ===========================================================================\nclass ImdbReview(NLPDataset):\n\n def __init__(self):\n import tensorflow_datasets as tfds\n train = tfds.load(\n 'imdb_reviews',\n split='train',\n read_config=tfds.ReadConfig(shuffle_seed=seed,\n shuffle_reshuffle_each_iteration=True),\n )\n test = tfds.load(\n 'imdb_reviews',\n split='test',\n read_config=tfds.ReadConfig(shuffle_seed=seed,\n shuffle_reshuffle_each_iteration=True),\n )\n print(train)\n\n\nclass TinyShakespear(NLPDataset):\n\n def __init__(self):\n import tensorflow_datasets as tfds\n 'test'\n 'train'\n 'validation'\n d = tfds.load(\n name='tiny_shakespeare',\n read_config=tfds.ReadConfig(shuffle_seed=seed,\n shuffle_reshuffle_each_iteration=True),\n )['train']\n d = d.map(lambda x: tf.strings.unicode_split(x['text'], 'UTF-8'))\n # train split includes vocabulary for other splits\n vocabulary = sorted(set(next(iter(d)).numpy()))\n d = d.map(lambda x: {'cur_char': x[:-1], 'next_char': x[1:]})\n d = d.unbatch()\n seq_len = 100\n batch_size = 2\n d = d.batch(seq_len)\n d = d.batch(batch_size)\n\n\nclass MathArithmetic(NLPDataset):\n\n def __init__(self):\n import tensorflow_datasets as tfds\n train_examples, val_examples = tfds.load(\n 'math_dataset/arithmetic__mul',\n split=['train', 'test'],\n read_config=tfds.ReadConfig(shuffle_seed=seed,\n shuffle_reshuffle_each_iteration=True),\n as_supervised=True)\n",
"from __future__ import absolute_import, division, print_function\n\nfrom collections import OrderedDict\nfrom numbers import Number\nfrom typing import Optional\n\nimport numpy as np\nfrom six import string_types\n\nfrom odin.utils import as_tuple\nfrom odin.visual.plot_utils import (check_arg_length, generate_palette_colors,\n generate_random_colormaps,\n generate_random_colors,\n generate_random_marker, to_axis)\n\n\n# ===========================================================================\n# Scatter plot\n# ===========================================================================\ndef _parse_scatterXYZ(x, y, z):\n assert x is not None, \"`x` cannot be None\"\n # remove all `1` dimensions\n x = np.squeeze(x)\n if y is not None:\n y = np.squeeze(y)\n assert y.ndim == 1\n if z is not None:\n z = np.square(z)\n assert z.ndim == 1\n # infer y, z from x\n if x.ndim > 2:\n x = np.reshape(x, (-1, np.prod(x.shape[1:])))\n if x.ndim == 1:\n if y is None:\n y = x\n x = np.arange(len(y))\n elif x.ndim == 2:\n if x.shape[1] == 2:\n y = x[:, 1]\n x = x[:, 0]\n elif x.shape[1] > 2:\n z = x[:, 2]\n y = x[:, 1]\n x = x[:, 0]\n return x, y, z\n\n\ndef _validate_color_marker_size_legend(max_n_points,\n color,\n marker,\n size,\n text_marker=False,\n is_colormap=False,\n size_range=8,\n random_seed=1):\n \"\"\"Return: colors, markers, sizes, legends\"\"\"\n from odin.backend import interpolation\n from matplotlib.colors import LinearSegmentedColormap\n # check size range\n if isinstance(size, Number):\n size_range = interpolation.const(vmax=size)\n if isinstance(size_range, Number):\n size_range = interpolation.const(vmax=size_range)\n elif isinstance(size_range, interpolation.Interpolation):\n pass\n else:\n vmin, vmax = as_tuple(size_range, N=2)\n size_range = interpolation.linear(vmin=float(vmin), vmax=float(vmax))\n # check others\n default_color = 'b'\n if isinstance(color, (string_types, LinearSegmentedColormap)):\n default_color = color\n color = None\n # marker\n default_marker = '.'\n if isinstance(marker, string_types):\n default_marker = marker\n marker = None\n legend = [\n [None] * max_n_points, # color\n [None] * max_n_points, # marker\n [None] * max_n_points, # size\n ]\n #\n create_label_map = lambda labs, default_val, fn_gen: \\\n ({labs[0]: default_val}\n if len(labs) == 1 else\n {i: j for i, j in zip(labs, fn_gen(len(labs), seed=random_seed))})\n # ====== check arguments ====== #\n if color is None:\n color = [0] * max_n_points\n else:\n legend[0] = color\n #\n if marker is None:\n marker = [0] * max_n_points\n else:\n legend[1] = marker\n #\n if isinstance(size, Number):\n size = [0] * max_n_points\n elif size is None:\n size = [0] * max_n_points\n else: # given a list of labels\n legend[2] = size\n size_range.norm = np.max(size)\n # ====== validate the length ====== #\n for name, arr in [(\"color\", color), (\"marker\", marker), (\"size\", size)]:\n assert len(arr) == max_n_points, \\\n \"Given %d samples for `%s`, but require %d samples\" % \\\n (len(arr), name, max_n_points)\n # ====== labels set ====== #\n color_labels = np.unique(color)\n color_map = create_label_map(\n color_labels, default_color,\n generate_random_colormaps if is_colormap else generate_palette_colors)\n # generate_random_colors\n marker_labels = np.unique(marker)\n if text_marker:\n fn = lambda mrk, seed: marker_labels\n else:\n fn = generate_random_marker\n marker_map = create_label_map(marker_labels, default_marker, fn)\n #\n size_labels = np.unique(size)\n size_map = create_label_map(size_labels, 
size_range.vmax,\n lambda n, seed: size_range(np.arange(n)).numpy())\n # ====== prepare legend ====== #\n legend_name = []\n legend_style = []\n for c, m, s in zip(*legend):\n name = []\n style = []\n if c is None: # color\n name.append('')\n style.append(color_map[0])\n else:\n name.append(str(c))\n style.append(color_map[c])\n if m is None: # marker style\n name.append('')\n style.append(marker_map[0])\n else:\n name.append(str(m))\n style.append(marker_map[m])\n if s is None: # size\n name.append('')\n style.append(size_map[0])\n else:\n name.append(str(s))\n style.append(size_map[s])\n # name\n name = tuple(name)\n style = tuple(style)\n if name not in legend_name:\n legend_name.append(name)\n legend_style.append(style)\n #\n legend = OrderedDict([(i, j) for i, j in zip(legend_style, legend_name)])\n # ====== return ====== #\n return ([color_map[i] for i in color], [marker_map[i] for i in marker],\n [size_map[i] for i in size], legend)\n\n\ndef _downsample_scatter_points(x, y, z, max_n_points, *args):\n args = list(args)\n # downsample all data\n if max_n_points is not None and max_n_points < len(x):\n max_n_points = int(max_n_points)\n rand = np.random.RandomState(seed=1)\n ids = rand.permutation(len(x))[:max_n_points]\n x = np.array(x)[ids]\n y = np.array(y)[ids]\n if z is not None:\n z = np.array(z)[ids]\n args = [\n np.array(a)[ids] if isinstance(a, (tuple, list, np.ndarray)) else a\n for a in args\n ]\n return [len(x), x, y, z] + args\n\n\ndef _plot_scatter_points(*, x, y, z, val, color, marker, size, size_range,\n alpha, max_n_points, cbar, cbar_horizontal,\n cbar_nticks, cbar_ticks_rotation, cbar_title,\n cbar_fontsize, legend_enable, legend_loc, legend_ncol,\n legend_colspace, elev, azim, ticks_off, grid, fontsize,\n centroids, xlabel, ylabel, title, ax, **kwargs):\n from matplotlib import pyplot as plt\n import matplotlib as mpl\n # keep the marker as its original text\n text_marker = kwargs.get('text_marker', False)\n x, y, z = _parse_scatterXYZ(x, y, z)\n assert len(x) == len(y), \"Number of samples mismatch\"\n if z is not None:\n assert len(y) == len(z)\n is_3D_mode = False if z is None else True\n ax = to_axis(ax, is_3D_mode)\n ### check the colormap\n if val is None:\n vmin, vmax, color_normalizer = None, None, None\n is_colormap = False\n else:\n from matplotlib.colors import LinearSegmentedColormap\n vmin = np.min(val)\n vmax = np.max(val)\n color_normalizer = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n is_colormap = True\n if is_colormap:\n assert isinstance(color, (string_types, LinearSegmentedColormap)), \\\n \"`colormap` can be string or instance of matplotlib Colormap, \" + \\\n \"but given: %s\" % type(color)\n if not is_colormap and isinstance(color, string_types) and color == 'bwr':\n color = 'b'\n ### perform downsample and select the styles\n max_n_points, x, y, z, color, marker, size = _downsample_scatter_points(\n x, y, z, max_n_points, color, marker, size)\n color, marker, size, legend = _validate_color_marker_size_legend(\n max_n_points,\n color,\n marker,\n size,\n text_marker=text_marker,\n is_colormap=is_colormap,\n size_range=size_range)\n ### centroid style\n centroid_style = dict(horizontalalignment='center',\n verticalalignment='center',\n fontsize=fontsize + 2,\n weight=\"bold\",\n bbox=dict(boxstyle=\"circle\",\n facecolor=\"black\",\n alpha=0.48,\n pad=0.,\n edgecolor='none'))\n ### plotting\n artist = []\n legend_name = []\n for plot_idx, (style, name) in enumerate(legend.items()):\n style = list(style)\n x_, y_, z_, val_ = [], [], [], []\n # 
get the right set of data points\n for i, (c, m, s) in enumerate(zip(color, marker, size)):\n if c == style[0] and m == style[1] and s == style[2]:\n x_.append(x[i])\n y_.append(y[i])\n if is_colormap:\n val_.append(val[i])\n if is_3D_mode:\n z_.append(z[i])\n # 2D or 3D plot\n if not is_3D_mode:\n z_ = None\n # colormap or normal color\n if not is_colormap:\n val_ = None\n else:\n cm = plt.cm.get_cmap(style[0])\n val_ = color_normalizer(val_)\n style[0] = cm(val_)\n # yield for plotting\n n_art = len(artist)\n yield ax, artist, x_, y_, z_, style\n # check new axis added\n assert len(artist) > n_art, \\\n \"Forgot adding new art object created by plotting\"\n # check if ploting centroid\n if centroids:\n if is_3D_mode:\n ax.text(np.mean(x_),\n np.mean(y_),\n np.mean(z_),\n s=name[0],\n color=style[0],\n **centroid_style)\n else:\n ax.text(np.mean(x_),\n np.mean(y_),\n s=name[0],\n color=style[0],\n **centroid_style)\n # make the shortest name\n name = [i for i in name if len(i) > 0]\n short_name = []\n for i in name:\n if i not in short_name:\n short_name.append(i)\n name = ', '.join(short_name)\n if len(name) > 0:\n legend_name.append(name)\n ### at the end of the iteration, axis configuration\n if len(artist) == len(legend):\n ## colorbar (only enable when colormap is provided)\n if is_colormap and cbar:\n mappable = plt.cm.ScalarMappable(norm=color_normalizer, cmap=cm)\n mappable.set_clim(vmin, vmax)\n cba = plt.colorbar(\n mappable,\n ax=ax,\n shrink=0.99,\n pad=0.01,\n orientation='horizontal' if cbar_horizontal else 'vertical')\n if isinstance(cbar_nticks, Number):\n cbar_range = np.linspace(vmin, vmax, num=int(cbar_nticks))\n cbar_nticks = [f'{i:.2g}' for i in cbar_range]\n elif isinstance(cbar_nticks, (tuple, list, np.ndarray)):\n cbar_range = np.linspace(vmin, vmax, num=len(cbar_nticks))\n cbar_nticks = [str(i) for i in cbar_nticks]\n else:\n raise ValueError(f\"No support for cbar_nticks='{cbar_nticks}'\")\n cba.set_ticks(cbar_range)\n cba.set_ticklabels(cbar_nticks)\n if cbar_title is not None:\n if cbar_horizontal: # horizontal colorbar\n cba.ax.set_xlabel(str(cbar_title), fontsize=cbar_fontsize)\n else: # vertical colorbar\n cba.ax.set_ylabel(str(cbar_title), fontsize=cbar_fontsize)\n cba.ax.tick_params(labelsize=cbar_fontsize,\n labelrotation=cbar_ticks_rotation)\n ## plot the legend\n if len(legend_name) > 0 and bool(legend_enable):\n markerscale = 1.5\n if isinstance(artist[0], mpl.text.Text): # text plot special case\n for i, art in enumerate(list(artist)):\n pos = [art._x, art._y]\n if is_3D_mode:\n pos.append(art._z)\n if is_colormap:\n c = art._color\n else:\n c = art._color\n artist[i] = ax.scatter(*pos, c=c, s=0.1)\n markerscale = 25\n # sort the legends\n legend_name, artist = zip(\n *sorted(zip(legend_name, artist), key=lambda t: t[0]))\n legend_kw = {}\n if legend_loc is not None:\n legend_kw['loc'] = legend_loc\n if legend_ncol is not None:\n legend_kw['ncol'] = legend_ncol\n legend = ax.legend(artist, legend_name,\n labelspacing=0.,\n handletextpad=0.1,\n markerscale=markerscale,\n scatterpoints=1,\n columnspacing=float(legend_colspace),\n fontsize=fontsize,\n **legend_kw)\n # scatteryoffsets=[0.375, 0.5, 0.3125],\n # bbox_to_anchor=(0.5, -0.01),\n # labelspacing=0.,\n # handletextpad=0.1)\n ## tick configuration\n if ticks_off:\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n if is_3D_mode:\n ax.set_zticklabels([])\n if grid:\n ax.set_axisbelow(True)\n ax.grid(grid, which='both', axis='both', linewidth=0.8, alpha=0.5)\n if xlabel is not None:\n 
ax.set_xlabel(str(xlabel), fontsize=fontsize - 1)\n if ylabel is not None:\n ax.set_ylabel(str(ylabel), fontsize=fontsize - 1)\n if title is not None:\n ax.set_title(str(title), fontsize=fontsize, fontweight='regular')\n if is_3D_mode and (elev is not None or azim is not None):\n ax.view_init(elev=ax.elev if elev is None else elev,\n azim=ax.azim if azim is None else azim)\n\n\n# ===========================================================================\n# Main functions\n# ===========================================================================\ndef plot_scatter(x: np.ndarray,\n y: Optional[np.ndarray] = None,\n z: Optional[np.ndarray] = None,\n val=None,\n ax=None,\n color='bwr',\n marker='o',\n size=4.0,\n size_range=(8., 25.),\n alpha=0.8,\n linewidths=0.,\n linestyle='-',\n edgecolors=None,\n elev=None,\n azim=None,\n ticks_off=True,\n grid=True,\n cbar=False,\n cbar_horizontal=False,\n cbar_nticks=10,\n cbar_ticks_rotation=-30,\n cbar_fontsize=10,\n cbar_title=None,\n legend_enable=True,\n legend_loc: Optional[str] = None,\n legend_ncol: Optional[int] = None,\n legend_colspace=0.4,\n centroids=False,\n max_n_points=None,\n fontsize=8,\n xlabel=None,\n ylabel=None,\n title=None):\n \"\"\"Generalized function for plotting scatter points colored or heatmap.\n\n Parameters\n ----------\n x : {1D, or 2D array} [n_samples,]\n y : {None, 1D-array} [n_samples,]\n z : {None, 1D-array} [n_samples,]\n if provided, plot in 3D\n val : 1D-array (num_samples,)\n float value for the intensity of given class\n ax : {None, int, tuple of int, Axes object) (default: None)\n if int, `ax` is the location of the subplot (e.g. `111`)\n if tuple, `ax` is tuple of location (e.g. `(1, 1, 1)`)\n if Axes object, `ax` must be `mpl_toolkits.mplot3d.Axes3D` in case `z`\n is given\n color: array [n_samples,]\n list of colors for each class, check `generate_random_colors`,\n length of color must be equal to `x` and `y`\n marker: array [n_samples,]\n different marker for each color, default marker is '.'\n legend_ncol : int (default: 3)\n number of columns for displaying legends\n legend_colspace : float (default: 0.4)\n space between columns in the legend\n legend_loc : {str, int}\n ‘best’ 0\n ‘upper right’ 1\n ‘upper left’ 2\n ‘lower left’ 3\n ‘lower right’ 4\n ‘right’ 5\n ‘center left’ 6\n ‘center right’ 7\n ‘lower center’ 8\n ‘upper center’ 9\n ‘center’ 10\n elev : {None, Number} (default: None or 30 degree)\n stores the elevation angle in the z plane, with `elev=90` is\n looking from top down.\n This can be used to rotate the axes programatically.\n azim : {None, Number} (default: None or -60 degree)\n stores the azimuth angle in the x,y plane.\n This can be used to rotate the axes programatically.\n centroids : Boolean. 
If True, annotate the labels on centroid of\n each cluster.\n xlabel, ylabel: str (optional)\n label for x-axis and y-axis\n title : {None, string} (default: None)\n specific title for the subplot\n \"\"\"\n from matplotlib import pyplot as plt\n for ax, artist, x, y, z, \\\n (color, marker, size) in _plot_scatter_points(**locals()):\n kwargs = dict(\n c=color,\n marker=marker,\n s=size,\n linewidths=linewidths,\n linestyle=linestyle,\n edgecolors=edgecolors,\n alpha=alpha,\n )\n if z is not None: # 3D plot\n art = ax.scatter(x, y, z, **kwargs)\n else: # 2D plot\n art = ax.scatter(x, y, **kwargs)\n artist.append(art)\n return ax\n\n\ndef plot_scatter_text(x,\n y=None,\n z=None,\n val=None,\n ax=None,\n color='bwr',\n marker='o',\n weight='normal',\n size=4.0,\n size_range=(8., 25.),\n alpha=0.8,\n linewidths=0.,\n linestyle='-',\n edgecolors=None,\n elev=None,\n azim=None,\n ticks_off=True,\n grid=True,\n cbar=False,\n cbar_horizontal=False,\n cbar_nticks=10,\n cbar_ticks_rotation=-30,\n cbar_title=None,\n legend_enable=True,\n legend_loc='upper center',\n legend_ncol=3,\n legend_colspace=0.4,\n centroids=False,\n max_n_points=None,\n fontsize=10,\n title=None):\n r\"\"\"\n Arguments:\n x : {1D, or 2D array} [n_samples,]\n y : {None, 1D-array} [n_samples,]\n z : {None, 1D-array} [n_samples,]\n if provided, plot in 3D\n marker : {tuple, list, array}\n list of the text or character for plotting at each data point\n weight : {'normal', 'bold', 'heavy', 'light', 'ultrabold', 'ultralight'}.\n Font weight\n ax : {None, int, tuple of int, Axes object) (default: None)\n if int, `ax` is the location of the subplot (e.g. `111`)\n if tuple, `ax` is tuple of location (e.g. `(1, 1, 1)`)\n if Axes object, `ax` must be `mpl_toolkits.mplot3d.Axes3D` in case `z`\n is given\n elev : {None, Number} (default: None or 30 degree)\n stores the elevation angle in the z plane, with `elev=90` is\n looking from top down.\n This can be used to rotate the axes programatically.\n azim : {None, Number} (default: None or -60 degree)\n stores the azimuth angle in the x,y plane.\n This can be used to rotate the axes programatically.\n \"\"\"\n xlim = (np.inf, -np.inf)\n ylim = (np.inf, -np.inf)\n zlim = (np.inf, -np.inf)\n for ax, artist, x, y, z, \\\n (color, marker, size) in _plot_scatter_points(text_marker=True,\n **locals()):\n if len(color) != len(x):\n color = [color] * len(x)\n # axes limits\n xlim = (min(xlim[0], min(x)), max(xlim[1], max(x)))\n ylim = (min(ylim[0], min(y)), max(ylim[1], max(y)))\n if z is not None:\n zlim = (min(zlim[0], min(z)), max(zlim[1], max(z)))\n # font style\n fontdict = dict(size=size, weight=weight)\n # alignment\n kwargs = dict(horizontalalignment='center', verticalalignment='center')\n if z is not None: # 3D plot\n for a, b, c, col in zip(x, y, z, color):\n fontdict.update(color=col)\n art = ax.text(a,\n b,\n c,\n s=str(marker),\n elev=elev,\n azim=azim,\n fontdict=fontdict,\n **kwargs)\n else: # 2D plot\n for a, b, col in zip(x, y, color):\n fontdict.update(color=col)\n art = ax.text(a, b, s=str(marker), fontdict=fontdict, **kwargs)\n # store the art\n artist.append(art)\n # set the axes limits\n adjust = lambda mi, ma: (mi - 0.1 * np.abs(mi), ma + 0.1 * ma)\n ax.set_xlim(adjust(*xlim))\n ax.set_ylim(adjust(*ylim))\n if z is not None:\n ax.set_zlim(adjust(*zlim))\n return ax\n\n\ndef plot_scatter_layers(x_y_val,\n ax=None,\n layer_name=None,\n layer_color=None,\n layer_marker=None,\n size=4.0,\n z_ratio=4,\n elev=None,\n azim=88,\n ticks_off=True,\n grid=True,\n surface=True,\n 
wireframe=False,\n wireframe_resolution=10,\n colorbar=False,\n colorbar_horizontal=False,\n legend_loc='upper center',\n legend_ncol=3,\n legend_colspace=0.4,\n fontsize=8,\n title=None):\n r\"\"\"\n Parameter\n ---------\n z_ratio: float (default: 4)\n the amount of compression that layer in z_axis will be closer\n to each others compared to (x, y) axes\n \"\"\"\n from matplotlib import pyplot as plt\n assert len(x_y_val) > 1, \"Use `plot_scatter_heatmap` to plot only 1 layer\"\n max_z = -np.inf\n min_z = np.inf\n for x, y, val in x_y_val:\n assert len(x) == len(y) == len(val), \"Number of samples mismatch\"\n max_z = max(max_z, np.max(x), np.max(y))\n min_z = min(min_z, np.min(x), np.min(y))\n ax = to_axis(ax, is_3D=True)\n num_classes = len(x_y_val)\n # ====== preparing ====== #\n # name\n layer_name = check_arg_length(dat=layer_name,\n n=num_classes,\n dtype=string_types,\n default='',\n converter=lambda x: str(x))\n # colormap\n layer_color = check_arg_length(dat=layer_color,\n n=num_classes,\n dtype=string_types,\n default='Blues',\n converter=lambda x: plt.get_cmap(str(x)))\n # class marker\n layer_marker = check_arg_length(dat=layer_marker,\n n=num_classes,\n dtype=string_types,\n default='o',\n converter=lambda x: str(x))\n # size\n size = check_arg_length(dat=size,\n n=num_classes,\n dtype=Number,\n default=4.0,\n converter=lambda x: float(x))\n # ====== plotting each class ====== #\n legends = []\n for idx, (alpha, z) in enumerate(\n zip(np.linspace(0.05, 0.4, num_classes),\n np.linspace(min_z / 4, max_z / 4, num_classes))):\n x, y, val = x_y_val[idx]\n num_samples = len(x)\n z = np.full(shape=(num_samples,), fill_value=z)\n _ = ax.scatter(x,\n y,\n z,\n c=val,\n s=size[idx],\n marker=layer_marker[idx],\n cmap=layer_color[idx])\n # ploting surface and wireframe\n if surface or wireframe:\n x, y = np.meshgrid(np.linspace(min(x), max(x), wireframe_resolution),\n np.linspace(min(y), max(y), wireframe_resolution))\n z = np.full_like(x, fill_value=z[0])\n if surface:\n ax.plot_surface(X=x,\n Y=y,\n Z=z,\n color=layer_color[idx](0.5),\n edgecolor='none',\n alpha=alpha)\n if wireframe:\n ax.plot_wireframe(X=x,\n Y=y,\n Z=z,\n linewidth=0.8,\n color=layer_color[idx](0.8),\n alpha=alpha + 0.1)\n # legend\n name = layer_name[idx]\n if len(name) > 0:\n legends.append((name, _))\n # colorbar\n if colorbar:\n cba = plt.colorbar(\n _,\n shrink=0.5,\n pad=0.01,\n orientation='horizontal' if colorbar_horizontal else 'vertical')\n if len(name) > 0:\n cba.set_label(name, fontsize=fontsize)\n # ====== plot the legend ====== #\n if len(legends) > 0:\n legends = ax.legend([i[1] for i in legends], [i[0] for i in legends],\n markerscale=1.5,\n scatterpoints=1,\n scatteryoffsets=[0.375, 0.5, 0.3125],\n loc=legend_loc,\n bbox_to_anchor=(0.5, -0.01),\n ncol=int(legend_ncol),\n columnspacing=float(legend_colspace),\n labelspacing=0.,\n fontsize=fontsize,\n handletextpad=0.1)\n for i, c in enumerate(layer_color):\n legends.legendHandles[i].set_color(c(.8))\n # ====== some configuration ====== #\n if ticks_off:\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n ax.grid(grid)\n if title is not None:\n ax.set_title(str(title))\n if (elev is not None or azim is not None):\n ax.view_init(elev=ax.elev if elev is None else elev,\n azim=ax.azim if azim is None else azim)\n return ax\n",
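A short usage sketch for `plot_scatter` defined above, assuming the function is importable as `odin.visual.plot_scatter` (the import path, the random toy data, and the output file name are assumptions for illustration, not part of the original module):

import numpy as np
from odin.visual import plot_scatter  # assumed import path

# toy 2-D points forming three labelled clusters (illustrative data)
rng = np.random.RandomState(1)
x = np.concatenate([rng.randn(50, 2) + 4 * c for c in range(3)], axis=0)
labels = ['cluster_%d' % c for c in range(3) for _ in range(50)]

# `color` takes one label per sample; `centroids=True` annotates each
# cluster with its label at the cluster centroid
ax = plot_scatter(x, color=labels, marker='o', size=8.0,
                  centroids=True, grid=True, title='toy clusters')
ax.get_figure().savefig('/tmp/toy_scatter.png', dpi=120)

Passing a 2-D `x` lets the function split it into x/y coordinates internally; for a heatmap-style plot, pass a scalar `val` per sample together with a colormap name for `color` (e.g. the default 'bwr') and `cbar=True` to draw the colorbar.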
"# -*- coding: utf-8 -*-\n\"\"\"\"\nThis module contains tools for Gaussian mixture modeling (GMM)\n__author__ = 'Omid Sadjadi, Timothee Kheyrkhah'\n__email__ = '[email protected]'\nModification and GPU-implementation by TrungNT\n\"\"\"\nimport os\nimport pickle\nimport random\nimport threading\nimport time\nfrom collections import Mapping, OrderedDict, defaultdict\n\nimport numpy as np\nimport tensorflow as tf\nfrom scipy import linalg\nfrom six import string_types\n\nfrom odin import backend as K\nfrom bigarray import MmapArray\nfrom odin.ml.base import BaseEstimator, DensityMixin, TransformerMixin\nfrom odin.utils import (MPI, Progbar, array_size, as_tuple, minibatch, cpu_count,\n ctext, defaultdictkey, eprint, is_number, segment_list,\n uuid, wprint)\n\nEPS = 1e-6\n# minimum batch size that will be optimal to transfer\n# the data to GPU for calculation (tested on Titan X)\n# NOTE: tensorflow has a lagging effect, it will be\n# slower than numpy if you evaluate the\n# expression for first time.\nMINIMUM_GPU_BLOCK = 8000 * 120 * 4 # bytes\n\n# ===========================================================================\n# Helper\n# ===========================================================================\ndef zeroStat(post):\n \"\"\" Shape: (1, nmix)\n # sum over all samples\n # Zero-order statistics over all samples and dimension for\n each components\n \"\"\"\n # ====== tensorflow tensor or variable ====== #\n if K.is_tensor(post, inc_distribution=True, inc_variable=True):\n y = tf.reduce_sum(post, axis=0, keepdims=True,\n name=\"zero_stat\")\n # ====== numpy array ====== #\n else:\n y = np.sum(post, axis=0, keepdims=True) # (1, M)\n return y\n\n\ndef firstStat(X, post):\n \"\"\" Shape: (feat_dim, nmix)\n First-order statistics over all samples for each components\n \"\"\"\n # ====== tensorflow tensor or variable ====== #\n if K.is_tensor(X, inc_distribution=True, inc_variable=True):\n y = tf.matmul(tf.transpose(X), post, name='first_stat')\n # ====== numpy array ====== #\n else:\n y = np.dot(X.T, post)\n return y\n\n\ndef secondStat(X, post):\n \"\"\" Shape: (feat_dim, nmix)\n Second-order statistics over all samples for each components\n \"\"\"\n # ====== tensorflow tensor or variable ====== #\n if K.is_tensor(X, inc_distribution=True, inc_variable=True):\n y = tf.matmul(tf.transpose(tf.pow(X, 2)), post, name=\"second_stat\")\n # ====== numpy array ====== #\n else:\n y = np.dot((X ** 2).T, post)\n return y\n\n\ndef logsumexp(X, axis):\n \"\"\"\n Compute log(sum(exp(x),dim)) while avoiding numerical underflow\n \"\"\"\n # ====== tensorflow tensor or variable ====== #\n if K.is_tensor(X, inc_distribution=True, inc_variable=True):\n xmax = tf.reduce_max(X, axis=axis, keepdims=True)\n y = tf.add_n(inputs=[\n xmax,\n tf.log(tf.reduce_sum(input_tensor=tf.exp(X - xmax),\n axis=axis,\n keepdims=True))],\n name='llk')\n # ====== numpy array ====== #\n else:\n xmax = np.max(X, axis=axis, keepdims=True)\n y = xmax + np.log(np.sum(a=np.exp(X - xmax),\n axis=axis,\n keepdims=True))\n return y\n\n\ndef _split_jobs(n_samples, ncpu, device, gpu_factor):\n \"\"\" Return: jobs_cpu, jobs_gpu\"\"\"\n # number of GPU\n # TODO: fix `get_ngpu` here\n ngpu = get_ngpu()\n if ngpu == 0:\n device = 'cpu'\n # jobs split based on both number of CPU and GPU\n if device == 'mix':\n njob = ncpu + 1 + ngpu * gpu_factor\n elif device == 'cpu':\n njob = ncpu + 1\n elif device == 'gpu':\n njob = ngpu + 1\n else:\n raise ValueError(\"Unknown device: '%s'\" % device)\n jobs = np.linspace(start=0, stop=n_samples,\n 
num=njob, dtype='int32')\n jobs = list(zip(jobs, jobs[1:]))\n # use both GPU and CPU\n if device == 'mix':\n jobs_gpu = [(jobs[i * gpu_factor][0],\n jobs[i * gpu_factor + gpu_factor - 1][1])\n for i in range(ngpu)]\n jobs_cpu = [jobs[i] for i in range(ngpu * gpu_factor, len(jobs))]\n elif device == 'gpu': # only GPU\n jobs_gpu = jobs\n jobs_cpu = []\n elif device == 'cpu': # only CPU\n jobs_gpu = []\n jobs_cpu = jobs\n return jobs_cpu, jobs_gpu\n\ndef _create_batch(X, sad, start, end, batch_size,\n downsample, stochastic,\n seed, curr_nmix, curr_niter):\n \"\"\"\n Return\n ------\n (X, n_selected_frame, n_original_frame)\n i.e. the number of select frames might be different from number\n of original frames after applying SAD\n \"\"\"\n # stochastic downsample, seed change every iter and mixup\n if stochastic:\n random.seed(seed + curr_nmix + curr_niter)\n else: # deterministic\n random.seed(seed)\n all_batches = list(minibatch(n=end - start, batch_size=batch_size))\n random.shuffle(all_batches)\n # iterate over batches\n for batch_id, (batch_start, batch_end) in enumerate(all_batches):\n batch_start += start\n batch_end += start\n n_original_sample = batch_end - batch_start\n # first batch always selected,\n # downsample by randomly ignore a batch\n if batch_id == 0 or \\\n downsample == 1 or \\\n (downsample > 1 and random.random() <= 1. / downsample):\n X_sad = (X[batch_start:batch_end]\n if sad is None else\n X[batch_start:batch_end][sad[batch_start:batch_end].astype('bool')])\n yield X_sad, X_sad.shape[0], n_original_sample\n # ====== batch ignored ====== #\n else:\n yield None, n_original_sample, n_original_sample\n\ndef _create_batch_indices(X, sad, indices, batch_size,\n downsample, stochastic,\n seed, curr_nmix, curr_niter):\n \"\"\"\n Return\n ------\n (X, n_selected_frame, n_original_frame)\n i.e. the number of select frames might be different from number\n of original frames after applying SAD\n \"\"\"\n # stochastic downsample, seed change every iter and mixup\n if stochastic:\n random.seed(seed + curr_nmix + curr_niter)\n else: # deterministic\n random.seed(seed)\n random.shuffle(indices)\n # ====== prepare the buffer ====== #\n X_buffer = []\n n_original_buffer = 0\n n_selected_buffer = 0\n # ====== iterate over each file ====== #\n for batch_id, (name, (file_start, file_end)) in enumerate(indices):\n n_original_sample = file_end - file_start\n # first batch always selected,\n # downsample by randomly ignore a batch\n if batch_id == 0 or \\\n downsample == 1 or \\\n (downsample > 1 and random.random() <= 1. 
/ downsample):\n # not enough sample for batching\n if n_original_sample <= batch_size:\n X_sad = (X[file_start:file_end]\n if sad is None else\n X[file_start:file_end][sad[file_start:file_end].astype('bool')])\n # store in buffer\n X_buffer.append(X_sad)\n n_selected_buffer += X_sad.shape[0]\n n_original_buffer += n_original_sample\n # split into smaller mini-batch\n else:\n for batch_start, batch_end in minibatch(n=n_original_sample, batch_size=batch_size):\n batch_start = batch_start + file_start\n batch_end = batch_end + file_start\n X_sad = (X[batch_start: batch_end]\n if sad is None else\n X[batch_start: batch_end][sad[batch_start: batch_end].astype('bool')])\n if X_sad.shape[0] >= batch_size: # full batch\n yield X_sad, X_sad.shape[0], n_original_sample\n else: # store in buffer\n X_buffer.append(X_sad)\n n_selected_buffer += X_sad.shape[0]\n n_original_buffer += n_original_sample\n # ====== batch ignored ====== #\n else:\n yield None, n_original_sample, n_original_sample\n # ====== check buffer to return ====== #\n if n_selected_buffer >= batch_size:\n yield np.concatenate(X_buffer, axis=0), n_selected_buffer, n_original_buffer\n X_buffer = []\n n_selected_buffer = 0\n n_original_buffer = 0\n # ====== check final buffer to return ====== #\n if len(X_buffer) > 0:\n yield np.concatenate(X_buffer, axis=0), n_selected_buffer, n_original_buffer\n\nclass _ExpectationResults(object):\n \"\"\" ExpectationResult \"\"\"\n\n def __init__(self, n_samples, nb_results, name, print_progress):\n super(_ExpectationResults, self).__init__()\n # thread lock\n self.lock = threading.Lock()\n # progress bar\n self.prog = Progbar(target=n_samples, print_report=True,\n print_summary=False, name=name)\n # GMM: Z, F, S, L, nframes\n # I-vector: LU, RU, llk, nframes\n self.stats = [0. for i in range(int(nb_results))]\n self.print_progress = bool(print_progress)\n\n def update(self, res):\n \"\"\"\n integer (or a number): number of processed samples (update the progress bar)\n otherwise: list of results (update the statistics)\n \"\"\"\n # thread-safe udpate\n self.lock.acquire()\n try:\n # returned number of processed samples\n if is_number(res) and self.print_progress:\n self.prog.add(res)\n # return the statistics, end of process\n else:\n for i, r in enumerate(res):\n self.stats[i] += r\n finally:\n self.lock.release()\n\n# ===========================================================================\n# Main GMM\n# ===========================================================================\nclass GMM(DensityMixin, BaseEstimator, TransformerMixin):\n r\"\"\" Gaussian Mixture Model with diagonal covariance.\n\n Parameters\n ----------\n nmix : int\n number of mixtures\n nmix_start : int\n the algorithm start from given number of mixture, then perform\n E-M and split to increase the mixtures to desire number\n niter : int (default: 16)\n number of iteration for E-M algorithm\n dtype : {str, numpy.dtype} (default: float32)\n desire dtype for mean, std, weights and input matrices\n It is recommended to keep 'float32', since this speed up\n a lot on GPU\n allow_rollback : bool (default: True)\n If True, reset the `mean`, `sigma` and `w` to the last\n stable iteration, when `sigma` values smaller than 0\n exit_on_error : bool (default: True)\n Stop fitting when EM reach singular value and `sigma` become\n numerical instabable (i.e. 
its values are smaller than 0)\n batch_size : {int, 'auto'}\n if 'auto', used `12 Megabytes` block for CPU batch and\n `25 Megabytes` block for GPU batch\n device : {'cpu', 'gpu', 'mix'}\n 'gpu' - run the computaiton on GPU\n 'cpu' - use multiprocessing for multiple cores\n 'mix' - use both GPU and multi-processing\n * It is suggested to use mix of GPU and CPU if you have\n more than 24 cores CPU, otherwise, 'gpu' gives the best\n performance\n ncpu : int\n number of processes for parallel calculating Expectation\n gpu_factor : int\n how much jobs GPU will handle more than CPU\n (i.e. `njob_gpu = gpu_factor * njob_cpu`)\n stochastic_downsample : bool\n if True, a subset of data is selected differently after\n each iteration => the training is stochastic.\n if False, a deterministic selection of data is performed\n each iteration => the training is deterministic.\n seed : int\n random seed for reproducible\n path : {str, None}\n If given a path, save the model after everytime its\n parameters changed (i.e. `maximization` or `gmm_mixup`\n are called)\n name : {str, None}\n special name for this `Tmatrix` instance\n\n\n Attributes\n ----------\n mu : (feat_dim, nmix)\n mean vector for each component\n sigma : (feat_dim, nmix)\n standard deviation for each component\n w : (1, nmix)\n weights of each component\n\n Note\n ----\n Memory throughput is the bottleneck in most of the case,\n try to move the data to faster storage before fitting.\n\n \"\"\"\n\n STANDARD_CPU_BATCH_SIZE = 12 * 1024 * 1024 # 12 Megabytes\n STANDARD_GPU_BATCH_SIZE = 25 * 1024 * 1024 # 25 Megabytes\n\n def __init__(self, nmix, nmix_start=1, niter=16, dtype='float32',\n allow_rollback=True, exit_on_error=False,\n batch_size_cpu='auto', batch_size_gpu='auto',\n downsample=1, stochastic_downsample=True,\n device='cpu', ncpu=1, gpu_factor=80,\n seed=1234, path=None, name=None):\n super(GMM, self).__init__()\n self._path = path if isinstance(path, string_types) else None\n # ====== set number of mixtures ====== #\n # start from 1 mixture, then split and up\n nmix = int(nmix)\n if nmix < 1:\n raise ValueError(\"Number of Mixture must be greater than 1.\")\n self._nmix = nmix\n self._curr_nmix = np.clip(int(nmix_start), 1, self._nmix)\n # others dimension\n self._feat_dim = None\n self._niter = int(niter)\n self.batch_size_cpu = batch_size_cpu\n self.batch_size_gpu = batch_size_gpu\n # ====== downsample ====== #\n self.downsample = int(downsample)\n self.stochastic_downsample = bool(stochastic_downsample)\n self._seed = int(seed)\n # ====== multi-processing ====== #\n self.gpu_factor = int(gpu_factor)\n # cpu\n if ncpu is None:\n ncpu = cpu_count() - 1\n self.ncpu = int(ncpu)\n # device\n self.set_device(device)\n # ====== state variable ====== #\n # store history of {nmix -> [llk_1, llk_2] ...}\n self._llk_hist = defaultdict(list)\n # ====== error handling ====== #\n self.allow_rollback = bool(allow_rollback)\n self.exit_on_error = bool(exit_on_error)\n self._stop_fitting = False\n # ====== name ====== #\n self._dtype = np.dtype(dtype)\n if name is None:\n name = uuid(length=8)\n self._name = 'GMM_%s' % name\n else:\n self._name = str(name)\n\n def __getstate__(self):\n # 'means', 'variances', 'weights'\n # self.mean, self.sigma, self.w\n if not self.is_initialized:\n raise RuntimeError(\"GMM hasn't been initialized, nothing to save\")\n return (self.mean, self.sigma, self.w,\n self.allow_rollback, self.exit_on_error,\n self._nmix, self._curr_nmix, self._feat_dim,\n self._niter, self.batch_size_cpu, self.batch_size_gpu,\n 
self.downsample, self.stochastic_downsample,\n self._seed, self._llk_hist,\n self.ncpu, self._device, self.gpu_factor,\n self._dtype, self._path, self._name)\n\n def __setstate__(self, states):\n (self.mean, self.sigma, self.w,\n self.allow_rollback, self.exit_on_error,\n self._nmix, self._curr_nmix, self._feat_dim,\n self._niter, self.batch_size_cpu, self.batch_size_gpu,\n self.downsample, self.stochastic_downsample,\n self._seed, self._llk_hist,\n self.ncpu, self._device, self.gpu_factor,\n self._dtype, self._path, self._name) = states\n # basic constants\n self._stop_fitting = False\n self._feat_const = self.feat_dim * np.log(2 * np.pi)\n self.X_ = tf.placeholder(shape=(None, self.feat_dim),\n dtype=self.dtype,\n name='GMM_input')\n # init posterior\n self._resfresh_cpu_posterior()\n self._refresh_gpu_posterior()\n # ====== warning no GPU ====== #\n if self._device in ('gpu', 'mix') and get_ngpu() == 0:\n wprint(\"Enabled GPU device, but no GPU found!\")\n\n def __str__(self):\n if not self.is_initialized:\n return '<\"%s\" nmix:%d initialized:False>' % (self.name, self._nmix)\n s = '<\"%s\" nmix:%s ndim:%s mean:%s std:%s w:%s CPU:%s GPU:%s>' %\\\n (ctext(self.name, 'yellow'),\n ctext(self._nmix, 'cyan'),\n ctext(self._feat_dim, 'cyan'),\n ctext(self.mean.shape, 'cyan'),\n ctext(self.sigma.shape, 'cyan'),\n ctext(self.w.shape, 'cyan'),\n ctext(self.batch_size_cpu, 'cyan'),\n ctext(self.batch_size_gpu, 'cyan'),\n )\n return s\n\n # ==================== properties ==================== #\n def set_device(self, device):\n device = str(device).lower()\n if device not in ('cpu', 'gpu', 'mix'):\n raise ValueError(\"`device` must be one of the following: 'cpu', 'gpu', or 'mix'\")\n # ====== warning no GPU ====== #\n if device in ('gpu', 'mix') and get_ngpu() == 0:\n wprint(\"Using GPU device but NO GPU detected, \"\n \"tensorflow will switch to slower CPU computation!\")\n self._device = device\n return self\n\n @property\n def device(self):\n return self._device\n\n @property\n def path(self):\n return self._path\n\n @property\n def name(self):\n return self._name\n\n @property\n def is_initialized(self):\n return self._feat_dim is not None\n\n @property\n def is_fitted(self):\n return self._curr_nmix == self._nmix\n\n @property\n def nmix(self):\n return self._nmix\n\n @property\n def feat_dim(self):\n if not self.is_initialized:\n raise RuntimeError(\"GMM has not been initialized on data.\")\n return self._feat_dim\n\n @property\n def history(self):\n \"\"\" Return the history of fitting this GMM in following format:\n `[(current_nmix, current_niter, llk), ...]`\n \"\"\"\n return tuple(self._llk_hist)\n\n @property\n def dtype(self):\n return self._dtype\n\n # ==================== initialization ==================== #\n def _resfresh_cpu_posterior(self):\n \"\"\" Refresh cached value for CPu computations. 
\"\"\"\n expressions = {}\n precision = 1 / (self.sigma + EPS)\n C = np.sum((self.mean ** 2) * precision, axis=0, keepdims=True) + \\\n np.sum(np.log(self.sigma + EPS), axis=0, keepdims=True) - \\\n 2 * np.log(self.w + EPS) # TODO: check here if add EPS to self.w\n mu_precision = self.mean * precision\n expressions['precision'] = precision\n expressions['mu_precision'] = mu_precision\n expressions['C'] = C\n self.__expressions_cpu = expressions\n\n def _refresh_gpu_posterior(self):\n \"\"\" Call this function when you update the mixture\n components.\n\n Unlike CPU computation, tensorflow graph on need to\n renew it placeholder which represent: mu, sigma, weight\n when GMM mixup.\n \"\"\"\n expressions = {}\n # ====== proper scope ====== #\n if self._curr_nmix < self.nmix:\n scope = self.name + str(self._curr_nmix)\n else:\n scope = self.name\n # ====== build the graph ====== #\n with tf.variable_scope(scope):\n mu = tf.placeholder(shape=(self.feat_dim, self._curr_nmix),\n dtype=self.dtype,\n name='GMM_mu')\n sigma = tf.placeholder(shape=(self.feat_dim, self._curr_nmix),\n dtype=self.dtype,\n name='GMM_sigma')\n w = tf.placeholder(shape=(1, self._curr_nmix),\n dtype=self.dtype,\n name='GMM_weight')\n expressions['mu'] = mu\n expressions['sigma'] = sigma\n expressions['w'] = w\n # ====== log probability ====== #\n # (feat_dim, nmix)\n precision = 1 / (sigma + EPS)\n C = tf.reduce_sum((mu ** 2) * precision,\n axis=0, keepdims=True) + \\\n tf.reduce_sum(tf.log(sigma + EPS),\n axis=0, keepdims=True) - \\\n 2 * tf.log(w)\n D = tf.matmul(self.X_ ** 2, precision) - \\\n 2 * tf.matmul(self.X_, mu * precision) + \\\n self.feat_dim * np.log(2 * np.pi)\n # (batch_size, nmix)\n logprob = tf.multiply(x=tf.constant(-0.5, dtype=self.dtype),\n y=C + D,\n name='logprob')\n expressions['logprob'] = logprob # (batch_size, nmix)\n # ====== posterior and likelihood ====== #\n llk = logsumexp(logprob, axis=1) # (batch_size, 1)\n post = tf.exp(logprob - llk, name='postprob') # (batch_size, nmix)\n expressions['llk'] = llk\n expressions['post'] = post\n # ====== expectation ====== #\n expressions['zero'] = zeroStat(post)\n expressions['first'] = firstStat(self.X_, post)\n expressions['second'] = secondStat(self.X_, post)\n expressions['L'] = tf.reduce_sum(llk, axis=None, name='sum_llk')\n self.__expressions_gpu = expressions\n\n def initialize(self, X):\n indices = None\n if isinstance(X, (tuple, list)):\n tmp = [i for i in X if hasattr(i, 'shape')][0]\n indices = [i for i in X if i != tmp][0]\n X = tmp\n # ====== check X ====== #\n if not isinstance(X, np.ndarray):\n raise ValueError(\"`X` must be numpy.ndarray\")\n # ====== check indices ====== #\n if isinstance(indices, Mapping):\n indices = list(indices.items())\n elif not isinstance(indices, (tuple, list, np.ndarray, type(None))):\n raise ValueError(\"`indices` must be None, Mapping, tuple, list or numpy.ndarray\")\n # ====== get input info ====== #\n if hasattr(X, 'ndim'):\n ndim = X.ndim\n elif hasattr(X, 'get_shape'):\n ndim = len(X.shape.as_list())\n else:\n raise ValueError(\"Cannot number of dimension from input.\")\n\n if hasattr(X, 'shape'):\n feat_dim = X.shape[1]\n elif hasattr(X, 'get_shape'):\n feat_dim = X.shape.as_list()[1]\n else:\n raise ValueError(\"Cannot get feature dimension from input.\")\n # ====== already init ====== #\n if self.is_initialized:\n # validate the inputs\n if ndim != 2 or feat_dim != self._feat_dim:\n raise RuntimeError(\"Input must be 2-D matrix with the 1st \"\n \"dimension equal to: %d\" % feat_dim)\n return X, 
indices\n # ====== create input placeholder ====== #\n self._feat_dim = int(feat_dim)\n # const for specific dimension\n self._feat_const = self.feat_dim * np.log(2 * np.pi)\n # infer batch_size\n if isinstance(self.batch_size_cpu, string_types):\n self.batch_size_cpu = int(GMM.STANDARD_CPU_BATCH_SIZE /\n (self.feat_dim * self.dtype.itemsize))\n if isinstance(self.batch_size_gpu, string_types):\n self.batch_size_gpu = int(GMM.STANDARD_GPU_BATCH_SIZE /\n (self.feat_dim * self.dtype.itemsize))\n # [batch_size, feat_dim]\n self.X_ = tf.placeholder(shape=(None, self.feat_dim),\n dtype=self.dtype,\n name='GMM_input')\n # ====== init ====== #\n # (D, M)\n self.mean = np.zeros((feat_dim, self._curr_nmix), dtype=self._dtype)\n # (D, M)\n self.sigma = np.ones((feat_dim, self._curr_nmix), dtype=self._dtype)\n # (1, M)\n self.w = np.ones((1, self._curr_nmix), dtype=self._dtype)\n # init posterior\n self._resfresh_cpu_posterior()\n self._refresh_gpu_posterior()\n return X, indices\n\n # ==================== sklearn ==================== #\n def fit(self, X, y=None):\n \"\"\"\n Parameters\n ----------\n X : {numpy.ndarray, tuple, list}\n in case a tuple is given, two options are considered:\n - length of the list is 1: only training feature is given\n - length of the list is 2: training data (numpy.ndarray),\n indices or sad indices (numpy.ndarray)\n - length of the list is 3: training data (numpy.ndarray),\n sad indices (numpy.ndarray), indices\n where the `indices` is a dictionary of the mapping\n 'file_name' -> (start_index, end_index) in the training\n data array\n\n NOTE\n ----\n from 1, 2, 4 components, python multi-threading is fastest\n from 8, 16 components, python multi-processing is fastest\n from > 32 components, GPU scales much much better.\n \"\"\"\n # if indices is given it should be sorted for optimal\n # memory access\n if not isinstance(X, (tuple, list)):\n X = (X,)\n sad = None\n indices = None\n if len(X) == 1:\n data = X[0]\n elif len(X) == 2:\n if hasattr(X[1], 'shape') and X[0].shape[0] == X[1].shape[0]:\n data, sad = X\n else:\n data, indices = X\n elif len(X) == 3:\n data, sad, indices = X\n else:\n raise ValueError(\"No support for `X` in type of list with length: %d\" % len(X))\n # validate data\n assert hasattr(data, 'shape') and data.ndim == 2, \\\n 'Input data must be instance of 2-D ndarray but give: %s' % str(type(data))\n # check if indices exist\n if indices is not None:\n if isinstance(indices, Mapping):\n indices = list(indices.items())\n indices = sorted(indices, key=lambda x: x[1][0])\n X = (data, indices)\n # otherwise, only data and sad are given\n else:\n X = data\n # ====== start GMM ====== #\n # supports 16384 components, modify for more components\n niter = [1, 2, 4, 4, 4, 4, 6, 6, 10, 10, 10, 10, 10, 16, 16]\n niter[int(np.log2(self._nmix))] = self._niter\n self._stop_fitting = False\n # run the algorithm\n while True:\n # fitting the mixtures\n curr_nmix = self._curr_nmix\n last_niter = len(self._llk_hist[curr_nmix])\n idx = int(np.log2(curr_nmix))\n curr_niter = niter[idx] - last_niter\n if curr_niter > 0:\n for i in range(curr_niter):\n self.expectation_maximization(X, sad=sad, print_progress=True)\n # check if stop now\n if self._stop_fitting:\n return self\n print('---')\n # update the mixtures\n if curr_nmix < self._nmix:\n self.gmm_mixup()\n else:\n break\n return self\n\n def score(self, X, y=None):\n \"\"\" Compute the log-likelihood of each example to\n the Mixture of Components.\n \"\"\"\n post = self.logprob(X) # (batch_size, nmix)\n return 
logsumexp(post, axis=1) # (batch_size, 1)\n\n def transform(self, X, zero=True, first=True, device=None):\n \"\"\" Compute centered statistics given X and fitted mixtures\n\n Parameters\n ----------\n X : ndarray\n input feature [n_samples, feat_dim] (e.g. all frames\n of an utterance for audio data)\n zero : bool (default: True)\n if True, return the zero-th order statistics\n first : bool (default: True)\n if True, return the first order statistics\n device : {None, 'cpu', 'gpu'}\n select device for execute the expectation calculation\n\n Return\n ------\n zero-th statistics: [1, nmix]\n e.g. the assignment score each samples to each components, hence,\n `#frames = Z.sum()`\n first statistics: [1, feat_dim * nmix]\n dot-product of each sample and the posteriors.\n\n NOTE\n ----\n For more option check `GMM.expectation`\n \"\"\"\n if device is None:\n device = self._device\n zero = bool(zero)\n first = bool(first)\n if not zero and not first:\n raise ValueError(\"One of `zero` or `first` must be True\")\n assert X.ndim == 2 and X.shape[1] == self.feat_dim, \\\n \"`X` must be 2-D matrix, with `X.shape[1]=%d`; but given: %s\" % \\\n (self.feat_dim, str(X.shape))\n # ====== expectation ====== #\n Z = None\n F = None; F_hat = None\n results = self._fast_expectation(X, zero=zero, first=first,\n second=False, llk=False,\n on_gpu=device != 'cpu')\n # ====== return the results ====== #\n if zero and first:\n Z, F = results\n # this equal to: .ravel()[np.newaxis, :]\n F_hat = np.reshape(F - self.mean * Z,\n newshape=(1, self.feat_dim * self._curr_nmix),\n order='F')\n return Z, F_hat\n elif zero and not first:\n Z = results\n return Z\n elif not zero and first:\n F = results\n # this equal to: .ravel()[np.newaxis, :]\n F_hat = np.reshape(F - self.mean * Z,\n newshape=(1, self.feat_dim * self._curr_nmix),\n order='F')\n return F_hat\n\n def transform_to_disk(self, X, indices, sad=None,\n pathZ=None, pathF=None, name_path=None,\n dtype='float32', device='cpu', ncpu=None,\n override=True):\n \"\"\" Same as `transform`, however, save the transformed statistics\n to file using `odin.fuel.MmapArray`\n\n Return\n ------\n zero-th statistics: [1, nmix]\n e.g. 
the assignment score each samples to each components, hence,\n `#frames = Z.sum()`\n first statistics: [1, feat_dim * nmix]\n dot-product of each sample and the posteriors.\n\n Note\n ----\n If your data contain many very long utterances, it is suggested to use\n `device='gpu'`, otherwise, 'cpu' is mostly significant faster.\n \"\"\"\n # ====== prepare inputs ====== #\n if isinstance(indices, Mapping):\n indices = sorted(indices.items(), key=lambda x: x[1][0])\n if sad is not None:\n assert sad.shape[0] == X.shape[0], \\\n \"Number of samples in `X` (%d) and `sad` (%d) are mismatched\" % (len(X), len(sad))\n assert sad.ndim == 1 or (sad.ndim == 2 and sad.shape[1] == 1), \\\n \"Invalid shape for `sad.shape=%s`\" % str(sad.shape)\n # ====== check device ====== #\n if device is None:\n device = self._device\n on_gpu = True if device != 'cpu' and get_ngpu() > 0 else False\n name_list = []\n prog = Progbar(target=len(indices),\n print_report=True, print_summary=True,\n name=\"Saving zero-th and first order statistics\")\n # ====== init data files ====== #\n if pathZ is not None:\n if os.path.exists(pathZ):\n if override:\n os.remove(pathZ)\n z_dat = MmapArray(path=pathZ, dtype=dtype,\n shape=(None, self.nmix))\n else:\n z_dat = None\n if pathF is not None:\n if os.path.exists(pathF):\n if override:\n os.remove(pathF)\n f_dat = MmapArray(path=pathF, dtype=dtype,\n shape=(None, self.nmix * self.feat_dim))\n else:\n f_dat = None\n\n # ====== helper ====== #\n def _update_zf(Z, F):\n # save zero-th stats\n if z_dat is not None:\n z_dat.append(Z)\n # save first stats\n if f_dat is not None:\n f_dat.append(F)\n\n def _batched_transform(s, e, on_gpu):\n reduction = np.floor(np.power(2, self._curr_nmix / 1024))\n batch_size = self.batch_size_gpu if on_gpu else self.batch_size_cpu\n batch_size = int(batch_size / reduction)\n x = X[s:e]\n if sad is not None:\n x = x[sad[s:e].astype('bool')]\n if x.shape[0] <= batch_size:\n res = [self._fast_expectation(x,\n zero=z_dat is not None or f_dat is not None,\n first=f_dat is not None,\n second=False, llk=False,\n on_gpu=on_gpu)]\n else:\n res = [self._fast_expectation(x[start:end],\n zero=z_dat is not None or f_dat is not None,\n first=f_dat is not None,\n second=False, llk=False,\n on_gpu=on_gpu)\n for start, end in minibatch(n=x.shape[0], batch_size=batch_size)]\n Z = sum(r[0] for r in res)\n if len(res[0]) == 2:\n F = sum(r[1] for r in res)\n F = np.reshape(a=F - self.mean * Z,\n newshape=(Z.shape[0], self._feat_dim * self._curr_nmix),\n order='F')\n else:\n F = None\n return Z, F\n # ====== running on GPU ====== #\n if on_gpu:\n for n, (start, end) in indices:\n Z, F = _batched_transform(start, end, on_gpu=True)\n _update_zf(Z, F)\n name_list.append(n)\n prog.add(1)\n # ====== run on CPU ====== #\n else:\n def map_func(j):\n Z_list, F_list = [], []\n name = []\n for n, (start, end) in j:\n name.append(n)\n Z, F = _batched_transform(start, end, on_gpu=False)\n if z_dat is not None:\n Z_list.append(Z)\n if f_dat is not None:\n F_list.append(F)\n yield 1\n # concatenate into single large matrix\n if len(Z_list) > 0:\n Z_list = np.concatenate(Z_list, axis=0)\n if len(F_list) > 0:\n F_list = np.concatenate(F_list, axis=0)\n yield name, Z_list, F_list\n # run the MPI task\n mpi = MPI(jobs=list(indices.items()) if isinstance(indices, Mapping)\n else indices,\n func=map_func,\n ncpu=self.ncpu if ncpu is None else int(ncpu),\n batch=max(2, self.batch_size_cpu // (self.ncpu * 2)),\n hwm=2**25)\n for results in mpi:\n if is_number(results):\n prog['Z_path'] = 
str(pathZ)\n prog['F_path'] = str(pathF)\n prog.add(results)\n else:\n name, Z, F = results\n _update_zf(Z, F)\n name_list += name\n # ====== flush and return ====== #\n if z_dat is not None:\n z_dat.flush()\n z_dat.close()\n if f_dat is not None:\n f_dat.flush()\n f_dat.close()\n # ====== save name_list ====== #\n if isinstance(name_path, string_types):\n np.savetxt(fname=name_path, X=name_list, fmt='%s')\n return name_list\n\n # ==================== math helper ==================== #\n def logprob(self, X):\n \"\"\" Shape: [batch_size, nmix]\n the log probability of each observations to each components\n given the GMM.\n \"\"\"\n self.initialize(X)\n if self._device != 'cpu':\n feed_dict = {self.X_: X}\n feed_dict[self.__expressions_gpu['mu']] = self.mean\n feed_dict[self.__expressions_gpu['sigma']] = self.sigma\n feed_dict[self.__expressions_gpu['w']] = self.w\n return K.eval(x=self.__expressions_gpu['logprob'],\n feed_dict=feed_dict)\n # ====== run on numpy ====== #\n # (feat_dim, nmix)\n precision = self.__expressions_cpu['precision']\n mu_precision = self.__expressions_cpu['mu_precision']\n C = self.__expressions_cpu['C']\n X_2 = X ** 2\n D = np.dot(X_2, precision) - \\\n 2 * np.dot(X, mu_precision) + \\\n self._feat_const\n # (batch_size, nmix)\n logprob = -0.5 * (C + D)\n return logprob\n\n def postprob(self, X, gpu='auto'):\n \"\"\" Shape: (batch_size, nmix)\n The posterior probability of mixtures for each frame\n \"\"\"\n self.initialize(X)\n if self._device != 'cpu':\n feed_dict = {self.X_: X}\n feed_dict[self.__expressions_gpu['mu']] = self.mean\n feed_dict[self.__expressions_gpu['sigma']] = self.sigma\n feed_dict[self.__expressions_gpu['w']] = self.w\n return K.eval(x=self.__expressions_gpu['post'],\n feed_dict=feed_dict)\n # ====== run on numpy ====== #\n # (feat_dim, nmix)\n precision = self.__expressions_cpu['precision']\n mu_precision = self.__expressions_cpu['mu_precision']\n C = self.__expressions_cpu['C']\n X_2 = X ** 2\n D = np.dot(X_2, precision) - \\\n 2 * np.dot(X, mu_precision) + \\\n self._feat_const\n # (batch_size, nmix)\n logprob = -0.5 * (C + D)\n # ====== posterior and likelihood ====== #\n llk = logsumexp(logprob, axis=1) # (batch_size, 1)\n post = np.exp(logprob - llk) # (batch_size, nmix)\n return post\n\n def llk(self, X, gpu='auto'):\n \"\"\" Shape: (batch_size, 1)\n The log-likelihood value of each frame to all components\n \"\"\"\n self.initialize(X)\n if self._device != 'cpu':\n feed_dict = {self.X_: X}\n feed_dict[self.__expressions_gpu['mu']] = self.mean\n feed_dict[self.__expressions_gpu['sigma']] = self.sigma\n feed_dict[self.__expressions_gpu['w']] = self.w\n return K.eval(x=self.__expressions_gpu['llk'],\n feed_dict=feed_dict)\n # ====== run on numpy ====== #\n # (feat_dim, nmix)\n precision = self.__expressions_cpu['precision']\n mu_precision = self.__expressions_cpu['mu_precision']\n C = self.__expressions_cpu['C']\n X_2 = X ** 2\n D = np.dot(X_2, precision) - \\\n 2 * np.dot(X, mu_precision) + \\\n self._feat_const\n # (batch_size, nmix)\n logprob = -0.5 * (C + D)\n # ====== posterior and likelihood ====== #\n llk = logsumexp(logprob, axis=1) # (batch_size, 1)\n return llk\n\n def _fast_expectation(self, X, zero=True, first=True, second=True,\n llk=True, on_gpu=False):\n # ====== run on GPU ====== #\n if on_gpu:\n Z, F, S, L = [self.__expressions_gpu[name]\n for name in ('zero', 'first', 'second', 'L')]\n feed_dict = {self.X_: X}\n feed_dict[self.__expressions_gpu['mu']] = self.mean\n feed_dict[self.__expressions_gpu['sigma']] = self.sigma\n 
feed_dict[self.__expressions_gpu['w']] = self.w\n outputs = [i for i, j in zip((Z, F, S, L),\n (zero, first, second, llk))\n if j]\n results = K.eval(x=outputs, feed_dict=feed_dict)\n # ====== run on numpy ====== #\n else:\n results = []\n # (feat_dim, nmix)\n precision = self.__expressions_cpu['precision']\n mu_precision = self.__expressions_cpu['mu_precision']\n C = self.__expressions_cpu['C']\n X_2 = X ** 2\n D = np.dot(X_2, precision) - \\\n 2 * np.dot(X, mu_precision) + \\\n self._feat_const\n # (batch_size, nmix)\n logprob = -0.5 * (C + D)\n # ====== posterior and likelihood ====== #\n LLK = logsumexp(logprob, axis=1) # (batch_size, 1)\n post = np.exp(logprob - LLK) # (batch_size, nmix)\n # ====== expectation ====== #\n if zero:\n Z = zeroStat(post)\n results.append(Z)\n if first:\n F = firstStat(X, post)\n results.append(F)\n if second:\n S = np.dot(X_2.T, post) # dont calculate X**2 again\n results.append(S)\n if llk:\n L = np.sum(LLK, axis=None)\n results.append(L)\n # ====== return ====== #\n return results if len(results) > 1 else results[0]\n\n def expectation(self, X, sad=None,\n zero=True, first=True, second=True,\n llk=True, device=None, print_progress=True):\n \"\"\"\n Parameters\n ----------\n X : numpy.ndarray [batch_size, feat_dim]\n input array, with feature dimension is the final dimension\n zero : bool (default: True)\n if True, return zero-order statistics\n first : bool (default: True)\n if True, return first-order statistics\n second : bool (default: True)\n if True, return second-order statistics\n llk : bool (default: True)\n if True, return the mean log-likelihood\n device : {None, 'cpu', 'gpu', 'mix'}\n None - keep the orginal device specified in init\n 'gpu' - run the computaiton on GPU\n 'cpu' - use multiprocessing for multiple cores\n 'mix' - use both GPU and multi-processing\n print_progress : bool (default: True)\n if fitting required multiple batches, print the\n progress bar.\n\n Return\n ------\n The order of return value:\n zero (optional) : ndarray [1, nmix]\n first (optional) : ndarray [feat_dim, nmix]\n second(optional) : ndarray [feat_dim, nmix]\n llk (optional) : scalar ()\n \"\"\"\n X, indices = self.initialize(X)\n if sad is not None:\n assert sad.shape[0] == X.shape[0], \\\n \"Number of samples for X and sad mismatch X.shape=%s and sad.shape=%s\" %\\\n (X.shape, sad.shape)\n assert sad.ndim == 1 or (sad.ndim == 2 and sad.shape[1] == 1), \\\n \"`sad` must be 1-D array or 2-D array with second dimension equal to 1\"\n # ====== total number of sample (WITHOUT SAD) ====== #\n if indices is None:\n n_samples = X.shape[0]\n else:\n n_samples = sum(end - start\n for name, (start, end) in indices)\n # ====== pick device ====== #\n device = self._device if device is None else str(device).lower()\n if device not in ('gpu', 'cpu', 'mix'):\n raise ValueError(\"`device` can only be of the following:\"\n \"'gpu', 'cpu', and 'mix'.\")\n # ====== only 1 batch ====== #\n if (n_samples <= self.batch_size_cpu and self._device == 'cpu') or\\\n (n_samples <= self.batch_size_gpu and self._device in ('gpu', 'mix')):\n # NO indices\n if indices is None:\n X_sad = X if sad is None else X[sad.astype('bool')]\n # given indices\n else:\n X_sad = []\n for name, (start, end) in indices:\n X_sad.append(X[start:end]\n if sad is None else\n X[start:end][sad[start:end].astype('bool')])\n X_sad = np.concatenate(X_sad, axis=0)\n # applying _fast_expectation\n results = self._fast_expectation(X_sad, zero, first, second, llk,\n on_gpu=self._device != 'cpu')\n # calculate 
log-likelihood\n # (NOTE: after applying SAD, the number of sample may reduced)\n if llk:\n if isinstance(results, (tuple, list)):\n results = tuple(results[:-1]) + (np.array(results[-1] / X_sad.shape[0]),)\n else: # only llk returned\n results = np.array(results / X_sad.shape[0])\n return results\n # ====== mapping method ====== #\n curr_niter = len(self._llk_hist[self._curr_nmix])\n curr_nmix = self._curr_nmix\n\n def map_expectation(start_end_gpu):\n reduction = np.floor(np.power(2, curr_nmix / 1024))\n get_batch_size = lambda on_gpu: int((self.batch_size_gpu if on_gpu\n else self.batch_size_cpu) / reduction)\n # NO indices\n if indices is None:\n (start, end), on_gpu = start_end_gpu\n batch_iterator = _create_batch(X, sad, start, end,\n batch_size=get_batch_size(on_gpu),\n downsample=self.downsample,\n stochastic=self.stochastic_downsample,\n seed=self._seed,\n curr_nmix=curr_nmix,\n curr_niter=curr_niter)\n # Given indices\n else:\n jobs, on_gpu = start_end_gpu\n batch_iterator = _create_batch_indices(X, sad, jobs,\n batch_size=get_batch_size(on_gpu),\n downsample=self.downsample,\n stochastic=self.stochastic_downsample,\n seed=self._seed,\n curr_nmix=curr_nmix,\n curr_niter=curr_niter)\n # Z, F, S, L, n_frames\n results = [0., 0., 0., 0., 0]\n for y, n_selected_frame, n_original_sample in batch_iterator:\n # update expectation\n if y is not None:\n for i, res in enumerate(\n self._fast_expectation(y, zero=True, first=True, second=True,\n llk=True, on_gpu=on_gpu)):\n results[i] += res\n results[-1] += n_selected_frame\n # return the progress\n yield n_original_sample\n yield tuple(results)\n\n def thread_expectation(results, start_end):\n for res in map_expectation((start_end, True)):\n results.update(res)\n # ====== split the jobs ====== #\n jobs_cpu, jobs_gpu = _split_jobs(n_samples=n_samples,\n ncpu=self.ncpu, device=device,\n gpu_factor=self.gpu_factor)\n # ====== convert jobs to indices jobs ====== #\n if indices is not None:\n indices = list(indices)\n # convert GPU jobs first as priority\n new_gpu_jobs = []\n for s, e in jobs_gpu:\n j = []\n n = e - s\n while n > 0 and len(indices) >= 1:\n tmp = indices.pop()\n n -= tmp[1][1] - tmp[1][0]\n j.append(tmp)\n new_gpu_jobs.append(j)\n jobs_gpu = new_gpu_jobs\n # convert CPU jobs\n new_cpu_jobs = []\n for s, e in jobs_cpu:\n j = []\n n = e - s\n while n > 0 and len(indices) >= 1:\n tmp = indices.pop()\n n -= tmp[1][1] - tmp[1][0]\n j.append(tmp)\n new_cpu_jobs.append(j)\n jobs_cpu = new_cpu_jobs\n # ====== run multiprocessing ====== #\n # Z, F, S, L, nfr\n results = _ExpectationResults(n_samples=n_samples, nb_results=5,\n name=\"[GMM] cmix:%d nmix:%d ndim:%d iter:%d\" %\n (curr_nmix, self.nmix, self.feat_dim, curr_niter + 1),\n print_progress=print_progress)\n mpi = []\n if len(jobs_cpu) > 0:\n # create CPU processes\n mpi = MPI(jobs=[(j, False) for j in jobs_cpu],\n func=map_expectation,\n ncpu=self.ncpu, batch=1, hwm=2**25,\n backend='python')\n # create GPU threads\n gpu_threads = [threading.Thread(target=thread_expectation,\n args=(results, j))\n for j in jobs_gpu]\n # start gpu and cpu threads\n for t in gpu_threads:\n t.start()\n # start the cpu processes\n for res in mpi:\n results.update(res)\n # finish all threads\n for t in gpu_threads:\n t.join()\n # ====== summary ====== #\n Z, F, S, L, nfr = results.stats\n L = L / nfr if nfr > 0 else 0\n results = []\n if zero:\n results.append(Z)\n if first:\n results.append(F)\n if second:\n results.append(S)\n if llk:\n results.append(L)\n return results[0] if len(results) == 1 else 
results\n\n def maximization(self, Z, F, S, floor_const=None):\n \"\"\"\n Parameters\n ----------\n Z : numpy.ndarray (1, nmix)\n zero statistics\n F : numpy.ndarray (feat_dim, nmix)\n first-order statistics\n S : numpy.ndarray (feat_dim, nmix)\n second-order statistics\n floor_const : {None, small float}\n numerical stablize the sigma (e.g. 1e-3)\n \"\"\"\n last_parameters = [np.array(self.w),\n np.array(self.mean),\n np.array(self.sigma)]\n # TheReduce\n iN = 1. / (Z + EPS)\n self.w = Z / Z.sum()\n self.mean = F * iN\n self.sigma = S * iN - self.mean ** 2\n # applying variance floors\n if floor_const is not None:\n vFloor = self.sigma.dot(self.w.T) * floor_const\n self.sigma = self.sigma.clip(vFloor)\n # IMPORTANT: keep sigma >= 0 for numberical stability\n if np.any(self.sigma == 0.):\n wprint(\"[GMM] Some Sigma elements go to zeros\")\n if np.any(self.sigma < 0.):\n eprint(\"[GMM] Numberical instability, Sigma values went smaller than 0!\")\n # check if rollback\n if self.allow_rollback:\n self.w = last_parameters[0]\n self.mean = last_parameters[1]\n self.sigma = last_parameters[2]\n else:\n self.sigma = np.clip(self.sigma, a_min=0., a_max=np.Inf)\n # check if quit fitting\n if self.exit_on_error:\n self._stop_fitting = True\n # refresh cpu cached value\n self._resfresh_cpu_posterior()\n del last_parameters\n return self\n\n def expectation_maximization(self, X, sad=None, device=None, print_progress=True):\n self.initialize(X)\n curr_nmix = self._curr_nmix\n curr_niter = len(self._llk_hist[curr_nmix]) + 1\n # ====== Expectation ====== #\n start_time = time.time()\n Z, F, S, L = self.expectation(X, sad=sad,\n device=device, print_progress=print_progress)\n time_Estep = time.time() - start_time\n # ====== maximization ====== #\n start_time = time.time()\n self.maximization(Z, F, S)\n time_Mstep = time.time() - start_time\n # store history\n self._llk_hist[self._curr_nmix].append(L)\n # print log\n if print_progress:\n print(\"#mix:%s #iter:%s llk:%s Estep:%s(s) Mstep:%s(s)\" %\n (ctext('%.2d' % curr_nmix, 'cyan'),\n ctext('%.2d' % curr_niter, 'yellow'),\n ctext('%.4f' % L, 'yellow'),\n ctext('%.2f' % time_Estep, 'yellow'),\n ctext('%.4f' % time_Mstep, 'yellow'),\n ))\n # ====== save the checkpoint ====== #\n if self.path is not None:\n with open(self.path, 'wb') as f:\n pickle.dump(self, f)\n return self\n\n def gmm_mixup(self):\n if self._curr_nmix >= self._nmix:\n return\n # ====== create perturb ====== #\n ndim, nmix = self.sigma.shape\n sig_max, arg_max = self.sigma.max(0), self.sigma.argmax(0)\n eps = np.zeros((ndim, nmix), dtype='f')\n eps[arg_max, np.arange(nmix)] = np.sqrt(sig_max)\n perturb = 0.55 * eps\n # ====== double up the components ====== #\n if self._curr_nmix * 2 <= self._nmix:\n self.mean = np.c_[self.mean - perturb, self.mean + perturb]\n self.sigma = np.c_[self.sigma, self.sigma]\n self.w = 0.5 * np.c_[self.w, self.w]\n # ====== if too many components removes to match desire number ====== #\n else:\n # TODO: better strategy for mixup here\n self.mean = np.c_[self.mean - perturb, self.mean + perturb][:, :self.nmix]\n self.sigma = np.c_[self.sigma, self.sigma]\n self.sigma = self.sigma[:, :self.nmix]\n self.w = 0.5 * np.c_[self.w, self.w]\n self.w = self.w[:, :self.nmix]\n # update current number of mixture information\n self._curr_nmix = min(2 * self._curr_nmix, self.nmix)\n self._refresh_gpu_posterior()\n self._resfresh_cpu_posterior()\n # ====== save the checkpoint ====== #\n if self.path is not None:\n with open(self.path, 'wb') as f:\n pickle.dump(self, f)\n 
return self\n\n# ===========================================================================\n# Tmatrix\n# ===========================================================================\nclass Tmatrix(DensityMixin, BaseEstimator, TransformerMixin):\n \"\"\" Tmatrix training for i-vectors extraction\n based on total varibility space.\n\n Parameters\n ----------\n tv_dim : int\n dimension of T-matrix\n gmm : odin.ml.gmm.GMM\n initialized and fitted GMM\n niter : int (default: 16)\n number of iteration for E-M algorithm\n batch_size : {int, 'auto'}\n if 'auto', used `25 Megabytes` block for batch size.\n dtype : {str, numpy.dtype} (default: float64)\n desire dtype for mean, std, weights and input matrices\n The computation of Tmatrix involves matrices invert, it\n is recommended to keep 'float64' since significant\n amount of computation can be performed on CPU.\n device : {'cpu', 'gpu', 'mix'}\n 'gpu' - run the computaiton on GPU\n 'cpu' - use multiprocessing for multiple cores\n 'mix' - use both GPU and multi-processing\n * It is suggested to use mix of GPU and CPU if you have\n more than 24 cores CPU, otherwise, 'gpu' gives the best\n performance\n ncpu : int (default: 1)\n number of processes for parallel calculating Expectation\n NOTE: it is recommended to keep number of CPU to 1\n since the numpy implementation of matrix invert using\n multi-thread already.\n gpu_factor : int\n how much jobs GPU will handle more than CPU\n (i.e. `njob_gpu = gpu_factor * njob_cpu`)\n cache_path : str\n path to cache folder when fitting\n seed : int\n random seed for reproducible\n path : {str, None}\n If given a path, save the model after everytime its\n parameters changed (i.e. `maximization` is called)\n name : {str, None}\n special name for this `Tmatrix` instance\n\n Attributes\n ----------\n Tm : (tv_dim, feat_dim * nmix)\n latent vector for each mixtures and features\n T_invS : (tv_dim, feat_dim * nmix)\n Tm / GMM.Sigma\n T_invS_Tt : (nmix, tv_dim * (tv_dim + 1) / 2)\n lower half of the inverted T-matrix\n\n Note\n ----\n If you have built numpy with an optimized BLAS like OpenBLAS or\n MKL (which is the case if you got numpy from pypi or anaconda),\n it's likely that the inv operation which is probably the bottleneck\n of your code is already multithreaded.\n Therefore there is no point trying to parallelize on top of that.\n\n You should increase the `batch_size` instead of `ncpu` if there\n are idle resources.\n\n \"\"\"\n\n STANDARD_CPU_BATCH_SIZE = 64 * 1024 * 1024 # 64 Megabytes\n STANDARD_GPU_BATCH_SIZE = 64 * 1024 * 1024 # 64 Megabytes\n\n def __init__(self, tv_dim, gmm, niter=16, dtype='float64',\n batch_size_cpu='auto', batch_size_gpu='auto',\n device='mix', ncpu=1, gpu_factor=3,\n cache_path='/tmp', seed=1234,\n path=None, name=None):\n super(Tmatrix, self).__init__()\n if not (isinstance(gmm, GMM) and gmm.is_initialized and gmm.is_fitted):\n raise ValueError(\"`gmm` must be instance of odin.ml.gmm.GMM \"\n \"both is_initialized and is_fitted.\")\n self._is_fitted = False\n # ====== init ====== #\n self.niter = niter\n self._tv_dim = tv_dim\n self._t2_dim = tv_dim * (tv_dim + 1) // 2\n # ====== setting the gmm ====== #\n self._feat_dim = gmm.feat_dim\n self._nmix = gmm.nmix\n self._gmm = gmm\n # ====== others ====== #\n self._path = path if isinstance(path, string_types) else None\n self._seed = seed\n self._llk_hist = []\n if name is None:\n name = uuid(length=8)\n self._name = 'Tmatrix_%s' % name\n else:\n self._name = str(name)\n if not os.path.isdir(cache_path):\n raise 
ValueError('`cache_path` must be a directory.')\n self.cache_path = cache_path\n # ====== training ====== #\n self._dtype = np.dtype(dtype)\n # CPU batch\n if isinstance(batch_size_cpu, string_types):\n batch_size_cpu = int(Tmatrix.STANDARD_CPU_BATCH_SIZE /\n ((self.feat_dim * self.nmix * self.dtype.itemsize) +\n (self.nmix * self.dtype.itemsize)))\n self.batch_size_cpu = batch_size_cpu\n # GPU batch\n if isinstance(batch_size_gpu, string_types):\n batch_size_gpu = int(Tmatrix.STANDARD_GPU_BATCH_SIZE /\n ((self.feat_dim * self.nmix * self.dtype.itemsize) +\n (self.nmix * self.dtype.itemsize)))\n self.batch_size_gpu = batch_size_gpu\n # ====== select device ====== #\n self.set_device(device)\n # cpu\n if ncpu is None:\n ncpu = cpu_count() // 2\n self.ncpu = int(ncpu)\n self.gpu_factor = int(gpu_factor)\n # ====== load ubm ====== #\n self.Im = np.eye(self.tv_dim, dtype=self.dtype)\n self.Sigma = np.array(\n gmm.sigma.reshape((1, self.feat_dim * self.nmix), order='F'),\n dtype=self.dtype)\n np.random.seed(self._seed)\n self.Tm = (np.random.randn(self.tv_dim, self.feat_dim * self.nmix) *\n self.Sigma.sum() * 0.001).astype(self.dtype)\n self.T_invS_Tt = np.empty((self.nmix, self.t2_dim), dtype=self.dtype)\n # ====== cache, 10% faster here ====== #\n self._itril = np.tril_indices(self.tv_dim)\n self._Ex_Exx_llk = defaultdictkey(\n lambda nfiles: (np.empty((nfiles, self.tv_dim), dtype=self.dtype),\n np.empty((nfiles, self.t2_dim), dtype=self.dtype),\n np.empty((nfiles, 1), dtype=self.dtype)))\n # ====== calculate stats first ====== #\n self._refresh_T_statistics()\n self._refresh_gpu()\n\n def __getstate__(self):\n return (self.Im, self.Sigma, self.Tm, self._gmm,\n self._tv_dim, self._t2_dim, self._feat_dim, self._nmix,\n self._seed, self._llk_hist,\n self.batch_size_cpu, self.batch_size_gpu,\n self.niter, self.ncpu, self._device, self.gpu_factor,\n self.cache_path, self._dtype,\n self._is_fitted, self._path, self._name)\n\n def __setstate__(self, states):\n (self.Im, self.Sigma, self.Tm, self._gmm,\n self._tv_dim, self._t2_dim, self._feat_dim, self._nmix,\n self._seed, self._llk_hist,\n self.batch_size_cpu, self.batch_size_gpu,\n self.niter, self.ncpu, self._device, self.gpu_factor,\n self.cache_path, self._dtype,\n self._is_fitted, self._path, self._name) = states\n # ====== re-init ====== #\n self.T_invS_Tt = np.empty((self.nmix, self.t2_dim), dtype=self.dtype)\n self._itril = np.tril_indices(self.tv_dim)\n self._Ex_Exx_llk = defaultdictkey(\n lambda nfiles: (np.empty((nfiles, self.tv_dim), dtype=self.dtype),\n np.empty((nfiles, self.t2_dim), dtype=self.dtype),\n np.empty((nfiles, 1), dtype=self.dtype)))\n # ====== calculate stats first ====== #\n self._refresh_T_statistics()\n self._refresh_gpu()\n # ====== warning no GPU ====== #\n if self._device in ('gpu', 'mix') and get_ngpu() == 0:\n wprint(\"Enabled GPU device, but no GPU found!\")\n\n def __str__(self):\n s = '<\"%s\" Tdim:%s nmix:%s ndim:%s niter:%s CPU:%s GPU:%s>' %\\\n (ctext(self.name, 'yellow'),\n ctext(self._tv_dim, 'cyan'),\n ctext(self._nmix, 'cyan'),\n ctext(self._feat_dim, 'cyan'),\n ctext(len(self._llk_hist), 'cyan'),\n ctext(self.batch_size_cpu, 'cyan'),\n ctext(self.batch_size_gpu, 'cyan'),\n )\n return s\n\n # ==================== properties ==================== #\n def set_device(self, device):\n device = str(device).lower()\n if device not in ('cpu', 'gpu', 'mix'):\n raise ValueError(\"`device` must be one of the following: 'cpu', 'gpu', or 'mix'\")\n # ====== warning no GPU ====== #\n if device in ('gpu', 'mix') and 
get_ngpu() == 0:\n wprint(\"Using GPU device but NO GPU detected, \"\n \"tensorflow will switch to slower CPU computation!\")\n self._device = device\n return self\n\n @property\n def device(self):\n return self._device\n\n @property\n def feat_dim(self):\n return self._feat_dim\n\n @property\n def tv_dim(self):\n return self._tv_dim\n\n @property\n def t2_dim(self):\n return self._t2_dim\n\n @property\n def nmix(self):\n return self._nmix\n\n @property\n def path(self):\n return self._path\n\n @property\n def name(self):\n return self._name\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def gmm(self):\n return self._gmm\n\n @property\n def is_fitted(self):\n return self._is_fitted\n\n # ==================== i-vec ==================== #\n def _refresh_T_statistics(self):\n \"\"\" depend on: Tm and Sigma \"\"\"\n # (tv_dim, feat_dim * nmix)\n self.T_invS = self.Tm / (self.Sigma + EPS)\n # T_invS_Tt: (nmix, tv_dim * (tv_dim + 1) / 2)\n T_invS2 = self.Tm / (np.sqrt(self.Sigma) + EPS)\n # update each row for each mixture\n for mix in range(self.nmix):\n start = self.feat_dim * mix\n end = start + self.feat_dim\n tmp = T_invS2[:, start:end].dot(T_invS2[:, start:end].T)\n self.T_invS_Tt[mix] = tmp[self._itril]\n\n def _refresh_gpu(self):\n if hasattr(self, '_gpu_inputs') and hasattr(self, '_gpu_outputs'):\n return\n with tf.variable_scope(self.name):\n Z = K.placeholder(shape=(None, self.nmix),\n dtype=self.dtype,\n name='ZeroStats')\n F = K.placeholder(shape=(None, self.nmix * self.feat_dim),\n dtype=self.dtype,\n name='FirstStats')\n Tm = K.placeholder(shape=self.Tm.shape,\n dtype=self.dtype,\n name='T_matrix')\n T_invS_Tt = K.placeholder(shape=self.T_invS_Tt.shape,\n dtype=self.dtype,\n name='T_invS_Tt')\n Sigma = tf.constant(value=self.Sigma,\n dtype=self.dtype,\n name='GMM_Sigma')\n Im = tf.eye(self.tv_dim, dtype=self.dtype, name='Im')\n # ====== start the calculation ====== #\n T_invS = Tm / Sigma\n L1 = tf.matmul(Z, T_invS_Tt, name='L1') # (nfiles, t2_dim)\n B1 = tf.matmul(F, tf.transpose(T_invS), name='B1') # (nfiles, tv_dim)\n n_samples = tf.shape(L1)[0]\n IDX = tf.range(start=0, limit=n_samples, dtype='int32')\n\n # ====== repeat for each utterance (file) ====== #\n def map_expectation_fn(idx):\n l1 = L1[idx]\n b1 = B1[idx]\n L = tf.scatter_nd(indices=[(i, j) for i, j in zip(*self._itril)],\n updates=l1,\n shape=(self.tv_dim, self.tv_dim),\n name='L')\n L = L + tf.transpose(K.tril(L, k=-1)) + Im\n # matrix inverse NOT implemented on GPU anyway\n with tf.device('/cpu:0'):\n Cxx = tf.linalg.inv(L)\n B = tf.expand_dims(b1, axis=-1)\n this_Ex = tf.matmul(Cxx, B)\n Ex = tf.transpose(this_Ex)\n llk = -0.5 * tf.matmul(Ex, B - this_Ex) + tf.matmul(Ex, B)\n Exx = tf.gather_nd(params=(Cxx + tf.matmul(this_Ex, Ex)),\n indices=[(i, j) for i, j in zip(*self._itril)])\n return tf.concat((tf.squeeze(llk, axis=0),\n tf.squeeze(Ex, axis=0),\n Exx), axis=0)\n # ====== compute ====== #\n llk_Ex_Exx = tf.map_fn(fn=map_expectation_fn,\n elems=IDX, dtype=self.dtype,\n parallel_iterations=self.batch_size_gpu,\n swap_memory=False,\n back_prop=False)\n llk = llk_Ex_Exx[:, 0]\n Ex = llk_Ex_Exx[:, 1:1 + self.tv_dim]\n Exx = llk_Ex_Exx[:, 1 + self.tv_dim:]\n RU = tf.matmul(tf.transpose(Ex), F)\n LU = tf.matmul(tf.transpose(Z), Exx)\n llk = tf.reduce_sum(llk)\n # ====== assign inputs outputs for expectation step ====== #\n self._gpu_e_inputs = [Z, F, Tm, T_invS_Tt]\n self._gpu_e_outputs = [LU, RU, llk]\n # ====== assign inputs outputs for transforming ====== #\n self._gpu_t_inputs = 
self._gpu_e_inputs\n self._gpu_t_outputs = [Ex] # use _gpu_e_inputs\n # ==================== GPU maximization ==================== #\n # ML re-estimation of the total subspace matrix or the factor loading\n # matrix\n # (nmix, tdim * (tdim + 1) / 2)\n LU = K.placeholder(shape=(self.nmix, self.t2_dim),\n dtype=self.dtype,\n name='LU_plh')\n # (tdim, nmix * feat_dim)\n RU = K.placeholder(shape=(self.tv_dim, self.nmix * self.feat_dim),\n dtype=self.dtype,\n name='RU_plh')\n # add mixture ID\n MIX_ID = tf.range(start=0, limit=self.nmix, dtype='int32')\n\n # ====== repeat for each mixture ====== #\n def map_maximization_fn(mix):\n # lu\n lu = tf.scatter_nd(indices=[(i, j) for i, j in zip(*self._itril)],\n updates=LU[mix],\n shape=(self.tv_dim, self.tv_dim),\n name='lu')\n lu = lu + tf.transpose(K.tril(lu, k=-1))\n # ru\n ru = RU[:, mix * self.feat_dim: mix * self.feat_dim + self.feat_dim]\n ru.set_shape((self.tv_dim, self.feat_dim))\n # solve, faster when done on CPU for tensorflow\n with tf.device('/cpu:0'):\n t = tf.linalg.solve(matrix=lu, rhs=ru, adjoint=False, name=None)\n return t\n Tm = tf.map_fn(fn=map_maximization_fn,\n elems=MIX_ID, dtype=self.dtype,\n parallel_iterations=self.batch_size_gpu,\n swap_memory=False,\n back_prop=False)\n self._gpu_m_inputs = [LU, RU]\n self._gpu_m_outputs = Tm\n\n def _fast_expectation(self, Z, F, on_gpu):\n nframes = np.ceil(Z.sum())\n nfiles = F.shape[0]\n # ====== GPU ====== #\n if on_gpu:\n LU, RU, llk = K.eval(self._gpu_e_outputs,\n feed_dict={i: j for i, j in zip(self._gpu_e_inputs,\n (Z, F, self.Tm, self.T_invS_Tt))}\n )\n return LU, RU, llk, nframes\n # ====== CPU ====== #\n # (nfiles, tv_dim * (tv_dim + 1) / 2)\n L1 = np.dot(Z, self.T_invS_Tt)\n # (nfiles, tv_dim)\n B1 = np.dot(F, self.T_invS.T)\n Ex, Exx, llk = self._Ex_Exx_llk[nfiles]\n for ix in range(nfiles):\n L = np.zeros((self.tv_dim, self.tv_dim), dtype=self.dtype)\n L[self._itril] = L1[ix]\n L = L + np.tril(L, k=-1).T + self.Im\n Cxx = linalg.inv(L)\n B = B1[ix][:, np.newaxis]\n this_Ex = np.dot(Cxx, B)\n this_ExT = this_Ex.T\n Ex[ix] = this_ExT\n llk[ix] = -0.5 * this_ExT.dot(B - this_Ex) + this_ExT.dot(B)\n Exx[ix] = (Cxx + this_Ex.dot(this_ExT))[self._itril]\n # (tdim, nmix * feat_dim)\n RU = np.dot(Ex.T, F)\n # (nmix, tdim * (tdim + 1) / 2)\n LU = np.dot(Z.T, Exx)\n return LU, RU, llk.sum(), nframes\n\n def expectation(self, Z, F, device=None, print_progress=True):\n \"\"\"\n Return\n ------\n LU : numpy.ndarray (tdim, nmix * feat_dim)\n RU : numpy.ndarray (nmix, tdim * (tdim + 1) / 2)\n llk : scalar (float)\n nframes : scalar (int)\n \"\"\"\n if device is None:\n device = self._device\n nfiles = Z.shape[0]\n # ====== single batch ====== #\n if (nfiles <= self.batch_size_cpu and device == 'cpu') or \\\n (nfiles <= self.batch_size_gpu and device in ('mix', 'gpu')):\n return self._fast_expectation(Z=Z, F=F,\n on_gpu=False if device == 'cpu' else True)\n # ====== multiple batches ====== #\n else:\n def _map_expectation(start, end, on_gpu):\n batch_size = self.batch_size_gpu if on_gpu else self.batch_size_cpu\n for s, e in minibatch(n=end - start, batch_size=batch_size):\n s += start\n e += start\n nfiles = e - s\n yield (self._fast_expectation(Z=Z[s:e], F=F[s:e], on_gpu=on_gpu),\n nfiles)\n\n def _mpi_fn(start_end):\n start, end = start_end\n tmp = [0., 0., 0., 0.] 
# LU, RU, llk, nframes\n for res, nfiles in _map_expectation(start, end, on_gpu=False):\n yield nfiles\n for i, r in enumerate(res):\n tmp[i] += r\n # LU return size in Gigabytes\n size = array_size(tmp[0]) / (1024 ** 3)\n if size > 1:\n tmp[0] = tmp[0].astype('float32')\n elif size > 2:\n tmp[0] = tmp[0].astype('float16')\n # RU return size in Gigabytes\n size = array_size(tmp[1]) / (1024 ** 3)\n if size > 1:\n tmp[1] = tmp[1].astype('float32')\n elif size > 2:\n tmp[1] = tmp[1].astype('float16')\n yield tmp\n\n def _thread_fn(start_end):\n start, end = start_end\n tmp = [0., 0., 0., 0.] # LU, RU, llk, nframes\n for res, nfiles in _map_expectation(start, end, on_gpu=True):\n results.update(nfiles)\n for i, r in enumerate(res):\n tmp[i] += r\n results.update(tmp)\n # ====== prepare the jobs ====== #\n jobs_cpu, jobs_gpu = _split_jobs(n_samples=nfiles, ncpu=self.ncpu,\n device=device,\n gpu_factor=self.gpu_factor)\n # LU, RU, llk, nframes\n results = _ExpectationResults(n_samples=nfiles, nb_results=4,\n name=\"[Tmatrix] Tdim:%d nmix:%d feat_dim:%d iter:%d\" %\n (self.tv_dim, self.nmix, self.feat_dim,\n len(self._llk_hist) + 1),\n print_progress=print_progress)\n # ====== create gpu thread ====== #\n mpi = MPI(jobs=jobs_cpu, func=_mpi_fn,\n ncpu=self.ncpu, batch=1, hwm=2**25)\n # yield in _map_expectation, make it become a generator\n threads = [threading.Thread(target=_thread_fn, args=(j,))\n for j in jobs_gpu]\n # start gpu and threads\n for t in threads:\n t.start()\n # run the mpi\n for r in mpi:\n if not is_number(r):\n # r is downsample to prevent overloading multiprocessing Pipe\n r = [i.astype(self.dtype)\n if isinstance(i, np.ndarray) and i.dtype != self.dtype\n else i\n for i in r]\n results.update(r)\n # finish all threads\n for t in threads:\n t.join()\n # return\n return results.stats\n\n def maximization(self, LU, RU, nframes=None,\n min_div_est=True, orthogonalize=True):\n # the call to maximization always update the T-matrix\n # hence, the model is fitted.\n self._is_fitted = True\n # ML re-estimation of the total subspace matrix or the factor loading\n # matrix\n # ====== Multi-processing on CPU ====== #\n prog = Progbar(target=self.nmix,\n print_report=True,\n print_summary=False,\n name=\"[Tmatrix] Maximization #mix:%d #iter:%d device:%s\" %\n (self.nmix, len(self._llk_hist),\n 'CPU' if self.device == 'cpu' else 'GPU'))\n if self.device == 'cpu':\n for mix in range(self.nmix):\n prog.add(1)\n lu = np.zeros((self.tv_dim, self.tv_dim), dtype=self.dtype)\n lu[self._itril] = LU[mix, :]\n lu += np.tril(lu, -1).T\n start = self.feat_dim * mix\n end = start + self.feat_dim\n self.Tm[:, start:end] = linalg.solve(lu, RU[:, start:end])\n # ====== on GPU ====== #\n else:\n Tm = K.eval(self._gpu_m_outputs,\n feed_dict={i: j for i, j in zip(self._gpu_m_inputs,\n (LU, RU))})\n for mix, solution in enumerate(Tm):\n start = self.feat_dim * mix\n end = start + self.feat_dim\n self.Tm[:, start:end] = solution\n # ====== min_div_est ====== #\n if min_div_est:\n if nframes is None:\n raise ValueError(\"`nframes` must be specified if `min_div_est=True`\")\n lu = np.zeros((self.tv_dim, self.tv_dim))\n lu[self._itril] = LU.sum(0) / nframes\n lu += np.tril(lu, -1).T\n self.Tm = np.dot(linalg.cholesky(lu), self.Tm)\n # ====== orthogonalize the columns ====== #\n if orthogonalize:\n U_, s_, V_ = linalg.svd(self.Tm, full_matrices=False)\n self.Tm = np.diag(s_).dot(V_)\n # refresh stats\n self.Tm = self.Tm.astype(self.dtype)\n self._refresh_T_statistics()\n return self\n\n def 
expectation_maximization(self, Z, F, device=None, print_progress=True):\n nfiles = Z.shape[0]\n # ====== Expectation ====== #\n start_time = time.time()\n LU, RU, LLK, nframes = self.expectation(Z=Z, F=F, device=device,\n print_progress=print_progress)\n time_Estep = time.time() - start_time\n # ====== maximization ====== #\n start_time = time.time()\n self.maximization(LU, RU, nframes,\n min_div_est=True, orthogonalize=True)\n time_Mstep = time.time() - start_time\n # store history\n LLK = LLK / nfiles\n self._llk_hist.append(LLK)\n # print log\n if print_progress:\n print(\"T-dim:%s #iter:%s llk:%s Estep:%s(s) Mstep:%s(s)\" %\n (ctext('%d' % self.tv_dim, 'cyan'),\n ctext('%.2d' % len(self._llk_hist), 'yellow'),\n ctext('%.4f' % LLK, 'yellow'),\n ctext('%.2f' % time_Estep, 'yellow'),\n ctext('%.4f' % time_Mstep, 'yellow'),\n ))\n # ====== save the checkpoint ====== #\n if self.path is not None:\n with open(self.path, 'wb') as f:\n pickle.dump(self, f)\n return self\n\n # ==================== sklearn ==================== #\n def transform(self, X):\n \"\"\" Extract i-vector from trained T-matrix\n\n Parameters\n ----------\n X : {tuple, list, numpy.ndarray, odin.fuel.data.MmapArray}\n if tuple or list is given, the inputs include:\n Z-[1, nmix]; F-[1, nmix*feat_dim]\n if numpy.ndarray is given, shape must be [n_samples, feat_dim]\n\n Return\n ------\n I-vector : (1, tv_dim)\n\n Note\n ----\n No need to parallel this function, `numpy.linalg.inv` is\n already a multi-threaded method, and will be bottleneck\n for `multiprocessing`\n\n \"\"\"\n # ====== GMM transform ====== #\n if isinstance(X, (tuple, list)):\n Z, F = X\n assert Z.ndim == 2 and Z.shape[1] == self.nmix, \\\n \"Zero-th order statistics must be 2-D matrix, and `Z.shape=[?, %d]; but given: %s\" % \\\n (self.nmix, str(Z.shape))\n assert F.ndim == 2 and F.shape[1] == self.nmix * self.feat_dim, \\\n \"First order statistics must be 2-D matrix, and `F.shape=[?, %d]; but given: %s\" % \\\n (self.nmix * self.feat_dim, str(F.shape))\n else:\n Z, F = self.gmm.transform(X)\n # ====== pass ====== #\n L = np.zeros((self.tv_dim, self.tv_dim),\n dtype=self.dtype)\n L[self._itril] = np.dot(Z, self.T_invS_Tt)\n L += np.tril(L, -1).T + self.Im\n # (tv_dim, tv_dim)\n Cxx = linalg.inv(L)\n # (tv_dim, 1)\n B = np.dot(self.T_invS, F.T)\n # (tv_dim, 1)\n Ex = np.dot(Cxx, B)\n # (1, tv_dim)\n return Ex.T\n\n def transform_to_disk(self, Z, F, path=None,\n dtype='float32', device='gpu', ncpu=None,\n override=True):\n \"\"\" Same as `transform`, however, save the transformed statistics\n to file using `odin.fuel.MmapArray`\n\n Parameters\n ----------\n Z : {None, numpy.ndarray, odin.fuel.data.MmapArray}\n array of zero-th order statistic [n_samples, nmix]\n F : {None, numpy.ndarray, odin.fuel.data.MmapArray}\n array of first-th order statistic [n_samples, nmix * feat_dim]\n path : {str, None}\n if str, saving path for extracted i-vector, otherwise,\n return numpy.ndarray for the i-vector\n\n Return\n ------\n i-vector : (1, tv_dim)\n\n Note\n ----\n this function return i-vectors in the same order provided\n by `Z` and `F`\n Calculation on `gpu` is approximated to the results from\n `cpu` that satisfied `np.allclose(gpu, cpu, rtol=1.e-5, atol=1.e-4)`,\n the final performance using cosine scoring, GMM and PLDA is identical.\n \"\"\"\n if device is None:\n device = self._device\n dtype = self.dtype if dtype is None else np.dtype(dtype)\n # ====== prepare inputs ====== #\n if Z is not None and F is not None:\n n_samples = Z.shape[0]\n if Z.shape[0] != 
F.shape[0]:\n raise ValueError(\"Number of samples in `Z` is %d which is different \"\n \"from %d samples in `F`\" % (Z.shape[0], F.shape[0]))\n else:\n raise ValueError(\"Input arguments must contain `X` and `indices`, or \"\n \"`Z` and `F`.\")\n # ====== Progbar ====== #\n prog = Progbar(target=n_samples,\n print_report=True, print_summary=True,\n name=\"Extracting %d-D i-vector\" % self.tv_dim)\n # ====== init data files ====== #\n if path is not None:\n if os.path.exists(path) and override:\n os.remove(path)\n dat = MmapArray(path=path, dtype=dtype,\n shape=(n_samples, self.tv_dim),\n read_only=False)\n else:\n dat = np.empty(shape=(n_samples, self.tv_dim),\n dtype=dtype)\n # ====== run on GPU ====== #\n if (device == 'gpu' or device == 'mix') and get_ngpu() > 0:\n for s, e in minibatch(batch_size=self.batch_size_gpu, n=n_samples):\n z_minibatch = Z[s:e]\n f_minibatch = F[s:e]\n Ex = K.eval(self._gpu_t_outputs,\n feed_dict={i: j for i, j in zip(self._gpu_t_inputs,\n (z_minibatch, f_minibatch, self.Tm, self.T_invS_Tt))}\n )\n prog.add(Ex[0].shape[0])\n dat[s:e] = Ex[0]\n # ====== run on CPU ====== #\n else:\n def extract_ivec(idx):\n vecs = []\n for i in idx:\n L = np.zeros((self.tv_dim, self.tv_dim),\n dtype=self.dtype)\n L[self._itril] = np.dot(Z[i:i + 1], self.T_invS_Tt)\n L += np.tril(L, -1).T + self.Im\n # (tv_dim, tv_dim)\n Cxx = linalg.inv(L)\n # (tv_dim, 1)\n B = np.dot(self.T_invS, F[i:i + 1].T)\n # (tv_dim, 1)\n Ex = np.dot(Cxx, B)\n # (1, tv_dim)\n ivec = Ex.T\n if ivec.dtype != dtype:\n ivec = ivec.astype(dtype)\n vecs.append((i, ivec))\n return vecs\n mpi = MPI(jobs=list(range(n_samples)), func=extract_ivec,\n ncpu=self.ncpu if ncpu is None else int(ncpu),\n batch=max(12, self.batch_size_cpu))\n for vecs in mpi:\n for i, v in vecs:\n dat[i:i + 1] = v\n prog.add(len(vecs))\n # ====== flush and close ====== #\n if path is not None:\n dat.flush()\n dat.close()\n return MmapArray(path=path, read_only=True)\n return dat\n\n def fit(self, X, y=None):\n \"\"\" Extract i-vector from trained T-matrix\n\n Parameters\n ----------\n X : {tuple, list; or numpy.ndarray}\n if tuple or list is given, the inputs include:\n Z-(1, nmix); F-(1, nmix*feat_dim)\n if numpy.ndarray and indices is given, shape must\n be (n, feat_dim), and the indices is list of dictionary\n representing the mapping: 'name' -> (start, end)\n \"\"\"\n randID = uuid(length=12)\n cache_Z = os.path.join(self.cache_path, 'Z_%s' % randID)\n cache_F = os.path.join(self.cache_path, 'F_%s' % randID)\n try:\n # ====== preprocessing inputs ====== #\n if not isinstance(X, (tuple, list)) and len(X) != 2:\n raise ValueError(\"`X` must be tuple or list of length 2.\")\n ### given X and indices\n if any((hasattr(i, 'shape') and i.shape[1] == self.feat_dim) for i in X) and \\\n any(isinstance(i, (tuple, list, Mapping)) for i in X):\n tmp = [i for i in X\n if hasattr(i, 'shape') and i.shape[1] == self.feat_dim][0]\n indices = [i for i in X if i != tmp][0]\n X = tmp\n self.gmm.transform_to_disk(X, indices, pathZ=cache_Z, pathF=cache_F,\n dtype='float32', device=None,\n override=True)\n Z = MmapArray(cache_Z, read_only=True)\n F = MmapArray(cache_F, read_only=True)\n ### given Z and F\n elif any(i.shape[1] == self.nmix for i in X) and \\\n any(i.shape[1] == self.feat_dim * self.nmix for i in X):\n Z = [i for i in X if i.shape[1] == self.nmix][0]\n F = [i for i in X if i.shape[1] == self.nmix * self.feat_dim][0]\n else:\n raise ValueError(\"The input arguments must be tuple of (Z, F) or (X, indices).\")\n # ====== EM ====== #\n # LU, 
RU, LLK, nframes\n for iter in range(self.niter):\n self.expectation_maximization(Z, F, device=self._device,\n print_progress=True)\n # ====== exception ====== #\n finally:\n if os.path.exists(cache_Z):\n os.remove(cache_Z)\n if os.path.exists(cache_F):\n os.remove(cache_F)\n"
] |
[
[
"tensorflow.random.experimental.Generator.from_seed",
"tensorflow.sparse.to_dense",
"tensorflow.strings.unicode_split",
"numpy.logical_and",
"numpy.asarray",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.quantile",
"sklearn.feature_extraction.text.TfidfVectorizer",
"tensorflow.data.Dataset.zip",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.array",
"numpy.sum"
],
[
"numpy.square",
"numpy.abs",
"matplotlib.pyplot.cm.get_cmap",
"numpy.unique",
"numpy.min",
"numpy.linspace",
"numpy.squeeze",
"numpy.arange",
"matplotlib.colors.Normalize",
"numpy.full",
"numpy.full_like",
"numpy.max",
"matplotlib.pyplot.colorbar",
"numpy.mean",
"numpy.prod",
"numpy.array",
"numpy.random.RandomState",
"matplotlib.pyplot.cm.ScalarMappable"
],
[
"numpy.diag",
"numpy.dot",
"tensorflow.device",
"scipy.linalg.svd",
"numpy.sqrt",
"numpy.linspace",
"tensorflow.reduce_sum",
"numpy.dtype",
"numpy.concatenate",
"numpy.max",
"tensorflow.map_fn",
"numpy.random.randn",
"numpy.any",
"numpy.exp",
"numpy.tril",
"tensorflow.linalg.inv",
"numpy.clip",
"numpy.tril_indices",
"numpy.reshape",
"numpy.eye",
"numpy.arange",
"tensorflow.squeeze",
"scipy.linalg.inv",
"numpy.zeros",
"scipy.linalg.solve",
"numpy.log",
"tensorflow.matmul",
"tensorflow.pow",
"numpy.power",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.exp",
"numpy.savetxt",
"numpy.array",
"numpy.sum",
"tensorflow.linalg.solve",
"tensorflow.reduce_max",
"numpy.log2",
"tensorflow.transpose",
"tensorflow.constant",
"numpy.random.seed",
"tensorflow.range",
"tensorflow.eye",
"tensorflow.expand_dims",
"numpy.ones",
"scipy.linalg.cholesky",
"tensorflow.log",
"tensorflow.variable_scope",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
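The code column of the row above contains an odin-ai style `GMM`/`Tmatrix` trainer whose `Tmatrix.transform` method extracts an i-vector by inverting the posterior precision built from the zero- and first-order statistics. As a reference for that single step, here is a minimal NumPy-only sketch with arbitrary toy dimensions; `T`, `Sigma`, `Z` and `F` are random stand-ins rather than trained parameters, and nothing below calls the library itself.

```python
# NumPy sketch of the i-vector posterior computed in Tmatrix.transform:
#   L  = I + sum_c Z_c * T_c Sigma_c^{-1} T_c'
#   iv = L^{-1} T Sigma^{-1} F'
# Toy dimensions and random stand-ins; illustrative only.
import numpy as np

tv_dim, nmix, feat_dim = 4, 8, 5
rng = np.random.RandomState(1234)

T = rng.randn(tv_dim, nmix * feat_dim) * 0.001   # total-variability matrix
Sigma = rng.rand(1, nmix * feat_dim) + 0.5       # diagonal GMM covariances
Z = rng.rand(1, nmix)                            # zero-order statistics of one utterance
F = rng.randn(1, nmix * feat_dim)                # centered first-order statistics

T_invS = T / Sigma
L = np.eye(tv_dim)
for c in range(nmix):
    Tc = T[:, c * feat_dim:(c + 1) * feat_dim]
    Sc = Sigma[0, c * feat_dim:(c + 1) * feat_dim]
    L += Z[0, c] * (Tc / Sc) @ Tc.T
ivector = np.linalg.solve(L, T_invS @ F.T).T     # shape (1, tv_dim)
print(ivector.shape)
```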
michaelnowotny/cocos
|
[
"3c34940d7d9eb8592a97788a5df84b8d472f2928",
"3c34940d7d9eb8592a97788a5df84b8d472f2928"
] |
[
"cocos/tests/test_numerics/test_data/test_squeeze_reshape.py",
"cocos/numerics/random.py"
] |
[
"import cocos.device\nimport cocos.numerics as cn\nimport numpy as np\nimport pytest\n\n\ntest_data = [np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 20]],\n dtype=np.int32),\n np.array([[0.2, 1.0, 0.5],\n [0.4, 0.5, 0.6],\n [0.7, 0.2, 0.25]],\n dtype=np.float32),\n np.array([[0.5, 2.3, 3.1],\n [4, 5.5, 6],\n [7 - 9j, 8 + 1j, 2 + 10j]],\n dtype=np.complex64),\n np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 20]], dtype=np.int32),\n np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 20]],\n dtype=np.int32)\n ]\n\n\[email protected](\"A_numpy\", test_data)\ndef test_squeeze_reshape(A_numpy):\n cocos.device.init()\n newshapes = [(3, 1, 3), (1, 3, 3)]\n axess = [(None, 1), (None, 0)]\n\n for newshape, axes in zip(newshapes, axess):\n A_cocos = cn.array(A_numpy)\n\n # 3, 1, 3\n B_numpy = A_numpy.reshape(newshape)\n B_cocos = A_cocos.reshape(newshape)\n\n assert np.allclose(B_cocos, B_numpy)\n\n for axis in axes:\n C_numpy = B_numpy.squeeze(axis=axis)\n C_cocos = B_cocos.squeeze(axis=axis)\n\n assert np.allclose(C_cocos, C_numpy)\n\n\ndef main():\n test_squeeze_reshape(test_data[0])\n\n\nif __name__ == '__main__':\n main()\n",
"import arrayfire as af\nfrom arrayfire.library import Dtype\nimport collections\nimport math\nimport numpy as np\nfrom types import ModuleType\nimport typing as tp\nfrom ._arith import \\\n exp, \\\n log, \\\n minimum, \\\n sqrt, \\\n minimum\n\nfrom ._array import asscalar, ndarray\nfrom ._conversion import \\\n convert_numpy_to_af_type, \\\n convert_af_to_numpy_type\n\nfrom cocos.numerics.linalg import cholesky\n\nfrom cocos.options import \\\n GPUOptions, \\\n RandomNumberGenerator\n\n\nSIZE_TYPE = tp.Optional[tp.Union[int, tp.Sequence]]\n\n\ndef map_rng_to_random_engine(rng: RandomNumberGenerator):\n if rng == RandomNumberGenerator.PHILOX_4X32_10:\n return af.random.RANDOM_ENGINE.PHILOX_4X32_10\n elif rng == RandomNumberGenerator.THREEFRY_2X32_16:\n return af.random.RANDOM_ENGINE.THREEFRY_2X32_16\n elif rng == RandomNumberGenerator.MERSENNE_GP11213:\n return af.random.RANDOM_ENGINE.MERSENNE_GP11213\n elif rng == RandomNumberGenerator.PHILOX:\n return af.random.RANDOM_ENGINE.PHILOX\n elif rng == RandomNumberGenerator.THREEFRY:\n return af.random.RANDOM_ENGINE.THREEFRY\n elif rng == RandomNumberGenerator.DEFAULT:\n return af.random.RANDOM_ENGINE.DEFAULT\n else:\n raise ValueError(\"The requested random number generator \"\n \"is not supported.\")\n\n\n# initialized default random number engine\nrandom_engine \\\n = af.random.Random_Engine(\n engine_type=map_rng_to_random_engine(GPUOptions.default_rng))\n\n\n################################################################################\n# functions to get and set the seed\n################################################################################\n\ndef seed(seed: tp.Optional[int] = None):\n \"\"\"\n Seed the generator.\n \"\"\"\n\n if seed is None:\n seed = 0\n af.set_seed(seed)\n\n\ndef get_seed() -> int:\n \"\"\"\n Returns the current seed of the generator.\n \"\"\"\n\n return af.get_seed()\n\n\n################################################################################\n# supporting functions for antithetic random numbers\n################################################################################\n\ndef get_antithetic_slices(shape: tp.Sequence[int],\n antithetic_dimension: int) \\\n -> tp.Tuple[slice, ...]:\n \"\"\"\n This function generates a tuple of slices to index the original array of\n random numbers to take either half or one less than half of the the original\n random numbers along the antithetic dimension and all of the random numbers\n along the other dimensions.\n \"\"\"\n\n slices = []\n\n for axis, dimension in enumerate(shape):\n if axis == antithetic_dimension:\n s = slice(0, math.floor(dimension / 2), 1)\n else:\n s = slice(0, dimension, 1)\n\n slices.append(s)\n\n return tuple(slices)\n\n\ndef verify_shape_and_antithetic_dimension(shape: tp.Sequence[int],\n antithetic_dimension: tp.Optional[\n int] = None):\n \"\"\"\n This function makes sure that the length shape argument is between 1 and 4\n and checks that the antithetic dimension is one of the dimensions in the\n shape argument.\n \"\"\"\n\n if len(shape) > 4:\n raise ValueError('arrays with more than 4 axes are not supported')\n\n if len(shape) < 1:\n raise ValueError('array must have at least one axis')\n\n if antithetic_dimension < 0 or antithetic_dimension > len(shape) - 1:\n raise ValueError(\n f'antithetic dimension must be None or between 0 and {len(shape)}')\n\n\n################################################################################\n# Basic continuous random number 
generators\n################################################################################\n\ndef rand(d0: int,\n d1: tp.Optional[int] = None,\n d2: tp.Optional[int] = None,\n d3: tp.Optional[int] = None,\n dtype: np.generic = np.float32) -> ndarray:\n \"\"\"\n Random values in a given shape.\n \"\"\"\n\n af_type = convert_numpy_to_af_type(dtype)\n af_array = af.data.randu(d0, d1, d2, d3, dtype=af_type)\n\n return ndarray(af_array)\n\n\ndef randn(d0: int,\n d1: tp.Optional[int] = None,\n d2: tp.Optional[int] = None,\n d3: tp.Optional[int] = None,\n dtype: np.generic = np.float32):\n \"\"\"\n Return a sample (or samples) from the “standard normal” distribution.\n \"\"\"\n\n af_type = convert_numpy_to_af_type(dtype)\n af_array = af.data.randn(d0, d1, d2, d3, dtype=af_type)\n return ndarray(af_array)\n\n\ndef _random_with_dtype_internal(shape: tp.Sequence[int],\n rng_function: tp.Callable,\n dtype: np.generic = np.float32,\n num_pack: ModuleType = np):\n draw_shape = list(shape)\n\n if num_pack == np:\n x = rng_function(*draw_shape)\n if x.dtype != dtype:\n x = x.astype(dtype)\n else:\n x = rng_function(*draw_shape, dtype=dtype)\n\n return x\n\n\ndef rand_with_dtype(shape: tp.Sequence[int],\n dtype: np.generic = np.float32,\n num_pack: ModuleType = np):\n return _random_with_dtype_internal(shape=shape,\n rng_function=num_pack.random.rand,\n dtype=dtype,\n num_pack=num_pack)\n\n\ndef randn_with_dtype(shape: tp.Sequence[int],\n dtype: np.generic = np.float32,\n num_pack: ModuleType = np):\n return _random_with_dtype_internal(shape=shape,\n rng_function=num_pack.random.randn,\n dtype=dtype,\n num_pack=num_pack)\n\n\ndef randn_antithetic(shape: tp.Sequence[int],\n antithetic_dimension: tp.Optional[int] = None,\n dtype: np.generic = np.float32,\n num_pack: ModuleType = np):\n verify_shape_and_antithetic_dimension(shape, antithetic_dimension)\n draw_shape = list(shape)\n\n if antithetic_dimension is not None:\n # adjust dimension over which antithetic random numbers are to be drawn\n draw_shape[antithetic_dimension] \\\n = math.ceil(shape[antithetic_dimension] / 2)\n\n # draw original random numbers\n if num_pack == np:\n z = num_pack.random.randn(*draw_shape)\n if z.dtype != dtype:\n z = z.astype(dtype)\n else:\n z = num_pack.random.randn(*draw_shape, dtype=dtype)\n\n if antithetic_dimension is not None:\n # reflect random numbers at 0 and concatenate to original random numbers\n slices = get_antithetic_slices(shape, antithetic_dimension)\n z = num_pack.concatenate((z, -z[slices]),\n axis=antithetic_dimension)\n\n return z\n\n\ndef rand_antithetic(shape: tp.Sequence[int],\n antithetic_dimension: tp.Optional[int] = None,\n dtype: np.generic = np.float32,\n num_pack: ModuleType = np):\n verify_shape_and_antithetic_dimension(shape, antithetic_dimension)\n draw_shape = list(shape)\n\n if antithetic_dimension is not None:\n # adjust dimension over which antithetic random numbers are to be drawn\n draw_shape[antithetic_dimension] \\\n = math.ceil(shape[antithetic_dimension] / 2)\n\n # draw original random numbers\n if num_pack == np:\n u = num_pack.random.rand(*draw_shape)\n if u.dtype != dtype:\n u = u.astype(dtype)\n else:\n u = num_pack.random.rand(*draw_shape, dtype=dtype)\n\n if antithetic_dimension is not None:\n # reflect random numbers at 0 and concatenate to original random numbers\n slices = get_antithetic_slices(shape, antithetic_dimension)\n u = num_pack.concatenate((u, 1.0 - u[slices]),\n axis=antithetic_dimension)\n\n return 
u\n\n\n################################################################################\n# Basic discrete random number generators\n################################################################################\n\ndef randint(low: int,\n high: tp.Optional[int] = None,\n size: tp.Optional[tp.Union[tp.Tuple[int, ...], int]] = None,\n dtype: np.generic = np.int32) \\\n -> ndarray:\n \"\"\"\n Draws an array of random integers ranging from low to high-1 of the\n specified shape.\n\n :param low: lowest number to draw\n :param high: highest integer to draw (excluding)\n :param size: shape of output array\n :param dtype: data type of integer to be generated\n :return: an ndarray of random integers\n \"\"\"\n if not high:\n high = low\n low = 0\n\n if not size:\n size = (1,)\n elif isinstance(size, int):\n size = (size,)\n\n n = np.prod(size)\n divisor = 1.0 / (high - low)\n\n u = rand(n)\n u = minimum(u, 1.0 - np.finfo(np.float32).eps)\n if dtype != np.int32:\n i = (u / divisor).astype(np.int64) + low\n i = i.astype(dtype)\n else:\n i = (u / divisor).astype(np.int32) + low\n\n return i.reshape(size)\n\n\ndef choice(a: ndarray,\n size: tp.Optional[tp.Union[tp.Tuple[int, ...], int]] = None,\n replace: bool = True,\n p: tp.Optional[ndarray] = None) -> ndarray:\n if p:\n raise ValueError('p != None is not supported')\n\n if not replace:\n raise ValueError('replace=False is not supported')\n\n i = randint(0, a.size, size=size)\n\n if not isinstance(size, int):\n return a[i].reshape(size)\n else:\n return a[i]\n\n\ndef _draw_and_reshape(size: SIZE_TYPE,\n rng_func: tp.Callable[[int], ndarray]) \\\n -> ndarray:\n if not size:\n n = 1\n elif isinstance(size, int):\n n = size\n elif isinstance(size, (list, tuple)):\n n = np.prod(size)\n else:\n raise TypeError(\"size must be either of type int or tuple\")\n\n random_numbers = rng_func(n)\n\n if size is None:\n random_numbers = asscalar(random_numbers)\n elif not isinstance(size, int):\n random_numbers = random_numbers.reshape(size)\n\n return random_numbers\n\n\ndef uniform(low: float = 0.0,\n high: float = 1.0,\n size: tp.Optional[SIZE_TYPE] = None):\n \"\"\"\n Draw samples from a uniform distribution.\n \"\"\"\n\n if high < low:\n raise ValueError(\"high must not be less than low\")\n\n u = _draw_and_reshape(size, rand)\n return u * (high - low) + low\n\n\ndef _exponential_internal(scale: float,\n n: int,\n antithetic: bool = False) -> ndarray:\n u = rand(n)\n u = minimum(u, 1.0 - np.finfo(np.float32).eps)\n x: ndarray = log(1.0 - u) * (-scale)\n return x\n\n\ndef exponential(scale: float=1.0,\n size: tp.Optional[SIZE_TYPE] = None,\n antithethic: bool = False) -> ndarray:\n return _draw_and_reshape(size,\n lambda n: _exponential_internal(\n scale=scale,\n n=n,\n antithetic=antithethic))\n\n\ndef standard_exponential(size: tp.Optional[SIZE_TYPE] = None) -> ndarray:\n return exponential(size=size)\n\n\n################################################################################\n# gamma random number generator by Marsaglia and Tsang\n# using Cocos vectorization\n################################################################################\n\ndef gamma_rand_marsaglia_and_tsang_arrayfire(alpha: float,\n lambda_: float,\n n: int) \\\n -> af.array:\n random_numbers = af.constant(0, n, dtype=Dtype.f32)\n # Gamma(alpha, lambda) generator using Marsaglia and Tsang method\n # Algorithm 4.33\n if alpha >= 1.0:\n d = alpha - 1 / 3\n c = 1.0 / np.sqrt(9.0 * d)\n\n number_generated = 0\n number_generated_total = 0\n\n while number_generated < n:\n number_left 
= n - number_generated\n\n z = af.randn(number_left, dtype=Dtype.f32)\n y = (1.0 + c * z)\n v = y * y * y\n\n accept_index_1 = ((z >= -1.0 / c) & (v > 0.0))\n z_accept_1 = z[accept_index_1]\n # del z\n v_accept_1 = v[accept_index_1]\n # del v\n u_accept_1 = af.randu(v_accept_1.elements(), dtype=Dtype.f32)\n # del U\n\n accept_index_2 = \\\n u_accept_1 < af.exp((0.5 * z_accept_1 * z_accept_1 + d - d * v_accept_1 + d * af.log(v_accept_1)))\n\n x_accept = d * v_accept_1[accept_index_2] / lambda_\n number_accept = x_accept.elements()\n\n random_numbers[number_generated:np.minimum(n, number_generated + number_accept)] = \\\n x_accept[0:np.minimum(number_left, number_accept)]\n\n number_generated += number_accept\n number_generated_total += number_left\n\n if GPUOptions.verbose:\n print(f\"Acceptance ratio = {n/number_generated_total}\")\n else:\n random_numbers = gamma_rand_marsaglia_and_tsang_arrayfire(alpha + 1, lambda_, n)\n random_numbers *= af.randu(n, dtype=Dtype.f32) ** (1.0 / alpha)\n\n return random_numbers\n\n\ndef gamma(shape: float,\n scale: float = 1.0,\n size: tp.Optional[SIZE_TYPE] = None) \\\n -> ndarray:\n def fun(n: int):\n return ndarray(gamma_rand_marsaglia_and_tsang_arrayfire(\n alpha=shape,\n lambda_=1.0/scale, n=n))\n\n return _draw_and_reshape(size, lambda n: fun(n))\n\n\ndef standard_gamma(shape: float,\n size: tp.Optional[SIZE_TYPE] = None) -> ndarray:\n return gamma(shape, size=size)\n\n\ndef chisquare(df, size: tp.Optional[SIZE_TYPE] = None) -> ndarray:\n return gamma(df / 2.0, 2.0, size)\n\n\ndef _beta_internal(a: float,\n b: float,\n n: int) -> ndarray:\n X = gamma(a, 1.0, n)\n Y = gamma(b, 1.0, n)\n return X / (X + Y)\n\n\ndef beta(a: float,\n b: float,\n size: tp.Optional[SIZE_TYPE] = None) -> ndarray:\n return _draw_and_reshape(size, lambda n: _beta_internal(a, b, n))\n\n\ndef _wald_internal(mu: float,\n LAMBDA: float,\n n: int) -> ndarray:\n v = randn(n)\n u = rand(n)\n\n y = v * v\n del v\n x = mu + mu ** 2 / (2.0 * LAMBDA) * y - mu / (2.0 * LAMBDA) * sqrt(4.0 * mu * LAMBDA * y + mu ** 2.0 * y * y)\n reject_index = u > (mu / (mu + x))\n x[reject_index] = mu ** 2 / x[reject_index]\n return x\n\n\ndef wald(mean: float,\n scale: float,\n size: tp.Optional[SIZE_TYPE] = None) -> ndarray:\n return _draw_and_reshape(size, lambda n: _wald_internal(mean, scale, n))\n\n\ndef normal(loc: float = 0.0,\n scale: float = 1.0,\n size: tp.Optional[SIZE_TYPE] = None) -> ndarray:\n return _draw_and_reshape(size, lambda n: loc + scale * randn(n))\n\n\ndef standard_normal(size: tp.Optional[SIZE_TYPE] = None) -> ndarray:\n return _draw_and_reshape(size, randn)\n\n\ndef lognormal(mean: float = 0.0,\n sigma: float = 1.0,\n size: tp.Optional[SIZE_TYPE] = None) -> ndarray:\n return exp(normal(mean, sigma, size))\n\n\ndef _logistic_internal(loc: float,\n scale: float,\n n: int) -> ndarray:\n u = rand(n)\n u = minimum(u, 1.0 - np.finfo(np.float32).eps)\n x: ndarray = loc - scale * log(1.0 / u - 1.0)\n return x\n\n\ndef logistic(loc: float = 0.0,\n scale: float = 1.0,\n size: tp.Optional[SIZE_TYPE] = None):\n return _draw_and_reshape(size, lambda n: _logistic_internal(loc, scale, n))\n\n\ndef multivariate_normal(mean, cov, size: tp.Sequence[int]) \\\n -> ndarray:\n d = len(mean)\n if not isinstance(size, collections.abc.Iterable):\n size = [size]\n\n if not isinstance(size, collections.abc.Sequence):\n size = list(size)\n\n if not cov.shape == (d, d):\n raise ValueError('mean and cov must be a arrays with shapes (d, ) and '\n '(d, d) respectively')\n\n draw_shape = list(size)\n 
draw_shape.append(d)\n    n = int(np.prod(size))\n    if mean.dtype != cov.dtype:\n        raise ValueError('the dtypes of mean and cov must match')\n\n    z = randn(n, d, dtype=cov.dtype)\n    cov_cholesky = cholesky(cov).T\n    x = z @ cov_cholesky\n\n    return x.reshape(draw_shape)\n"
] |
[
[
"numpy.array",
"numpy.allclose"
],
[
"numpy.minimum",
"numpy.sqrt",
"numpy.prod",
"numpy.finfo"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
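The `cocos/numerics/random.py` source in the row above implements antithetic sampling (`randn_antithetic`) by drawing roughly half of the normals along one axis and mirroring them at zero. The snippet below is a plain-NumPy sketch of that idea; the function name and defaults are invented for illustration and do not come from cocos.

```python
# Plain-NumPy sketch of the antithetic-normal trick behind randn_antithetic:
# draw ceil(n/2) samples along one axis, then append their negation.
import math
import numpy as np

def randn_antithetic_np(shape, antithetic_axis=0, seed=0):
    rng = np.random.RandomState(seed)
    draw_shape = list(shape)
    draw_shape[antithetic_axis] = math.ceil(shape[antithetic_axis] / 2)
    z = rng.randn(*draw_shape)
    # mirror only as many samples as needed to reach the requested size
    k = shape[antithetic_axis] - draw_shape[antithetic_axis]
    mirror = np.take(z, np.arange(k), axis=antithetic_axis)
    return np.concatenate((z, -mirror), axis=antithetic_axis)

samples = randn_antithetic_np((5, 3))
print(samples.shape)                           # (5, 3)
print(np.allclose(samples[:2], -samples[3:]))  # True: mirrored halves
```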
mjredmond/FEMApp
|
[
"dd8cc53acf80d0a1bb83ce9c89bcfd51e85c6be8"
] |
[
"fem/utilities/tables/table_data/_table_np_data_list.py"
] |
[
"\"\"\"\ntable_data.table_data_list\n\ntable data list\n\nauthor: Michael Redmond\n\n\"\"\"\n\nfrom __future__ import print_function, absolute_import\n\nfrom ._table_data import TableData, DummyTableData\n\nfrom fem.utilities import MrSignal\nfrom fem.utilities.error_handling import MyExceptions\nfrom fem.utilities.debug import debuginfo, show_caller\n\nfrom ._table_data_list import TableDataList\n\nimport numpy as np\nimport base64\nfrom fem.utilities.zipper import compress, decompress\n\n\nclass TableNumpyDataList(TableDataList):\n CheckDataType = None\n DefaultDataType = None\n\n def __init__(self, data_id=None):\n super(TableNumpyDataList, self).__init__(data_id)\n\n self.dtype = self.DefaultDataType.dtype\n\n self.blank_data = np.zeros(1, dtype=self.dtype)\n\n self.data = np.zeros(0, dtype=self.dtype)\n\n self._headers = list(self.data.dtype.names)[:self.DefaultDataType.columns()]\n\n self.list_changed = MrSignal()\n\n def clear(self):\n self.data.resize(0)\n\n def get_data(self):\n return self.data.tolist()\n\n def load_data(self, data):\n np.copyto(self.data, data)\n\n def add(self, data=None):\n\n self.data.resize(self.data.size + 1, refcheck=False)\n\n if data is not None:\n self.data[-1] = data\n\n self.list_changed.emit()\n\n return tuple(self.data[-1])\n\n def remove(self, index):\n if isinstance(index, (list, tuple)):\n i1 = index[0]\n i2 = index[1]\n else:\n i1 = index\n i2 = index\n\n indices = list(range(i1, i2 + 1))\n\n tmp = []\n\n for i in indices:\n tmp.append(tuple(self.data[i]))\n\n self.data = np.delete(self.data, indices)\n\n self.list_changed.emit()\n\n return tmp\n\n def insert(self, index, data=None):\n\n if data is not None:\n assert isinstance(data, tuple)\n try:\n if isinstance(data[0], tuple):\n return self._insert_multiple(index, data)\n except IndexError:\n data = None\n\n if index < 0:\n index = 0\n\n if index >= self.data.size:\n return None\n\n if data is None:\n data = tuple(self.blank_data[0])\n\n self.data = np.insert(self.data, index, data)\n\n self.list_changed.emit()\n\n return tuple(self.data[index])\n\n def editable_columns(self):\n return set(range(len(self.headers)))\n\n def _insert_multiple(self, index, data):\n if index < 0:\n index = 0\n\n if index >= len(self.data) + 1:\n raise IndexError('%d' % index)\n\n self.list_changed.block()\n\n for data_ in data:\n self.data = np.insert(self.data, index, data_)\n\n self.list_changed.unblock()\n\n self.list_changed.emit()\n\n return data\n\n def serialize(self):\n data_i = self.DefaultDataType(self, 0)\n\n for i in range(self.data.shape[0]):\n data_i.index = i\n data_i.serialize()\n\n return base64.b64encode(compress(self.data.tobytes())).decode()\n\n def load(self, data):\n try:\n self.data = np.fromstring(decompress(base64.b64decode(data.encode())), dtype=self.dtype)\n except ValueError:\n print('get rid of this')\n return\n\n data_i = self.DefaultDataType(self, 0)\n\n for i in range(self.data.shape[0]):\n data_i.index = i\n data_i.load(self.data[i])\n\n # np.copyto(self._data, np.fromstring(base64.b64decode(data.encode()), dtype=self.dtype))\n\n def __getitem__(self, index):\n if isinstance(index, str):\n index = self.ids().index(index)\n\n return self.DefaultDataType(self, index)\n\n def set_data(self, index, value):\n row, column = index\n\n _data = self.DefaultDataType(self, row)\n\n try:\n old_value = _data[column]\n _data[column] = value\n new_value = _data[column]\n\n if old_value != new_value:\n return True, old_value, new_value\n else:\n return False, None, None\n except 
(MyExceptions.IndexError, MyExceptions.ValueError):\n return False, None, None\n\n @show_caller\n def __setitem__(self, index, data):\n assert isinstance(data, tuple)\n\n self.data[index] = data\n\n def id_exists(self, id_):\n\n data_i = self.DefaultDataType(self, 0)\n\n for i in range(self.data.shape[0]):\n data_i.index = i\n if data_i.id == id_:\n return True\n\n return False\n\n def subdata(self, index):\n return None\n\n def has_subdata(self):\n return None\n\n def find_index(self, data_id):\n assert isinstance(data_id, str)\n\n data_i = self.DefaultDataType(self, 0)\n\n for i in range(self.data.shape[0]):\n data_i.index = i\n if data_i.id == data_id:\n return i\n\n return -1\n\n def ids(self):\n\n data_i = self.DefaultDataType(self, 0)\n\n ids_ = []\n\n for i in range(self.data.shape[0]):\n data_i.index = i\n ids_.append(data_i.id)\n\n return ids_\n\n def _move(self, i1, i2):\n _data_i1 = tuple(self.data[i1])\n _data_i2 = tuple(self.data[i2])\n\n self.data[i1], self.data[i2] = _data_i2, _data_i1\n\n del self._ids[:]\n\n self.list_changed.emit()\n\n def shape(self):\n return self.data.shape[0], self.DefaultDataType.columns()\n\n @property\n def size(self):\n return self.__len__()\n\n def __len__(self):\n return self.data.shape[0]\n"
] |
[
[
"numpy.copyto",
"numpy.delete",
"numpy.zeros",
"numpy.insert"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
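The `TableNumpyDataList` class in the row above keeps its table rows in a NumPy structured array and edits them with in-place `resize`, `np.insert` and `np.delete`. The self-contained sketch below shows the same bookkeeping pattern on a made-up dtype; it is not the FEMApp class itself.

```python
# Stand-alone sketch of the structured-array row handling used by TableNumpyDataList
# (grow in place to append; np.insert / np.delete return new arrays for row edits).
import numpy as np

dtype = np.dtype([('id', 'U8'), ('x', 'f8'), ('y', 'f8')])
data = np.zeros(0, dtype=dtype)

# append: enlarge in place (zero-filled), then overwrite the new last row
data.resize(data.size + 1, refcheck=False)
data[-1] = ('n1', 1.0, 2.0)

# insert a row at index 0, then delete the row now at index 1
data = np.insert(data, 0, ('n0', 0.0, 0.0))
data = np.delete(data, [1])
print(data.tolist())   # [('n0', 0.0, 0.0)]
```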
Kuga23/Deep-Learning
|
[
"86980338208c702b6bfcbcfffdb18498e389a56b"
] |
[
"Pytorch/Scratch CNN and Pytorch/part1-convnet/tests/test_relu.py"
] |
[
"import unittest\nimport numpy as np\nfrom modules import ReLU\nfrom .utils import *\n\nclass TestReLU(unittest.TestCase):\n \"\"\" The class containing all test cases for this assignment\"\"\"\n\n def setUp(self):\n \"\"\"Define the functions to be tested here.\"\"\"\n pass\n\n def _relu_forward(self, x):\n relu = ReLU()\n return relu.forward(x)\n\n def test_forward(self):\n x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4)\n relu = ReLU()\n out = relu.forward(x)\n correct_out = np.array([[0., 0., 0., 0., ],\n [0., 0., 0.04545455, 0.13636364, ],\n [0.22727273, 0.31818182, 0.40909091, 0.5, ]])\n diff = rel_error(out, correct_out)\n self.assertAlmostEquals(diff, 0, places=7)\n\n\n def test_backward(self):\n x = np.random.randn(10, 10)\n dout = np.random.randn(*x.shape)\n\n dx_num = eval_numerical_gradient_array(lambda x: self._relu_forward(x), x, dout)\n\n relu = ReLU()\n out = relu.forward(x)\n relu.backward(dout)\n dx = relu.dx\n\n self.assertAlmostEquals(rel_error(dx_num, dx), 0, places=7)\n\n\n"
] |
[
[
"numpy.array",
"numpy.random.randn",
"numpy.linspace"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
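The test file in the row above checks `ReLU.backward` against a numerical gradient. A minimal, framework-free version of what `test_backward` verifies looks like this; the `relu_forward`/`relu_backward` helpers are generic stand-ins, not the course's `modules.ReLU` class.

```python
# Minimal ReLU forward/backward plus a central-difference gradient check,
# mirroring test_backward; generic helpers, not modules.ReLU.
import numpy as np

def relu_forward(x):
    return np.maximum(0.0, x)

def relu_backward(x, dout):
    return dout * (x > 0)

rng = np.random.RandomState(0)
x = rng.randn(10, 10)
dout = rng.randn(10, 10)
dx = relu_backward(x, dout)

# numerical gradient of f(x) = sum(relu(x) * dout) with respect to x
h = 1e-6
dx_num = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'])
while not it.finished:
    i = it.multi_index
    old = x[i]
    x[i] = old + h
    f_plus = np.sum(relu_forward(x) * dout)
    x[i] = old - h
    f_minus = np.sum(relu_forward(x) * dout)
    x[i] = old
    dx_num[i] = (f_plus - f_minus) / (2.0 * h)
    it.iternext()

print(np.max(np.abs(dx - dx_num)))  # ~0 as long as no entry of x sits on the kink
```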
taoshen58/ReSAN
|
[
"f65f3fe656907be0ec14ddf18cd7d2608e7ef905",
"f65f3fe656907be0ec14ddf18cd7d2608e7ef905"
] |
[
"resan/resa.py",
"SICK_rl_pub/src/nn_utils/integration_func.py"
] |
[
"import tensorflow as tf\n\nfrom resan.utils.nn import bn_dense_layer, dropout, linear\nfrom resan.utils.general import exp_mask_for_high_rank, mask_for_high_rank\nfrom resan.rl_nn import reduce_data_rep_max_len\n\n\ndef reinforced_self_attention(\n rep_tensor, rep_mask, dep_selection, head_selection,\n hn=None, keep_unselected=True,\n scope=None, keep_prob=1., is_train=None, wd=0., activation='elu'\n):\n with tf.variable_scope(scope or 'reinforced_self_attention'):\n fw_result = directional_attention_with_selections(\n rep_tensor, rep_mask, dep_selection, head_selection,\n 'forward', hn, keep_unselected,\n 'forward_resa', keep_prob, is_train, wd, activation\n )\n bw_result = directional_attention_with_selections(\n rep_tensor, rep_mask, dep_selection, head_selection,\n 'backward', hn, keep_unselected,\n 'backward_resa', keep_prob, is_train, wd, activation\n )\n return tf.concat([fw_result, bw_result], -1)\n\n\ndef directional_attention_with_selections(\n rep_tensor, rep_mask, dep_selection, head_selection, direction=None, hn=None, keep_unselected=True,\n scope=None, keep_prob=1., is_train=None, wd=0., activation='elu'):\n\n bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]\n org_ivec = rep_tensor.get_shape().as_list()[2]\n ivec = hn or org_ivec\n\n with tf.variable_scope(scope or 'directional_attention_%s' % direction or 'diag'):\n # non-linear\n rep_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map', activation,\n False, wd, keep_prob, is_train)\n # ensure the seletion is right\n dep_selection = tf.logical_and(rep_mask, dep_selection)\n head_selection = tf.logical_and(rep_mask, head_selection)\n rep_dep_tensor, rep_dep_mask, dep_org_idx = reduce_data_rep_max_len(rep_map, dep_selection)\n rep_head_tensor,rep_head_mask, head_org_idx = reduce_data_rep_max_len(rep_map, head_selection)\n sl_dep, sl_head = tf.shape(rep_dep_tensor)[1], tf.shape(rep_head_tensor)[1]\n\n if keep_unselected:\n unhead_selection = tf.logical_and(rep_mask, tf.logical_not(head_selection))\n rep_unhead_tensor, rep_unhead_mask, unhead_org_idx = reduce_data_rep_max_len(rep_map, unhead_selection)\n sl_unhead = tf.shape(rep_unhead_tensor)[1]\n\n attn_result = tf.cond(\n tf.equal(sl_head, 0),\n lambda: tf.zeros([bs, 0, hn], tf.float32),\n lambda: self_attention_for_selected_head(\n head_selection, head_org_idx, sl_head, rep_head_mask,\n dep_selection, dep_org_idx, sl_dep, rep_dep_mask,\n rep_map, rep_dep_tensor, keep_prob, is_train, direction, ivec\n )\n )\n\n if keep_unselected:\n input_idx = tf.tile(tf.expand_dims(tf.range(sl), 0), [bs, 1])\n pooling_result = tf.cond(\n tf.equal(sl_unhead, 0),\n lambda: tf.zeros([bs, 0, hn], tf.float32),\n lambda: mean_pooling_for_unselected_head(\n unhead_org_idx, sl_unhead, rep_unhead_mask,\n input_idx, sl, rep_mask, rep_map, None) # todo: point !\n )\n\n with tf.variable_scope('output'):\n if keep_unselected:\n range_head = tf.tile(tf.expand_dims(tf.range(bs), -1), [1, sl_head])\n scatter_attn = tf.cond(\n tf.equal(sl_head, 0),\n lambda: tf.zeros([bs, sl+1, hn], tf.float32),\n lambda: tf.scatter_nd(\n tf.stack([range_head, head_org_idx], -1), attn_result, [bs, sl+1, hn])\n )\n\n range_unhead = tf.tile(tf.expand_dims(tf.range(bs), -1), [1, sl_unhead])\n scatter_pooling = tf.cond(\n tf.equal(sl_unhead, 0),\n lambda: tf.zeros([bs, sl+1, hn], tf.float32),\n lambda: tf.scatter_nd(\n tf.stack([range_unhead, unhead_org_idx], -1), pooling_result, [bs, sl+1, hn])\n )\n\n self_attn_input = rep_map\n context_features = tf.add(scatter_attn[:, :-1], 
scatter_pooling[:, :-1], 'context_features')\n output_mask = rep_mask\n else:\n self_attn_input = rep_head_tensor\n context_features = attn_result\n output_mask = rep_head_mask\n\n # context fusion gate\n o_bias = tf.get_variable('o_bias', [ivec], tf.float32, tf.constant_initializer(0.))\n fusion_gate = tf.nn.sigmoid(\n linear(self_attn_input, ivec, True, 0., 'linear_fusion_i', False, wd, keep_prob, is_train) +\n linear(context_features, ivec, True, 0., 'linear_fusion_a', False, wd, keep_prob, is_train) +\n o_bias)\n output = fusion_gate * self_attn_input + (1 - fusion_gate) * context_features\n\n return output, output_mask\n\n\ndef self_attention_for_selected_head(\n head_selection, head_org_idx, sl_head, rep_head_mask,\n dep_selection, dep_org_idx, sl_dep, rep_dep_mask,\n rep_map, rep_dep_tensor, keep_prob, is_train, direction, ivec\n):\n # data for self-attention\n rep_map_dp = dropout(rep_map, keep_prob, is_train)\n rep_dep_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, dep_selection)\n rep_head_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, head_selection)\n\n # mask generation\n dep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_head, 1])\n head_idxs = tf.tile(tf.expand_dims(head_org_idx, 2), [1, 1, sl_dep])\n\n if direction is None:\n direct_mask = tf.not_equal(head_idxs, dep_idxs) # [bs, slh, sld]\n else:\n if direction == 'forward':\n direct_mask = tf.greater(head_idxs, dep_idxs) # [bs, slh, sld]\n else:\n direct_mask = tf.less(head_idxs, dep_idxs) # [bs, slh, sld]\n # [bs, slh, slh]\n rep_mask_tile = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_head_mask, 2))\n attn_mask = tf.logical_and(direct_mask, rep_mask_tile) # [bs, slh, sld]\n\n # tensor tile\n rep_map_tile = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_head, 1, 1]) # bs,slh,sld,vec\n with tf.variable_scope('attention'): # bs,sl,sl,vec\n f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.))\n dependent = linear(rep_dep_tensor_dp, ivec, False, scope='linear_dependent') # bs,sld,vec\n dependent_etd = tf.expand_dims(dependent, 1) # bs,1,sld,vec\n head = linear(rep_head_tensor_dp, ivec, False, scope='linear_head') # bs,slh,vec\n head_etd = tf.expand_dims(head, 2) # bs,slh,1,vec\n\n logits = scaled_tanh(dependent_etd + head_etd + f_bias, 5.0) # bs,slh,sld,vec\n logits_masked = exp_mask_for_high_rank(logits, attn_mask) # bs,slh,sld,vec\n attn_score = tf.nn.softmax(logits_masked, 2) # bs,slh,sld,vec\n attn_score = mask_for_high_rank(attn_score, attn_mask)\n attn_result = tf.reduce_sum(attn_score * rep_map_tile, 2) # bs,slh,vec -> head_org_idx\n return attn_result\n\n\ndef mean_pooling_for_unselected_head(\n unhead_org_idx, sl_unhead, rep_unhead_mask,\n dep_org_idx, sl_dep, rep_dep_mask,\n rep_dep_tensor, direction\n):\n with tf.name_scope('pooling_for_un_head'):\n undep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_unhead, 1]) # [bs, sluh, sld]\n unhead_idxs = tf.tile(tf.expand_dims(unhead_org_idx, 2), [1, 1, sl_dep]) # [bs, sluh, sld]\n if direction is None:\n direct_mask_un = tf.not_equal(unhead_idxs, undep_idxs) # [bs, sluh, sld]\n else:\n if direction == 'forward':\n direct_mask_un = tf.greater(unhead_idxs, undep_idxs) # [bs, sluh, sld]\n else:\n direct_mask_un = tf.less(unhead_idxs, undep_idxs) # [bs, sluh, sld]\n\n # [bs, sluh, sld]\n rep_mask_tile_un = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_unhead_mask, 2))\n pooling_mask = tf.logical_and(direct_mask_un, rep_mask_tile_un) # [bs, sluh, sld]\n\n # data for 
pooling\n pooling_data = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_unhead, 1, 1]) # bs,sluh,sld,hn\n # execute mean pooling based on pooling_mask[bs, sluh, sld] and pooling_data[bs,sluh,sld,hn]\n pooling_data = mask_for_high_rank(pooling_data, pooling_mask) # [bs,sluh,sld,hn]\n pooling_data_sum = tf.reduce_sum(pooling_data, -2) # [bs,sluh,hn]\n pooling_den = tf.reduce_sum(tf.cast(pooling_mask, tf.int32), -1, keep_dims=True) # [bs,sluh]\n pooling_den = tf.where(tf.equal(pooling_den, 0), tf.ones_like(pooling_den), pooling_den)\n\n pooling_result = pooling_data_sum / tf.cast(pooling_den, tf.float32)\n return pooling_result\n\n\ndef scaled_tanh(x, scale=5.):\n return scale * tf.nn.tanh(1./scale * x)",
"from src.nn_utils.general import get_last_state, exp_mask_for_high_rank, mask_for_high_rank\nfrom src.nn_utils.nn import linear, get_logits, pooling_with_mask, softsel, feature_combination, dropout,\\\n bn_dense_layer\nfrom src.nn_utils.rnn_cell import SwitchableDropoutWrapper\nfrom src.nn_utils.rnn import dynamic_rnn, bidirectional_dynamic_rnn\nimport tensorflow as tf\nfrom src.nn_utils.general import get_last_state, add_reg_without_bias\n\n\ndef traditional_attention(rep_tensor, rep_mask, scope=None,\n keep_prob=1., is_train=None, wd=0., activation='elu',\n tensor_dict=None, name=None):\n bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]\n ivec = rep_tensor.get_shape()[2]\n with tf.variable_scope(scope or 'traditional_attention'):\n rep_tensor_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map', activation,\n False, wd, keep_prob, is_train)\n\n rep_tensor_logits = get_logits([rep_tensor_map], None, False, scope='self_attn_logits',\n mask=rep_mask, input_keep_prob=keep_prob, is_train=is_train) # bs,sl\n attn_res = softsel(rep_tensor, rep_tensor_logits, rep_mask) # bs,vec\n\n # save attn\n if tensor_dict is not None and name is not None:\n tensor_dict[name] = tf.nn.softmax(rep_tensor_logits)\n\n return attn_res\n\n\ndef multi_dimensional_attention(rep_tensor, rep_mask, scope=None,\n keep_prob=1., is_train=None, wd=0., activation='elu',\n tensor_dict=None, name=None):\n bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]\n ivec = rep_tensor.get_shape()[2]\n with tf.variable_scope(scope or 'multi_dimensional_attention'):\n map1 = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map1', activation,\n False, wd, keep_prob, is_train)\n map2 = bn_dense_layer(map1, ivec, True, 0., 'bn_dense_map2', 'linear',\n False, wd, keep_prob, is_train)\n map2_masked = exp_mask_for_high_rank(map2, rep_mask)\n\n soft = tf.nn.softmax(map2_masked, 1) # bs,sl,vec\n attn_output = tf.reduce_sum(soft * rep_tensor, 1) # bs, vec\n\n # save attn\n if tensor_dict is not None and name is not None:\n tensor_dict[name] = soft\n\n return attn_output\n\n\ndef directional_attention_with_dense(rep_tensor, rep_mask, direction=None, scope=None,\n keep_prob=1., is_train=None, wd=0., activation='elu',\n extra_mask=None,\n tensor_dict=None, name=None):\n def scaled_tanh(x, scale=5.):\n return scale * tf.nn.tanh(1./scale * x)\n\n bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]\n ivec = rep_tensor.get_shape()[2]\n with tf.variable_scope(scope or 'directional_attention_%s' % direction or 'diag'):\n # mask generation\n sl_indices = tf.range(sl, dtype=tf.int32)\n sl_col, sl_row = tf.meshgrid(sl_indices, sl_indices)\n if direction is None:\n direct_mask = tf.cast(tf.diag(- tf.ones([sl], tf.int32)) + 1, tf.bool)\n else:\n if direction == 'forward':\n direct_mask = tf.greater(sl_row, sl_col)\n else:\n direct_mask = tf.greater(sl_col, sl_row)\n direct_mask_tile = tf.tile(tf.expand_dims(direct_mask, 0), [bs, 1, 1]) # bs,sl,sl\n rep_mask_tile = tf.tile(tf.expand_dims(rep_mask, 1), [1, sl, 1]) # bs,sl,sl\n attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile) # bs,sl,sl\n if extra_mask is not None:\n attn_mask = tf.logical_and(attn_mask, extra_mask)\n\n # non-linear\n rep_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map', activation,\n False, wd, keep_prob, is_train)\n rep_map_tile = tf.tile(tf.expand_dims(rep_map, 1), [1, sl, 1, 1]) # bs,sl,sl,vec\n rep_map_dp = dropout(rep_map, keep_prob, 
is_train)\n\n # attention\n with tf.variable_scope('attention'): # bs,sl,sl,vec\n f_bias = tf.get_variable('f_bias',[ivec], tf.float32, tf.constant_initializer(0.))\n dependent = linear(rep_map_dp, ivec, False, scope='linear_dependent') # bs,sl,vec\n dependent_etd = tf.expand_dims(dependent, 1) # bs,1,sl,vec\n head = linear(rep_map_dp, ivec, False, scope='linear_head') # bs,sl,vec\n head_etd = tf.expand_dims(head, 2) # bs,sl,1,vec\n\n logits = scaled_tanh(dependent_etd + head_etd + f_bias, 5.0) # bs,sl,sl,vec\n\n logits_masked = exp_mask_for_high_rank(logits, attn_mask)\n attn_score = tf.nn.softmax(logits_masked, 2) # bs,sl,sl,vec\n attn_score = mask_for_high_rank(attn_score, attn_mask)\n\n attn_result = tf.reduce_sum(attn_score * rep_map_tile, 2) # bs,sl,vec\n\n with tf.variable_scope('output'):\n o_bias = tf.get_variable('o_bias',[ivec], tf.float32, tf.constant_initializer(0.))\n # input gate\n fusion_gate = tf.nn.sigmoid(\n linear(rep_map, ivec, True, 0., 'linear_fusion_i', False, wd, keep_prob, is_train) +\n linear(attn_result, ivec, True, 0., 'linear_fusion_a', False, wd, keep_prob, is_train) +\n o_bias)\n output = fusion_gate * rep_map + (1-fusion_gate) * attn_result\n output = mask_for_high_rank(output, rep_mask)\n\n # save attn\n if tensor_dict is not None and name is not None:\n tensor_dict[name + '_dependent'] = dependent\n tensor_dict[name + '_head'] = head\n tensor_dict[name] = attn_score\n tensor_dict[name + '_gate'] = fusion_gate\n return output\n\n\n# -------------- rnn --------------\ndef contextual_bi_rnn(tensor_rep, mask_rep, hn, cell_type, only_final=False,\n wd=0., keep_prob=1.,is_train=None, scope=None):\n \"\"\"\n fusing contextual information using bi-direction rnn\n :param tensor_rep: [..., sl, vec]\n :param mask_rep: [..., sl]\n :param hn:\n :param cell_type: 'gru', 'lstm', basic_lstm' and 'basic_rnn'\n :param only_final: True or False\n :param wd:\n :param keep_prob:\n :param is_train:\n :param scope:\n :return:\n \"\"\"\n with tf.variable_scope(scope or 'contextual_bi_rnn'): # correct\n reuse = None if not tf.get_variable_scope().reuse else True\n #print(reuse)\n if cell_type == 'gru':\n cell_fw = tf.contrib.rnn.GRUCell(hn, reuse=reuse)\n cell_bw = tf.contrib.rnn.GRUCell(hn, reuse=reuse)\n elif cell_type == 'lstm':\n cell_fw = tf.contrib.rnn.LSTMCell(hn, reuse=reuse)\n cell_bw = tf.contrib.rnn.LSTMCell(hn, reuse=reuse)\n elif cell_type == 'basic_lstm':\n cell_fw = tf.contrib.rnn.BasicLSTMCell(hn, reuse=reuse)\n cell_bw = tf.contrib.rnn.BasicLSTMCell(hn, reuse=reuse)\n elif cell_type == 'basic_rnn':\n cell_fw = tf.contrib.rnn.BasicRNNCell(hn, reuse=reuse)\n cell_bw = tf.contrib.rnn.BasicRNNCell(hn, reuse=reuse)\n else:\n raise AttributeError('no cell type \\'%s\\'' % cell_type)\n cell_dp_fw = SwitchableDropoutWrapper(cell_fw,is_train,keep_prob)\n cell_dp_bw = SwitchableDropoutWrapper(cell_bw,is_train,keep_prob)\n\n tensor_len = tf.reduce_sum(tf.cast(mask_rep, tf.int32), -1) # [bs]\n\n (outputs_fw, output_bw), _ = bidirectional_dynamic_rnn(\n cell_dp_fw, cell_dp_bw, tensor_rep, tensor_len,\n dtype=tf.float32)\n rnn_outputs = tf.concat([outputs_fw,output_bw],-1) # [...,sl,2hn]\n\n if wd > 0:\n add_reg_without_bias()\n if not only_final:\n return rnn_outputs # [....,sl, 2hn]\n else:\n return get_last_state(rnn_outputs, mask_rep) # [...., 2hn]\n\n\n# -------------- emb mat--------------\ndef generate_embedding_mat(dict_size, emb_len, init_mat=None, extra_mat=None,\n extra_trainable=False, scope=None):\n \"\"\"\n generate embedding matrix for looking up\n :param 
dict_size: indices 0 and 1 corresponding to empty and unknown token\n :param emb_len:\n :param init_mat: init mat matching for [dict_size, emb_len]\n :param extra_mat: extra tensor [extra_dict_size, emb_len]\n :param extra_trainable:\n :param scope:\n :return: if extra_mat is None, return[dict_size+extra_dict_size,emb_len], else [dict_size,emb_len]\n \"\"\"\n with tf.variable_scope(scope or 'gene_emb_mat'):\n emb_mat_ept_and_unk = tf.constant(value=0, dtype=tf.float32, shape=[2, emb_len])\n if init_mat is None:\n emb_mat_other = tf.get_variable('emb_mat',[dict_size - 2, emb_len], tf.float32)\n else:\n emb_mat_other = tf.get_variable(\"emb_mat\",[dict_size - 2, emb_len], tf.float32,\n initializer=tf.constant_initializer(init_mat[2:], dtype=tf.float32,\n verify_shape=True))\n emb_mat = tf.concat([emb_mat_ept_and_unk, emb_mat_other], 0)\n\n if extra_mat is not None:\n if extra_trainable:\n extra_mat_var = tf.get_variable(\"extra_emb_mat\",extra_mat.shape, tf.float32,\n initializer=tf.constant_initializer(extra_mat,\n dtype=tf.float32,\n verify_shape=True))\n return tf.concat([emb_mat, extra_mat_var], 0)\n else:\n #with tf.device('/cpu:0'):\n extra_mat_con = tf.constant(extra_mat, dtype=tf.float32)\n return tf.concat([emb_mat, extra_mat_con], 0)\n else:\n return emb_mat\n\n\n"
] |
[
[
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.stack",
"tensorflow.equal",
"tensorflow.cast",
"tensorflow.greater",
"tensorflow.add",
"tensorflow.name_scope",
"tensorflow.logical_not",
"tensorflow.shape",
"tensorflow.less",
"tensorflow.nn.tanh",
"tensorflow.not_equal",
"tensorflow.nn.softmax",
"tensorflow.range",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.constant_initializer",
"tensorflow.variable_scope",
"tensorflow.logical_and"
],
[
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.contrib.rnn.GRUCell",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.contrib.rnn.BasicRNNCell",
"tensorflow.greater",
"tensorflow.shape",
"tensorflow.nn.tanh",
"tensorflow.meshgrid",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.contrib.rnn.BasicLSTMCell",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.constant_initializer",
"tensorflow.contrib.rnn.LSTMCell",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope",
"tensorflow.logical_and"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.0",
"1.2"
]
}
] |
18279406017/code-of-csdn
|
[
"0c22f3abda9605f9a46e4f639739904ed271e6d7"
] |
[
"Traffic Light Detection using Python OpenCV/TLState.py"
] |
[
"import cv2\r\nimport random\r\nimport numpy as np\r\nfrom enum import Enum\r\nfrom detectColor import detectColor\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nclass TLState(Enum):\r\n red = 1\r\n yellow = 2\r\n green = 3\r\n red_yellowArrow = 4\r\n red_greenArrow = 5\r\n green_yellowArrow = 6\r\n green_greenArrow = 7\r\n redArrow = 8\r\n yellowArrow = 9\r\n greenArrow = 10\r\n flashingYellowArrow = 11\r\n\r\nclass TLType(Enum):\r\n regular = 0\r\n five_lights = 1\r\n four_lights = 2\r\n\r\ndef imgResize(image, height, inter = cv2.INTER_AREA):\r\n # initialize the dimensions of the image to be resized and grab the image size\r\n dim = None\r\n (h, w) = image.shape[:2]\r\n # calculate the ratio of the height and construct the dimensions\r\n r = height / float(h)\r\n dim = (int(w * r), height)\r\n # resize the image\r\n resized = cv2.resize(image, dim, interpolation = inter)\r\n # return the resized image\r\n return resized\r\n\r\ndef detectState(image, TLType):\r\n image = imgResize(image, 200)\r\n (height, width) = image.shape[:2]\r\n output = image.copy()\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n # 霍夫圆环检测\r\n circles = cv2.HoughCircles(gray,cv2.HOUGH_GRADIENT,1,20,\r\n param1=50,param2=30,minRadius=15,maxRadius=30)\r\n overallState = 0\r\n stateArrow = 0\r\n stateSolid = 0\r\n if circles is not None:\r\n circles = np.uint16(np.around(circles))\r\n\r\n for i in circles[0,:]:\r\n if i[1] < i[2]:\r\n i[1] = i[2]\r\n roi = image[(i[1]-i[2]):(i[1]+i[2]),(i[0]-i[2]):(i[0]+i[2])]\r\n color = detectColor(roi)\r\n if color > 0:\r\n if TLType == 1 and i[0] < width/2 and i[1] > height/3:\r\n stateArrow = color\r\n elif TLType == 2:\r\n stateArrow = color\r\n if i[1] > height/2 and i[1] < height/4*3:\r\n stateArrow = color + 2\r\n else:\r\n stateSolid = color\r\n\r\n if TLType == 1:\r\n overallState = stateArrow + stateSolid + 1\r\n elif TLType == 2:\r\n overallState = stateArrow + 7\r\n else:\r\n overallState = stateSolid\r\n\r\n return overallState\r\n\r\ndef plot_light_result(images):\r\n\r\n for i, image in enumerate(images):\r\n plt.subplot(1, len(images), i+1)\r\n lena = mpimg.imread(image)\r\n label = TLState(detectState(cv2.imread(image),TLType.regular.value)).name\r\n plt.title(label)\r\n plt.imshow(lena)\r\n plt.show()\r\n\r\nlight_path = [\"images/red.jpg\",\"images/green.png\", \"images/yellow.png\"]\r\nrandom.shuffle(light_path)\r\nplot_light_result(light_path)\r\n\r\ndef plot_arrow_result(images):\r\n\r\n for i, image in enumerate(images):\r\n plt.subplot(1, len(images), i+1)\r\n lena = mpimg.imread(image)\r\n label = TLState(detectState(cv2.imread(image),TLType.five_lights.value)).name\r\n plt.title(label)\r\n plt.imshow(imgResize(lena, 200))\r\n plt.show()\r\n\r\narrow_path = [\"images/red_greenarrow.png\", \"images/red_yellowarrow.png\"]\r\nrandom.shuffle(arrow_path)\r\nplot_arrow_result(arrow_path)\r\n"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"numpy.around",
"matplotlib.image.imread",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TomArrow/ebu_adm_renderer
|
[
"c09691610d42aaa42f1a547ab85a468d33dfe0e3",
"c09691610d42aaa42f1a547ab85a468d33dfe0e3"
] |
[
"da_ear/common.py",
"da_ear/core/util.py"
] |
[
"from attr import attrs, attrib\nfrom attr.validators import instance_of\nimport numpy as np\n\n\ndef validate_range(minimum, maximum):\n def f(instance, attribute, value):\n if not (minimum <= value <= maximum):\n raise ValueError('value \"%s\" out of range ( % s, % s)'\n % (value, minimum, maximum))\n return f\n\n\ndef list_of(type):\n \"\"\"Attrs validator that checks for a list containing only a given type.\n\n Parameters:\n type: expected type of list items\n\n Returns:\n function: Validation function as required by the attr.attrib validator\n argument.\n \"\"\"\n list_validator = instance_of(list)\n\n def f(inst, attr, value):\n list_validator(inst, attr, value)\n\n for item in value:\n if not isinstance(item, type):\n raise TypeError(\n \"'{name}' items must be {type!r} (got {item!r} that is a \"\n \"{actual!r}).\"\n .format(name=attr.name, type=type,\n actual=item.__class__, item=item),\n attr, type, item,\n )\n return f\n\n\ndef cart(az, el, dist, axis=-1):\n \"\"\"Convert ADM-format polar positions to ADM-format Cartesian.\n\n Parameters:\n az: Azimuths in degrees, angle measured anticlockwise from front.\n el: Elevations in degrees, angle measured up from equator.\n r: Radii.\n axis: Index of the new axis in the result; see :func:`numpy.stack`. -1\n (default) adds a new axis at the end.\n\n Returns:\n ndarray: Same shape as broadcasting az, el and r together, with a new\n axis at `axis` containing the X, Y and Z coordinates.\n\n Examples:\n >>> cart(0, 0, 1)\n array([0., 1., 0.])\n >>> cart(90, 0, 1).round(6)\n array([-1., 0., 0.])\n >>> cart(0, 90, 1).round(6)\n array([0., 0., 1.])\n >>> # inputs are broadcast together...\n >>> cart([0, 90], 0, 1).round(6)\n array([[ 0., 1., 0.],\n [-1., 0., 0.]])\n >>> # ... along the given axis\n >>> cart([0, 90], 0, 1, axis=0).round(6)\n array([[ 0., -1.],\n [ 1., 0.],\n [ 0., 0.]])\n \"\"\"\n az, el, dist = np.broadcast_arrays(az, el, dist)\n\n return np.stack((np.sin(np.radians(-az)) * np.cos(np.radians(el)) * dist,\n np.cos(np.radians(-az)) * np.cos(np.radians(el)) * dist,\n np.sin(np.radians(el)) * dist),\n axis=axis)\n\n\ndef azimuth(positions, axis=-1):\n \"\"\"Get the azimuth in degrees from ADM-format Cartesian positions.\n\n Parameters:\n positions (array of float): Cartesian positions, with X, Y and Z\n positions along axis `axis`.\n axis (int): Axis to find coordinates along. 
-1 (default) indicates the\n last axis.\n\n Returns:\n array: Azimuths of the positions in degrees; has the same shape as\n positions, with `axis` removed.\n\n Raises:\n ValueError: If positions does not have the right length along axis.\n\n Examples:\n\n >>> azimuth([0, 1, 0]).round(0).astype(int)\n 0\n >>> azimuth([[1, 0, 0], [0, 1, 0]]).round(0).astype(int)\n array([-90, 0])\n >>> azimuth([[1, 0], [0, 1], [0, 0]], axis=0).round(0).astype(int)\n array([-90, 0])\n \"\"\"\n x, y, z = np.moveaxis(positions, axis, 0)\n return -np.degrees(np.arctan2(x, y))\n\n\ndef elevation(positions, axis=-1):\n \"\"\"Get the elevation in degrees from ADM-format Cartesian positions.\n\n See :func:`azimuth`.\n \"\"\"\n x, y, z = np.moveaxis(positions, axis, 0)\n radius = np.hypot(x, y)\n return np.degrees(np.arctan2(z, radius))\n\n\ndef distance(positions, axis=-1):\n \"\"\"Get the distance from ADM-format Cartesian positions.\n\n See :func:`azimuth`.\n \"\"\"\n return np.linalg.norm(positions, axis=axis)\n\n\nclass PolarPositionMixin(object):\n \"\"\"Methods to be defined on all polar position objects which have azimuth,\n elevation and distance attributes.\"\"\"\n __slots__ = ()\n\n def as_cartesian_array(self):\n \"\"\"Get the position as a Cartesian array.\n\n Returns:\n np.array of shape (3,): equivalent X, Y and Z coordinates\n \"\"\"\n return cart(self.azimuth, self.elevation, self.distance)\n\n def as_cartesian_position(self) -> \"CartesianPosition\":\n \"\"\"Get the equivalent cartesian position.\"\"\"\n x, y, z = self.as_cartesian_array()\n return CartesianPosition(x, y, z)\n\n @property\n def norm_position(self):\n return cart(self.azimuth, self.elevation, 1.0)\n\n\nclass CartesianPositionMixin(object):\n \"\"\"Methods to be defined on all Cartesian position objects which have X, Y\n and Z attributes.\"\"\"\n __slots__ = ()\n\n def as_cartesian_array(self):\n \"\"\"Get the position as a Cartesian array.\n\n Returns:\n np.array of shape (3,): equivalent X, Y and Z coordinates\n \"\"\"\n return np.array([self.X, self.Y, self.Z])\n\n def as_polar_position(self) -> \"PolarPosition\":\n \"\"\"Get the equivalent cartesian position.\"\"\"\n cart_array = self.as_cartesian_array()\n return PolarPosition(azimuth(cart_array), elevation(cart_array), distance(cart_array))\n\n\nclass Position(object):\n \"\"\"A 3D position represented in polar or Cartesian coordinates.\"\"\"\n __slots__ = ()\n\n\n@attrs(slots=True)\nclass PolarPosition(Position, PolarPositionMixin):\n \"\"\"A 3D position represented in ADM-format polar coordinates.\n\n Attributes:\n azimuth (float): anti-clockwise azimuth in degrees, measured from the\n front\n elevation (float): elevation in degrees, measured upwards from the\n equator\n distance (float): distance relative to the audioPackFormat\n absoluteDistance parameter\n \"\"\"\n\n azimuth = attrib(converter=float, validator=validate_range(-180, 180))\n elevation = attrib(converter=float, validator=validate_range(-90, 90))\n distance = attrib(converter=float, validator=validate_range(0, float('inf')),\n default=1.0)\n\n\n@attrs(slots=True)\nclass CartesianPosition(Position, CartesianPositionMixin):\n \"\"\"A 3D position represented in ADM-format Cartesian coordinates.\n\n Attributes:\n X (float): left-to-right position, from -1 to 1\n Y (float): back-to-front position, from -1 to 1\n Z (float): bottom-to-top position, from -1 to 1\n \"\"\"\n\n X = attrib(converter=float)\n Y = attrib(converter=float)\n Z = attrib(converter=float)\n\n\n@attrs(slots=True, frozen=True)\nclass 
CartesianScreen(object):\n \"\"\"ADM screen representation using Cartesian coordinates.\n\n This is used to represent the audioProgrammeReferenceScreen, as well as the\n screen position in the reproduction room.\n\n Attributes:\n aspectRatio (float): aspect ratio\n centrePosition (CartesianPosition): screenCentrePosition element\n widthX (float): screenWidth X attribute\n \"\"\"\n aspectRatio = attrib(validator=instance_of(float))\n centrePosition = attrib(validator=instance_of(CartesianPosition))\n widthX = attrib(validator=instance_of(float))\n\n\n@attrs(slots=True, frozen=True)\nclass PolarScreen(object):\n \"\"\"ADM screen representation using Cartesian coordinates.\n\n This is used to represent the audioProgrammeReferenceScreen, as well as the\n screen position in the reproduction room.\n\n Attributes:\n aspectRatio (float): aspect ratio\n centrePosition (PolarPosition): screenCentrePosition element\n widthX (float): screenWidth azimuth attribute\n \"\"\"\n aspectRatio = attrib(validator=instance_of(float))\n centrePosition = attrib(validator=instance_of(PolarPosition))\n widthAzimuth = attrib(validator=instance_of(float))\n\n\ndefault_screen = PolarScreen(aspectRatio=1.78,\n centrePosition=PolarPosition(\n azimuth=0.0,\n elevation=0.0,\n distance=1.0),\n widthAzimuth=58.0)\n\"\"\"The default screen position, size and shape.\"\"\"\n",
"import numpy as np\nfrom attr import attrs, attrib\n\n\ndef has_shape(*shape):\n \"\"\"Attrs validator that checks that a numpy array has the given shape.\n\n Parameters:\n *shape: shape to match against; any elements that are None are ignored\n and may be any length.\n\n Returns:\n function: Validation function as rquired by the attr.attrib validator\n argument.\n\n Example:\n >>> @attrs\n ... class Test(object):\n ... x = attrib(validator=has_shape(None, 2))\n >>> Test(np.array([[1,2]]))\n Test(x=array([[1, 2]]))\n >>> Test(np.array([]))\n Traceback (most recent call last):\n ...\n ValueError: (\"'x' must be of shape (None, 2) which array([]...\n \"\"\"\n def f(inst, attr, value):\n if (len(value.shape) != len(shape) or\n any(dim_b is not None and dim_a != dim_b\n for dim_a, dim_b in zip(value.shape, shape))):\n raise ValueError(\n \"'{name}' must be of shape {shape} which {value!r} isn't.\"\n .format(name=attr.name, shape=shape, value=value),\n attr, shape, value,\n )\n return f\n\n\ndef as_array(**kwargs):\n \"\"\"Make an attrs conversion function that calls np.asarray with the\n provided arguments.\n\n Example:\n >>> @attrs\n ... class Test(object):\n ... x = attrib(converter=as_array(dtype=float))\n >>> Test([1])\n Test(x=array([1.]))\n \"\"\"\n def f(x):\n return np.asarray(x, **kwargs)\n return f\n\n\ndef safe_norm_position(position):\n \"\"\"\n Parameters:\n position (array of shape (3,)): Position to normalise.\n\n Returns:\n array of shape (3,): normalised position\n \"\"\"\n norm = np.linalg.norm(position)\n if norm < 1e-10:\n return np.array([0.0, 1.0, 0.0])\n else:\n return position / norm\n\n\ndef interp_sorted(x, xp, yp):\n \"\"\"same as np.interp, but checks that xp is sorted\"\"\"\n xp = np.array(xp)\n assert np.all(xp[:-1] <= xp[1:]), \"unsorted xp values in call to interp\"\n return np.interp(x, xp, yp)\n"
] |
[
[
"numpy.radians",
"numpy.broadcast_arrays",
"numpy.linalg.norm",
"numpy.arctan2",
"numpy.moveaxis",
"numpy.array",
"numpy.hypot"
],
[
"numpy.asarray",
"numpy.linalg.norm",
"numpy.all",
"numpy.interp",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yanconglin/Deep-Hough-Transform-Line-Priors
|
[
"628bae16c5deccc7b1c6688f295533b3adba86a7"
] |
[
"ht-lcnn/lcnn/models/HT.py"
] |
[
"#encoding:utf-8\n\"\"\"\nDeep-Hough-Transform-Line-Priors (ECCV 2020) https://arxiv.org/abs/2007.09493\n\nYancong Lin, and Silvia Laura Pintea, and Jan C. van Gemert\n\ne-mail: y.lin-1ATtudelftDOTnl\n\nVision Lab, Delft University of Technology\n\nMIT license\n\n\"\"\"\n\nfrom torch.nn import functional as F\nimport math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom scipy import ndimage\nimport cv2\nimport sys\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\n\n# ####################################HT########################################################\ndef hough_transform(rows, cols, theta_res, rho_res):\n\n theta = np.linspace(0, 180.0, int(np.ceil(180.0 / theta_res) + 1.0))\n theta = theta[0:len(theta) - 1]\n\n ### Actually,the offset does not have to this large, because the origin is located at the image center.\n D = np.sqrt((rows - 1) ** 2 + (cols - 1) ** 2)\n ### replace the line above to reduce unnecessray computation (significantly).\n # D = np.sqrt((rows/2) ** 2 + (cols/2) ** 2)\n \n q = np.ceil(D / rho_res)\n nrho = 2 * q + 1\n rho = np.linspace(-q * rho_res, q * rho_res, int(nrho))\n\n w = np.size(theta)\n h = np.size(rho)\n cos_value = np.cos(theta * np.pi / 180.0).astype(np.float32)\n sin_value = np.sin(theta * np.pi / 180.0).astype(np.float32)\n sin_cos = np.concatenate((sin_value[None, :], cos_value[None, :]), axis=0)\n\n ### This is much more memory-efficient by shifting the coordinate ####\n coords_r, coords_w = np.ones((rows, cols)).nonzero()\n coords = np.concatenate((coords_r[:,None], coords_w[:,None]), axis=1).astype(np.float32)\n\n coords[:,0] = rows-coords[:,0]-rows//2\n coords[:,1] = coords[:,1] +1 - cols//2\n\n vote_map = (coords @ sin_cos).astype(np.float32)\n\n vote_index = np.zeros((rows * cols, h, w))\n for i in range(rows*cols):\n for j in range(w):\n rhoVal = vote_map[i, j]\n rhoIdx = np.nonzero(np.abs(rho - rhoVal) == np.min(np.abs(rho - rhoVal)))[0]\n vote_map[i, j] = float(rhoIdx[0])\n vote_index[i, rhoIdx[0], j] = 1\n\n\n ### remove all-zero lines in the HT maps ####\n vote_rho_idx = vote_index.reshape(rows * cols, h, w).sum(axis=0).sum(axis=1)\n vote_index = vote_index[:,vote_rho_idx>0.0 ,:]\n ### update h, since we remove those HT lines without any votes\n ### slightly different from the original paper, the HT size in this script is 182x60.\n h = (vote_rho_idx>0.0).sum()\n return vote_index.reshape(rows, cols, h, w)\n\n\n# torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True)\ndef make_conv_block(in_channels, out_channels, kernel_size=3, stride=1, padding=0, dilation=1, groups=1, bias=False):\n layers = []\n layers += [nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)]\n ### no batchnorm layers\n # layers += [nn.BatchNorm2d(out_channels)]\n layers += [nn.ReLU(inplace=True)]\n return nn.Sequential(*layers)\n\nclass HT(nn.Module):\n def __init__(self, vote_index):\n super(HT, self).__init__()\n self.r, self.c, self.h, self.w = vote_index.size()\n self.norm = max(self.r, self.c)\n self.vote_index = vote_index.view(self.r * self.c, self.h *self.w)\n self.total = vote_index.sum(0).max()\n def forward(self, image):\n batch, channel, _, _ = image.size()\n image = image.view(batch,channel, -1).view(batch*channel, -1)\n image = F.relu(image)\n HT_map = image @ self.vote_index\n ### normalization ###\n # HT_map = HT_map/self.total\n ### normalized by max(rows, cols)\n HT_map = HT_map/(self.norm)\n HT_map = HT_map.view(batch, channel, 
-1).view(batch, channel, self.h, self.w)\n return HT_map\n\n\n\nclass IHT(nn.Module):\n def __init__(self, vote_index):\n super(IHT, self).__init__()\n self.r, self.c, self.h, self.w = vote_index.size()\n self.vote_index = vote_index.view(self.r * self.c, self.h * self.w).t()\n\n def forward(self, input_HT):\n batch, channel, _, _ = input_HT.size()\n input_HT = F.relu(input_HT)\n input_HT = input_HT.view(batch, channel, self.h * self.w).view(batch * channel, self.h * self.w)\n IHT_map = input_HT @ self.vote_index\n IHT_map = IHT_map.view(batch, channel, self.r*self.c).view(batch, channel, self.r, self.c)\n # return IHT_map/float(self.w)\n return IHT_map\n\n\nclass HTIHT(nn.Module):\n def __init__(self, vote_index, inplanes, outplanes):\n super(HTIHT, self).__init__()\n\n self.conv1 = nn.Sequential(*make_conv_block(inplanes, inplanes, kernel_size=(9,1), padding=(4,0), bias=True, groups=inplanes))\n self.conv2 = nn.Sequential(*make_conv_block(inplanes, outplanes, kernel_size=(9,1), padding=(4,0), bias=True))\n self.conv3 = nn.Sequential(*make_conv_block(outplanes, outplanes, kernel_size=(9,1), padding=(4,0), bias=True))\n\n self.relu = nn.ReLU(inplace=True)\n self.tanh = nn.Tanh()\n self.ht = HT(vote_index)\n self.iht = IHT(vote_index)\n\n filtersize = 4\n x = np.zeros(shape=((2 * filtersize + 1)))\n x[filtersize] = 1\n z = []\n for _ in range(0, inplanes):\n sigma = np.random.uniform(low=1, high=2.5, size=(1))\n y = ndimage.filters.gaussian_filter(x, sigma=sigma, order=2)\n y = -y / np.sum(np.abs(y))\n z.append(y)\n z = np.stack(z)\n self.conv1[0].weight.data.copy_(torch.from_numpy(z).unsqueeze(1).unsqueeze(3))\n nn.init.kaiming_normal_(self.conv2[0].weight, mode='fan_out', nonlinearity='relu')\n nn.init.kaiming_normal_(self.conv3[0].weight, mode='fan_out', nonlinearity='relu')\n\n def forward(self, x, **kwargs):\n out = self.ht(x)\n out = self.conv1(out)\n out = self.conv2(out)\n out = self.conv3(out)\n out = self.iht(out)\n return out\n\n\nclass CAT_HTIHT(nn.Module):\n\n def __init__(self, vote_index, inplanes, outplanes):\n super(CAT_HTIHT, self).__init__()\n self.htiht = HTIHT(vote_index, inplanes, outplanes)\n self.bn = nn.BatchNorm2d(inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.conv_cat = nn.Conv2d(inplanes+outplanes, inplanes, kernel_size=3, padding=1, bias=False)\n def forward(self, x):\n x = self.bn(x)\n x = self.relu(x)\n y = self.htiht(x)\n out = self.conv_cat(torch.cat([x,y], dim=1))\n return out\n\n\n\nif __name__ == \"__main__\":\n ### Default settings: (128, 128, 3, 1)\n vote_index = hough_transform(rows=128, cols=128, theta_res=3, rho_res=1)\n rows, cols, h, w = vote_index.shape\n print('vote_index', vote_index.shape)\n # sio.savemat('../../vote_index_128_31.mat', {'vote_index': vote_index})\n\n\n"
] |
[
[
"numpy.sqrt",
"torch.cat",
"numpy.concatenate",
"scipy.ndimage.filters.gaussian_filter",
"torch.from_numpy",
"numpy.stack",
"numpy.sin",
"numpy.ceil",
"numpy.size",
"torch.nn.functional.relu",
"numpy.zeros",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"numpy.abs",
"numpy.cos",
"torch.nn.Tanh",
"numpy.ones",
"numpy.random.uniform",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
m4ni5h/PythonScripts
|
[
"7adffd478cf5ab3863eb69af1c2a04b3655a872f"
] |
[
"NSEPyProject/PE_Earning_Analysis_Func.py"
] |
[
"# https://nsepy.readthedocs.io/en/latest/\n\nfrom nsepy import get_history\nfrom nsepy import get_index_pe_history\nfrom datetime import date\nimport calendar\nimport pandas as pd\nfrom pandas import Series, DataFrame\n\n# =(K3-K2)/K2*100\n# =(K3-K2)\nSTARTYEAR = 2011\nENDYEAR = 2020\n\n\ndef indexhistoryyear(indexsymbol, year):\n quarter = [3, 6, 9, 12]\n index_history = pd.DataFrame()\n for n in quarter:\n index_month_history = get_history(symbol=indexsymbol, start=date(year, n, 1),\n end=date(year, n, calendar.monthrange(year, n)[1]), index=True)\n index_history = index_history.append(index_month_history.iloc[[-1]])\n return index_history\n\n\n# indexhistoryyear(\"NIFTY AUTO\", 2019)\n\ndef indexhistory(indexsymbol):\n indexhistyear = pd.DataFrame()\n years = range(STARTYEAR, ENDYEAR)\n for year in years:\n indexhistyear = indexhistyear.append(indexhistoryyear(indexsymbol, year))\n indexhistyear = indexhistyear.append(\n get_history(symbol=indexsymbol, start=date(2020, 3, 31), end=date(2020, 3, 31), index=True))\n return indexhistyear\n\n\ndef PEhistoryyear(indexsymbol, year):\n quarter = [3, 6, 9, 12]\n PE_history = pd.DataFrame()\n for n in quarter:\n PE_month_history = get_index_pe_history(symbol=indexsymbol, start=date(year, n, 1),\n end=date(year, n, calendar.monthrange(year, n)[1]))\n PE_history = PE_history.append(PE_month_history.iloc[[-1]])\n return PE_history\n\n\n# PEhistoryyear(\"NIFTY ENERGY\", 2009)\n\ndef PEhistory(indexsymbol):\n PEhistyear = pd.DataFrame()\n years = range(STARTYEAR, ENDYEAR)\n for year in years:\n PEhistyear = PEhistyear.append(PEhistoryyear(indexsymbol, year))\n PEhistyear = PEhistyear.append(\n get_index_pe_history(symbol=indexsymbol, start=date(2020, 3, 31), end=date(2020, 3, 31)))\n return PEhistyear\n\n\n# PEhistory(\"NIFTY AUTO\")\n\ndef oldindexhistory(indexsymbol):\n index_history = get_history(symbol=indexsymbol, start=date(2009, 3, 31), end=date(2009, 3, 31), index=True)\n index_history = index_history.append(\n get_history(symbol=indexsymbol, start=date(2010, 3, 31), end=date(2010, 3, 31), index=True))\n index_history = index_history.append(\n get_history(symbol=indexsymbol, start=date(2011, 3, 31), end=date(2011, 3, 31), index=True))\n index_history = index_history.append(\n get_history(symbol=indexsymbol, start=date(2012, 3, 30), end=date(2012, 3, 30), index=True))\n index_history = index_history.append(\n get_history(symbol=indexsymbol, start=date(2013, 3, 28), end=date(2013, 3, 28), index=True))\n index_history = index_history.append(\n get_history(symbol=indexsymbol, start=date(2014, 3, 31), end=date(2014, 3, 31), index=True))\n index_history = index_history.append(\n get_history(symbol=indexsymbol, start=date(2015, 3, 31), end=date(2015, 3, 31), index=True))\n index_history = index_history.append(\n get_history(symbol=indexsymbol, start=date(2016, 3, 31), end=date(2016, 3, 31), index=True))\n index_history = index_history.append(\n get_history(symbol=indexsymbol, start=date(2017, 3, 31), end=date(2017, 3, 31), index=True))\n index_history = index_history.append(\n get_history(symbol=indexsymbol, start=date(2018, 3, 28), end=date(2018, 3, 28), index=True))\n index_history = index_history.append(\n get_history(symbol=indexsymbol, start=date(2019, 3, 29), end=date(2019, 3, 29), index=True))\n index_history = index_history.append(\n get_history(symbol=indexsymbol, start=date(2020, 3, 31), end=date(2020, 3, 31), index=True))\n print(index_history)\n return index_history\n\n\ndef oldPEhistory(indexsymbol):\n pe_history = 
get_index_pe_history(symbol=indexsymbol, start=date(2009, 3, 31), end=date(2009, 3, 31))\n pe_history = pe_history.append(\n get_index_pe_history(symbol=indexsymbol, start=date(2010, 3, 31), end=date(2010, 3, 31)))\n pe_history = pe_history.append(\n get_index_pe_history(symbol=indexsymbol, start=date(2011, 3, 31), end=date(2011, 3, 31)))\n pe_history = pe_history.append(\n get_index_pe_history(symbol=indexsymbol, start=date(2012, 3, 30), end=date(2012, 3, 30)))\n pe_history = pe_history.append(\n get_index_pe_history(symbol=indexsymbol, start=date(2013, 3, 28), end=date(2013, 3, 28)))\n pe_history = pe_history.append(\n get_index_pe_history(symbol=indexsymbol, start=date(2014, 3, 31), end=date(2014, 3, 31)))\n pe_history = pe_history.append(\n get_index_pe_history(symbol=indexsymbol, start=date(2015, 3, 31), end=date(2015, 3, 31)))\n pe_history = pe_history.append(\n get_index_pe_history(symbol=indexsymbol, start=date(2016, 3, 31), end=date(2016, 3, 31)))\n pe_history = pe_history.append(\n get_index_pe_history(symbol=indexsymbol, start=date(2017, 3, 31), end=date(2017, 3, 31)))\n pe_history = pe_history.append(\n get_index_pe_history(symbol=indexsymbol, start=date(2018, 3, 28), end=date(2018, 3, 28)))\n pe_history = pe_history.append(\n get_index_pe_history(symbol=indexsymbol, start=date(2019, 3, 29), end=date(2019, 3, 29)))\n pe_history = pe_history.append(\n get_index_pe_history(symbol=indexsymbol, start=date(2020, 3, 31), end=date(2020, 3, 31)))\n print(pe_history)\n return pe_history\n\n\ndef earninganalysis(indexsymbol):\n pe_history = PEhistory(indexsymbol)\n index_history = indexhistory(indexsymbol)\n pe_analysis = pd.merge(pe_history, index_history, on='Date')\n earnings = (pe_analysis['Close'] / pe_analysis['P/E']).rename(\"Earnings\")\n earnings = pd.DataFrame(earnings)\n pe_analysis = pd.merge(pe_analysis, earnings, on='Date')\n csvfile = indexsymbol + \"_PEAnalysis.csv\"\n pe_analysis.to_csv(csvfile)\n\n\n# earninganalysis(\"NIFTY INFRASTRUCTURE\")\n# , \"NIFTY ENERGY\" 2011,\n# \"NIFTY FINANCIAL SERVICES\", \"NIFTY FMCG\", \"NIFTY METAL\", \"NIFTY PHARMA\", \"NIFTY INFRASTRUCTURE\"\nIndices = [\"NIFTY 50\", \"NIFTY AUTO\", \"NIFTY BANK\", \"NIFTY IT\", \"NIFTY REALTY\", \"NIFTY COMMODITIES\",\n \"NIFTY ENERGY\",\n # \"NIFTY FINANCIAL SERVICES\",\n \"NIFTY FMCG\", \"NIFTY METAL\", \"NIFTY PHARMA\"\n # , \"NIFTY INFRASTRUCTURE\"\n ]\nfor nseindex in Indices:\n print(nseindex)\n earninganalysis(nseindex)\n print(\"Done\")\n"
] |
[
[
"pandas.merge",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ErillLab/MGE_TF
|
[
"31af2a203b9a1fde73d35a668b61f365af325257"
] |
[
"src/mge.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport random\r\nimport copy\r\nfrom Bio.Seq import Seq\r\nfrom genome import Genome\r\nimport warnings\r\n\r\n\r\nclass MGE():\r\n \r\n def __init__(self, filepath, fileformat):\r\n \r\n self.original = Genome(filepath, fileformat)\r\n self.pseudogenomes = []\r\n self.source = (filepath, fileformat)\r\n self.pseudo_g_counter = 0\r\n self.n_bins = 50 # !!! Set from config\r\n \r\n # p-values\r\n self.n_sites = None\r\n self.site_density = None\r\n self.avg_score = None\r\n self.extremeness = None\r\n self.entropy = None\r\n self.norm_entropy = None\r\n self.gini = None\r\n self.norm_gini = None\r\n self.evenness = None\r\n self.new_evenness = None\r\n self.intergenicity = None\r\n \r\n def set_pseudogenomes(self, n_pseudogenomes, kmer_len):\r\n '''\r\n Sets the pseudogenomes attribute: a list of pseudogenomes.\r\n '''\r\n for i in range(n_pseudogenomes):\r\n pseudogenome = self.get_pseudogenome(kmer_len)\r\n self.pseudogenomes.append(pseudogenome)\r\n \r\n def get_pseudogenome(self, kmer_len):\r\n '''\r\n It generates a 'pseudogenome'. For each genomic unit in the original\r\n genome sequence, a k-sampled sequence is generated. The pseudogenome is\r\n composed of these pseudo-units (k-sampled sequences) joined in the same\r\n order as their corresponding units appear on the original genome, to\r\n preserve genomic structure. In other words, each genomic unit is\r\n independently 'k-sampled' (using the 'get_k_sampled_sequence' method).\r\n '''\r\n pseudogenome = copy.deepcopy(self.original)\r\n self.clear_stats(pseudogenome)\r\n pseudogenome.seq = Seq(\"\")\r\n units_bounds = pseudogenome.genomic_units['bounds']\r\n for i in range(len(units_bounds)-1):\r\n unit = self.original.seq[units_bounds[i]: units_bounds[i+1]]\r\n pseudogenome.seq += self.get_k_sampled_sequence(unit, kmer_len)\r\n \r\n # The permuted genome is assigned a unique ID\r\n self.increase_pseudo_g_counter()\r\n pseudogenome.id = str(pseudogenome.id) + '_' + str(self.pseudo_g_counter)\r\n pseudogenome.name = str(pseudogenome.name) + ' pseudo_' + str(self.pseudo_g_counter)\r\n pseudogenome.description = str(pseudogenome.description) + 'pseudo_' + str(self.pseudo_g_counter)\r\n return pseudogenome\r\n \r\n def increase_pseudo_g_counter(self):\r\n self.pseudo_g_counter += 1\r\n \r\n def get_k_sampled_sequence(self, sequence, k):\r\n '''\r\n All kmers are stored. 
Than sampled without replacement.\r\n Example with k = 3:\r\n ATCAAAGTCCCCGTACG\r\n for which 3-mers are\r\n ATC, TCA, CAA, AAA, AAG, ...\r\n A new sequence is generated by sampling (without replacement) from that\r\n complete set of k-mers.\r\n The nucleotide content (1-mers content) may not be perfectly identical\r\n because of overlap between k-mers that are then randomly sampled.\r\n '''\r\n \r\n if k > 1:\r\n n_kmers = len(sequence) // k\r\n n_nuclotides_rem = len(sequence) % k\r\n \r\n all_kmers = self.get_all_kmers(sequence, k)\r\n sampled_seq_list = random.sample(all_kmers, n_kmers)\r\n n_nucleotides = random.sample(str(sequence), n_nuclotides_rem)\r\n sampled_seq_list += n_nucleotides\r\n \r\n else:\r\n sampled_seq_list = random.sample(str(sequence), len(sequence))\r\n \r\n sampled_seq = Seq(\"\".join(sampled_seq_list))\r\n return sampled_seq\r\n \r\n def get_all_kmers(self, seq, k):\r\n '''\r\n Returns the list of all the k-mers of length k in sequence seq.\r\n '''\r\n return [str(seq)[i:i+k] for i in range(len(seq)-k+1)]\r\n \r\n def clear_stats(self, genome):\r\n ''' Ensures all the statistics in the 'stats' list are set to None. '''\r\n stats = ['n_sites', 'site_density', 'avg_score', 'extremeness',\r\n 'counts', 'entropy', 'norm_entropy', 'gini', 'norm_gini',\r\n 'evenness', 'new_evenness', 'intergenicity']\r\n for stat in stats:\r\n vars(genome)[stat] = None\r\n \r\n def scan(self, motif, pseudocount, threshold=None):\r\n '''\r\n Scans the original genome and all the pseudogenomes with the PSSM of a\r\n given motif.\r\n '''\r\n self.original.scan(motif, pseudocount, threshold=threshold)\r\n for pg in self.pseudogenomes:\r\n pg.scan(motif, pseudocount, threshold=threshold)\r\n \r\n def analyze_scores(self):\r\n ''' Sets the p-value for the statistics related to the PSSM-scores. '''\r\n genomes = [self.original] + self.pseudogenomes\r\n for g in genomes:\r\n g.analyze_scores()\r\n # Set p-values\r\n self.set_pvalue('avg_score', 'greater')\r\n self.set_pvalue('extremeness', 'greater')\r\n \r\n def analyze_positional_distribution(self):\r\n ''' Sets the p-value for the statistics related to the positional\r\n distribution. '''\r\n genomes = [self.original] + self.pseudogenomes\r\n for g in genomes:\r\n g.analyze_positional_distribution(self.n_bins)\r\n # Set p-values\r\n self.set_pvalue('entropy', 'smaller')\r\n self.set_pvalue('norm_entropy', 'smaller')\r\n self.set_pvalue('gini', 'greater')\r\n self.set_pvalue('norm_gini', 'greater')\r\n self.set_pvalue('evenness', 'greater')\r\n self.set_pvalue('new_evenness', 'smaller')\r\n \r\n def analyze_intergenicity(self):\r\n ''' Sets the p-value for the statistics related to the intergenicity. '''\r\n genomes = [self.original] + self.pseudogenomes\r\n for g in genomes:\r\n g.analyze_intergenicity()\r\n # Set p-values\r\n self.set_pvalue('intergenicity', 'greater')\r\n \r\n def set_pvalue(self, metric, alternative):\r\n '''\r\n Estimates the p-value for a given metric, and a given alternative\r\n hypothesis. 
The estimate is based on the frequency of pseudogenomes\r\n that can reproduce the results observed on the original genome.\r\n '''\r\n control_values = []\r\n for genome in self.pseudogenomes:\r\n control_values.append(vars(genome)[metric])\r\n \r\n if None in control_values:\r\n raise ValueError('The value of ' + str(metric) +\r\n ' is not set for all pseudogenomes.')\r\n \r\n valid_values = [x for x in control_values if not isinstance(x, str)]\r\n if len(valid_values) < len(control_values):\r\n warnings.warn(\"Only {}/{} values of {} were valid and used to \\\r\n estimate the p-value.\".format(len(valid_values),\r\n len(control_values), metric))\r\n \r\n control = np.array(valid_values)\r\n obs = vars(self.original)[metric]\r\n \r\n if obs == None:\r\n p_val = 'no_obs'\r\n \r\n else:\r\n if alternative == 'greater':\r\n p_val = (control >= obs).sum()/len(control)\r\n elif alternative == 'smaller':\r\n p_val = (control <= obs).sum()/len(control)\r\n else:\r\n raise ValueError('alternative should be \"greater\" or \"smaller\".')\r\n \r\n # Set p_value\r\n vars(self)[metric] = p_val\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mgoldchild/keras-onnx
|
[
"8e700572b89a907ca21a3096556f64b62b7aa76c",
"c08d52bf4d4ec2bba69ec4ffd2ea14f47fecb1f5"
] |
[
"applications/mask_rcnn/mask_rcnn.py",
"keras2onnx/ke2onnx/merge.py"
] |
[
"import os\nimport sys\nimport numpy as np\nimport skimage\nimport onnx\nimport keras2onnx\n\nfrom mrcnn.config import Config\nfrom mrcnn.model import BatchNorm, DetectionLayer\nfrom mrcnn import model as modellib\nfrom mrcnn import visualize\n\nfrom keras2onnx import set_converter\nfrom keras2onnx.ke2onnx.batch_norm import convert_keras_batch_normalization\nfrom keras2onnx.proto import onnx_proto\nfrom keras2onnx.common.onnx_ops import apply_transpose, apply_identity\nfrom keras2onnx.common.onnx_ops import OnnxOperatorBuilder\nimport tf2onnx\nfrom onnx import onnx_pb, helper\n\n\nROOT_DIR = os.path.abspath(\"./\")\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Path to trained weights file\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n\n\nclass CocoConfig(Config):\n \"\"\"Configuration for training on MS COCO.\n Derives from the base Config class and overrides values specific\n to the COCO dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"coco\"\n\n # We use a GPU with 12GB memory, which can fit two images.\n # Adjust down if you use a smaller GPU.\n IMAGES_PER_GPU = 2\n\n # Uncomment to train on 8 GPUs (default is 1)\n # GPU_COUNT = 8\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 80 # COCO has 80 classes\n\n\nclass InferenceConfig(CocoConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n\nconfig = InferenceConfig()\nconfig.display()\n\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(COCO_MODEL_PATH, by_name=True)\n\n\ndef convert_BatchNorm(scope, operator, container):\n convert_keras_batch_normalization(scope, operator, container)\n\n\ndef convert_apply_box_deltas_graph(scope, operator, container, oopb, box_transpose, score_identity, deltas_transpose, windows_transpose):\n box_squeeze = scope.get_unique_variable_name('box_squeeze')\n attrs = {'axes': [0]}\n container.add_node('Squeeze', box_transpose, box_squeeze, op_version=operator.target_opset,\n **attrs)\n # output shape: [spatial_dimension, 4]\n\n deltas_squeeze = scope.get_unique_variable_name('deltas_squeeze')\n attrs = {'axes': [0]}\n container.add_node('Squeeze', deltas_transpose, deltas_squeeze, op_version=operator.target_opset,\n **attrs)\n # output shape: [spatial_dimension, num_classes, 4]\n\n score_squeeze = scope.get_unique_variable_name('score_squeeze')\n attrs = {'axes': [0]}\n container.add_node('Squeeze', score_identity, score_squeeze, op_version=operator.target_opset,\n **attrs)\n # output shape: [spatial_dimension, num_classes]\n\n class_ids = scope.get_unique_variable_name('class_ids')\n attrs = {'axis': 1}\n container.add_node('ArgMax', score_squeeze, class_ids, op_version=operator.target_opset,\n **attrs)\n # output shape: [spatial_dimension, 1]\n\n prob_shape = oopb.add_node('Shape',\n [score_squeeze],\n operator.inputs[1].full_name + '_prob_shape')\n prob_shape_0 = oopb.add_node('Slice',\n [prob_shape,\n ('_start', oopb.int64, np.array([0], dtype='int64')),\n ('_end', oopb.int64, np.array([1], dtype='int64')),\n ('_axes', oopb.int64, np.array([0], dtype='int64'))\n ],\n operator.inputs[1].full_name + '_prob_shape_0')\n prob_range = oopb.add_node('Range',\n [('_start', oopb.int64, np.array([0], dtype='int64')),\n prob_shape_0,\n # ('_limit', oopb.int64, np.array([1000], dtype='int64')),\n 
('_delta', oopb.int64, np.array([1], dtype='int64'))\n ],\n operator.inputs[1].full_name + '_prob_range',\n op_domain='com.microsoft')\n\n attrs = {'axes': [1]}\n prob_range_unsqueeze = oopb.add_node('Unsqueeze',\n [prob_range],\n operator.inputs[1].full_name + '_prob_range_unsqueeze',\n **attrs)\n # output shape: [spatial_dimension, 1]\n\n attrs = {'axis': 1}\n indices = oopb.add_node('Concat',\n [prob_range_unsqueeze,\n class_ids\n ],\n operator.inputs[1].full_name + '_indices', **attrs)\n # output shape: [spatial_dimension, 2]\n\n deltas_specific = oopb.add_node('GatherND',\n [deltas_squeeze, indices],\n operator.inputs[2].full_name + '_deltas_specific',\n op_domain='com.microsoft')\n # output shape: [spatial_dimension, 4]\n\n BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2], dtype='float32')\n delta_mul_output = oopb.add_node('Mul',\n [deltas_specific,\n ('_mul_constant', oopb.float, BBOX_STD_DEV)\n ],\n operator.inputs[2].full_name + '_mul')\n # output shape: [spatial_dimension, 4]\n\n box_0 = oopb.add_node('Slice',\n [box_squeeze,\n ('_start', oopb.int64, np.array([0], dtype='int64')),\n ('_end', oopb.int64, np.array([1], dtype='int64')),\n ('_axes', oopb.int64, np.array([1], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_sliced_0')\n box_1 = oopb.add_node('Slice',\n [box_squeeze,\n ('_start', oopb.int64, np.array([1], dtype='int64')),\n ('_end', oopb.int64, np.array([2], dtype='int64')),\n ('_axes', oopb.int64, np.array([1], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_sliced_1')\n box_2 = oopb.add_node('Slice',\n [box_squeeze,\n ('_start', oopb.int64, np.array([2], dtype='int64')),\n ('_end', oopb.int64, np.array([3], dtype='int64')),\n ('_axes', oopb.int64, np.array([1], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_sliced_2')\n box_3 = oopb.add_node('Slice',\n [box_squeeze,\n ('_start', oopb.int64, np.array([3], dtype='int64')),\n ('_end', oopb.int64, np.array([4], dtype='int64')),\n ('_axes', oopb.int64, np.array([1], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_sliced_3')\n\n delta_0 = oopb.add_node('Slice',\n [delta_mul_output,\n ('_start', oopb.int64, np.array([0], dtype='int64')),\n ('_end', oopb.int64, np.array([1], dtype='int64')),\n ('_axes', oopb.int64, np.array([1], dtype='int64'))\n ],\n operator.inputs[3].full_name + '_sliced_0')\n delta_1 = oopb.add_node('Slice',\n [delta_mul_output,\n ('_start', oopb.int64, np.array([1], dtype='int64')),\n ('_end', oopb.int64, np.array([2], dtype='int64')),\n ('_axes', oopb.int64, np.array([1], dtype='int64'))\n ],\n operator.inputs[3].full_name + '_sliced_1')\n delta_2 = oopb.add_node('Slice',\n [delta_mul_output,\n ('_start', oopb.int64, np.array([2], dtype='int64')),\n ('_end', oopb.int64, np.array([3], dtype='int64')),\n ('_axes', oopb.int64, np.array([1], dtype='int64'))\n ],\n operator.inputs[3].full_name + '_sliced_2')\n delta_3 = oopb.add_node('Slice',\n [delta_mul_output,\n ('_start', oopb.int64, np.array([3], dtype='int64')),\n ('_end', oopb.int64, np.array([4], dtype='int64')),\n ('_axes', oopb.int64, np.array([1], dtype='int64'))\n ],\n operator.inputs[3].full_name + '_sliced_3')\n\n height = oopb.add_node('Sub',\n [box_2, box_0],\n operator.inputs[0].full_name + '_height')\n width = oopb.add_node('Sub',\n [box_3, box_1],\n operator.inputs[0].full_name + '_width')\n\n half_height_0 = oopb.add_node('Mul',\n [height,\n ('_mul_constant', oopb.float, np.array([0.5], dtype='float32'))\n ],\n operator.inputs[0].full_name + '_half_height_0')\n half_width_0 = oopb.add_node('Mul',\n [width,\n 
('_mul_constant', oopb.float, np.array([0.5], dtype='float32'))\n ],\n operator.inputs[0].full_name + '_half_width_0')\n center_y_0 = oopb.add_node('Add',\n [box_0, half_height_0],\n operator.inputs[0].full_name + '_center_y_0')\n center_x_0 = oopb.add_node('Add',\n [box_1, half_width_0],\n operator.inputs[0].full_name + '_center_x_0')\n\n delta_height = oopb.add_node('Mul',\n [delta_0, height],\n operator.inputs[0].full_name + '_delta_height')\n delta_width = oopb.add_node('Mul',\n [delta_1, width],\n operator.inputs[0].full_name + '_delta_width')\n center_y_1 = oopb.add_node('Add',\n [center_y_0, delta_height],\n operator.inputs[0].full_name + '_center_y_1')\n center_x_1 = oopb.add_node('Add',\n [center_x_0, delta_width],\n operator.inputs[0].full_name + '_center_x_1')\n\n delta_2_exp = oopb.add_node('Exp',\n [delta_2],\n operator.inputs[0].full_name + '_delta_2_exp')\n delta_3_exp = oopb.add_node('Exp',\n [delta_3],\n operator.inputs[0].full_name + '_delta_3_exp')\n height_exp = oopb.add_node('Mul',\n [height, delta_2_exp],\n operator.inputs[0].full_name + '_height_exp')\n width_exp = oopb.add_node('Mul',\n [width, delta_3_exp],\n operator.inputs[0].full_name + '_width_exp')\n\n half_height_1 = oopb.add_node('Mul',\n [height_exp,\n ('_mul_constant', oopb.float, np.array([0.5], dtype='float32'))\n ],\n operator.inputs[0].full_name + '_half_height_1')\n half_width_1 = oopb.add_node('Mul',\n [width_exp,\n ('_mul_constant', oopb.float, np.array([0.5], dtype='float32'))\n ],\n operator.inputs[0].full_name + '_half_width_1')\n y1 = oopb.add_node('Sub',\n [center_y_1, half_height_1],\n operator.inputs[0].full_name + '_y1')\n x1 = oopb.add_node('Sub',\n [center_x_1, half_width_1],\n operator.inputs[0].full_name + '_x1')\n y2 = oopb.add_node('Add',\n [y1, height_exp],\n operator.inputs[0].full_name + '_y2')\n x2 = oopb.add_node('Add',\n [x1, width_exp],\n operator.inputs[0].full_name + '_x2')\n\n windows_squeeze = scope.get_unique_variable_name('windows_squeeze')\n attrs = {'axes': [0]}\n container.add_node('Squeeze', windows_transpose, windows_squeeze, op_version=operator.target_opset,\n **attrs)\n wy1 = oopb.add_node('Slice',\n [windows_squeeze,\n ('_start', oopb.int64, np.array([0], dtype='int64')),\n ('_end', oopb.int64, np.array([1], dtype='int64')),\n ('_axes', oopb.int64, np.array([0], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_windows_0')\n wx1 = oopb.add_node('Slice',\n [windows_squeeze,\n ('_start', oopb.int64, np.array([1], dtype='int64')),\n ('_end', oopb.int64, np.array([2], dtype='int64')),\n ('_axes', oopb.int64, np.array([0], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_windows_1')\n wy2 = oopb.add_node('Slice',\n [windows_squeeze,\n ('_start', oopb.int64, np.array([2], dtype='int64')),\n ('_end', oopb.int64, np.array([3], dtype='int64')),\n ('_axes', oopb.int64, np.array([0], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_windows_2')\n wx2 = oopb.add_node('Slice',\n [windows_squeeze,\n ('_start', oopb.int64, np.array([3], dtype='int64')),\n ('_end', oopb.int64, np.array([4], dtype='int64')),\n ('_axes', oopb.int64, np.array([0], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_windows_3')\n y1_min = oopb.add_node('Min',\n [y1, wy2],\n operator.inputs[0].full_name + '_y1_min')\n x1_min = oopb.add_node('Min',\n [x1, wx2],\n operator.inputs[0].full_name + '_x1_min')\n y2_min = oopb.add_node('Min',\n [y2, wy2],\n operator.inputs[0].full_name + '_y2_min')\n x2_min = oopb.add_node('Min',\n [x2, wx2],\n operator.inputs[0].full_name + '_x2_min')\n 
y1_max = oopb.add_node('Max',\n [y1_min, wy1],\n operator.inputs[0].full_name + '_y1_max')\n x1_max = oopb.add_node('Max',\n [x1_min, wx1],\n operator.inputs[0].full_name + '_x1_max')\n y2_max = oopb.add_node('Max',\n [y2_min, wy1],\n operator.inputs[0].full_name + '_y2_max')\n x2_max = oopb.add_node('Max',\n [x2_min, wx1],\n operator.inputs[0].full_name + '_x2_max')\n concat_result = scope.get_unique_variable_name(operator.output_full_names[0] + '_concat_result')\n attrs = {'axis': 1}\n container.add_node(\"Concat\",\n [y1_max, x1_max, y2_max, x2_max],\n concat_result,\n op_version=operator.target_opset,\n name=operator.outputs[0].full_name + '_concat_result', **attrs)\n\n concat_unsqueeze = scope.get_unique_variable_name('concat_unsqueeze')\n attrs = {'axes': [0]}\n container.add_node('Unsqueeze', concat_result, concat_unsqueeze, op_version=operator.target_opset,\n **attrs)\n return concat_unsqueeze\n\n\ndef norm_boxes_graph(scope, operator, container, oopb, image_meta):\n image_shapes = oopb.add_node('Slice',\n [image_meta,\n ('_start', oopb.int64, np.array([4], dtype='int64')),\n ('_end', oopb.int64, np.array([7], dtype='int64')),\n ('_axes', oopb.int64, np.array([1], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_image_shapes')\n image_shape = oopb.add_node('Slice',\n [image_shapes,\n ('_start', oopb.int64, np.array([0], dtype='int64')),\n ('_end', oopb.int64, np.array([1], dtype='int64')),\n ('_axes', oopb.int64, np.array([0], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_image_shape')\n image_shape_squeeze = scope.get_unique_variable_name('image_shape_squeeze')\n attrs = {'axes': [0]}\n container.add_node('Squeeze', image_shape, image_shape_squeeze, op_version=operator.target_opset,\n **attrs)\n window = oopb.add_node('Slice',\n [image_meta,\n ('_start', oopb.int64, np.array([7], dtype='int64')),\n ('_end', oopb.int64, np.array([11], dtype='int64')),\n ('_axes', oopb.int64, np.array([1], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_window')\n h_norm = oopb.add_node('Slice',\n [image_shape_squeeze,\n ('_start', oopb.int64, np.array([0], dtype='int64')),\n ('_end', oopb.int64, np.array([1], dtype='int64')),\n ('_axes', oopb.int64, np.array([0], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_h_norm')\n w_norm = oopb.add_node('Slice',\n [image_shape_squeeze,\n ('_start', oopb.int64, np.array([1], dtype='int64')),\n ('_end', oopb.int64, np.array([2], dtype='int64')),\n ('_axes', oopb.int64, np.array([0], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_w_norm')\n h_norm_float = scope.get_unique_variable_name('h_norm_float')\n attrs = {'to': 1}\n container.add_node('Cast', h_norm, h_norm_float, op_version=operator.target_opset,\n **attrs)\n w_norm_float = scope.get_unique_variable_name('w_norm_float')\n attrs = {'to': 1}\n container.add_node('Cast', w_norm, w_norm_float, op_version=operator.target_opset,\n **attrs)\n hw_concat = scope.get_unique_variable_name(operator.inputs[0].full_name + '_hw_concat')\n attrs = {'axis': -1}\n container.add_node(\"Concat\",\n [h_norm_float, w_norm_float, h_norm_float, w_norm_float],\n hw_concat,\n op_version=operator.target_opset,\n name=operator.inputs[0].full_name + '_hw_concat', **attrs)\n scale = oopb.add_node('Sub',\n [hw_concat,\n ('_sub', oopb.float, np.array([1.0], dtype='float32'))\n ],\n operator.inputs[0].full_name + '_scale')\n boxes_shift = oopb.add_node('Sub',\n [window,\n ('_sub', oopb.float, np.array([0.0, 0.0, 1.0, 1.0], dtype='float32'))\n ],\n operator.inputs[0].full_name + 
'_boxes_shift')\n divide = oopb.add_node('Div',\n [boxes_shift, scale],\n operator.inputs[0].full_name + '_divide')\n # output shape: [batch, 4]\n return divide\n\n\ndef convert_DetectionLayer(scope, operator, container):\n # type: (keras2onnx.common.InterimContext, keras2onnx.common.Operator, keras2onnx.common.OnnxObjectContainer) -> None\n DETECTION_MAX_INSTANCES = 100\n DETECTION_NMS_THRESHOLD = 0.3\n DETECTION_MIN_CONFIDENCE = 0.7\n\n oopb = OnnxOperatorBuilder(container, scope)\n box_transpose = scope.get_unique_variable_name(operator.inputs[0].full_name + '_tx')\n score_transpose = scope.get_unique_variable_name(operator.inputs[1].full_name + '_tx')\n\n # apply_transpose(scope, operator.inputs[0].full_name, box_transpose, container, perm=[2, 0, 1])\n apply_identity(scope, operator.inputs[0].full_name, box_transpose, container)\n # output shape: [num_batches, spatial_dimension, 4]\n score_identity = scope.get_unique_variable_name(operator.inputs[1].full_name + '_id')\n apply_identity(scope, operator.inputs[1].full_name, score_identity, container)\n # output shape: [num_batches, spatial_dimension, num_classes]\n\n deltas_transpose = scope.get_unique_variable_name(operator.inputs[2].full_name + '_tx')\n apply_identity(scope, operator.inputs[2].full_name, deltas_transpose, container)\n image_meta = scope.get_unique_variable_name(operator.inputs[3].full_name + '_tx')\n apply_identity(scope, operator.inputs[3].full_name, image_meta, container)\n windows_transpose = norm_boxes_graph(scope, operator, container, oopb, image_meta)\n delta_mul_output = convert_apply_box_deltas_graph(scope, operator, container, oopb, box_transpose, score_identity, deltas_transpose, windows_transpose)\n\n sliced_score = oopb.add_node('Slice',\n [score_identity,\n ('_start', oopb.int64, np.array([1], dtype='int64')),\n ('_end', oopb.int64, np.array([81], dtype='int64')),\n ('_axes', oopb.int64, np.array([2], dtype='int64'))\n ],\n operator.inputs[1].full_name + '_sliced')\n apply_transpose(scope, sliced_score, score_transpose, container, perm=[0, 2, 1])\n # output shape: [num_batches, num_classes, spatial_dimension]\n\n max_output_size = scope.get_unique_variable_name('max_output_size')\n iou_threshold = scope.get_unique_variable_name('iou_threshold')\n score_threshold = scope.get_unique_variable_name('layer.score_threshold')\n\n container.add_initializer(max_output_size, onnx_proto.TensorProto.INT64,\n [], [DETECTION_MAX_INSTANCES])\n container.add_initializer(iou_threshold, onnx_proto.TensorProto.FLOAT,\n [], [DETECTION_NMS_THRESHOLD])\n container.add_initializer(score_threshold, onnx_proto.TensorProto.FLOAT,\n [], [DETECTION_MIN_CONFIDENCE])\n\n nms_node = next((nd_ for nd_ in operator.nodelist if nd_.type == 'NonMaxSuppressionV3'), operator.nodelist[0])\n nms_output = scope.get_unique_variable_name(operator.output_full_names[0] + '_nms')\n container.add_node(\"NonMaxSuppression\",\n [delta_mul_output, score_transpose, max_output_size, iou_threshold, score_threshold],\n nms_output,\n op_version=operator.target_opset,\n name=nms_node.name)\n\n add_init = scope.get_unique_variable_name('add')\n container.add_initializer(add_init, onnx_proto.TensorProto.INT64,\n [1, 3], [0, 1, 0])\n nms_output_add = scope.get_unique_variable_name(operator.output_full_names[0] + '_class_add')\n container.add_node(\"Add\",\n [nms_output, add_init],\n nms_output_add,\n op_version=operator.target_opset,\n name=nms_node.name + '_class_idx_add')\n\n starts_init = scope.get_unique_variable_name('starts')\n ends_init = 
scope.get_unique_variable_name('ends')\n axes_init = scope.get_unique_variable_name('axes')\n\n container.add_initializer(starts_init, onnx_proto.TensorProto.INT32,\n [1], [1])\n container.add_initializer(ends_init, onnx_proto.TensorProto.INT32,\n [1], [2])\n container.add_initializer(axes_init, onnx_proto.TensorProto.INT32,\n [1], [1])\n\n class_idx_output = scope.get_unique_variable_name(operator.output_full_names[0] + '_class_idx')\n container.add_node(\"Slice\",\n [nms_output_add, starts_init, ends_init, axes_init],\n class_idx_output,\n op_version=operator.target_opset,\n name=nms_node.name+'_class_idx')\n # output shape: [num_selected_indices, 1]\n\n starts_init_2 = scope.get_unique_variable_name('starts')\n ends_init_2 = scope.get_unique_variable_name('ends')\n axes_init_2 = scope.get_unique_variable_name('axes')\n\n container.add_initializer(starts_init_2, onnx_proto.TensorProto.INT32,\n [1], [2])\n container.add_initializer(ends_init_2, onnx_proto.TensorProto.INT32,\n [1], [3])\n container.add_initializer(axes_init_2, onnx_proto.TensorProto.INT32,\n [1], [1])\n\n box_idx_output = scope.get_unique_variable_name(operator.output_full_names[0] + '_box_idx')\n container.add_node(\"Slice\",\n [nms_output_add, starts_init_2, ends_init_2, axes_init_2],\n box_idx_output,\n op_version=operator.target_opset,\n name=nms_node.name + '_box_idx')\n # output shape: [num_selected_indices, 1]\n\n box_idx_squeeze = scope.get_unique_variable_name(operator.output_full_names[0] + '_box_idx_squeeze')\n attrs = {'axes': [1]}\n container.add_node(\"Squeeze\",\n box_idx_output,\n box_idx_squeeze,\n op_version=operator.target_opset,\n name=nms_node.name + '_box_idx_squeeze', **attrs)\n # output shape: [num_selected_indices]\n\n starts_init_3 = scope.get_unique_variable_name('starts')\n ends_init_3 = scope.get_unique_variable_name('ends')\n axes_init_3 = scope.get_unique_variable_name('axes')\n step_init_3 = scope.get_unique_variable_name('steps')\n\n container.add_initializer(starts_init_3, onnx_proto.TensorProto.INT32,\n [1], [2])\n container.add_initializer(ends_init_3, onnx_proto.TensorProto.INT32,\n [1], [0])\n container.add_initializer(axes_init_3, onnx_proto.TensorProto.INT32,\n [1], [1])\n container.add_initializer(step_init_3, onnx_proto.TensorProto.INT32,\n [1], [-1])\n from keras2onnx.common.data_types import Int32TensorType, FloatTensorType\n class_box_idx_output = scope.get_local_variable_or_declare_one(operator.output_full_names[0] + '_class_box_idx',\n type=Int32TensorType(shape=[None, 2]))\n container.add_node(\"Slice\",\n [nms_output_add, starts_init_3, ends_init_3, axes_init_3, step_init_3],\n class_box_idx_output.full_name,\n op_version=operator.target_opset,\n name=nms_node.name + '_class_box_idx')\n # output shape: [num_selected_indices, 2]\n\n box_squeeze = scope.get_unique_variable_name(operator.output_full_names[0] + '_box_squeeze')\n attrs = {'axes': [0]}\n container.add_node(\"Squeeze\",\n delta_mul_output,\n box_squeeze,\n op_version=operator.target_opset,\n name=nms_node.name + '_box_squeeze', **attrs)\n # output shape: [spatial_dimension, 4]\n\n score_squeeze = scope.get_local_variable_or_declare_one(operator.output_full_names[0] + '_score_squeeze',\n type=FloatTensorType(shape=[None]))\n attrs = {'axes': [0]}\n container.add_node(\"Squeeze\",\n score_identity,\n score_squeeze.full_name,\n op_version=operator.target_opset,\n name=nms_node.name + '_score_squeeze', **attrs)\n # output shape: [spatial_dimension, num_classes]\n\n box_gather = 
scope.get_unique_variable_name(operator.output_full_names[0] + '_box_gather')\n attrs = {'axis': 0}\n container.add_node(\"Gather\",\n [box_squeeze, box_idx_squeeze],\n box_gather,\n op_version=operator.target_opset,\n name=nms_node.name + '_box_gather', **attrs)\n # output shape: [num_selected_indices, 4]\n\n score_gather = scope.get_unique_variable_name(operator.output_full_names[0] + '_score_gather')\n container.add_node(\"GatherND\",\n [score_squeeze.full_name, class_box_idx_output.full_name],\n score_gather,\n op_version=operator.target_opset, op_domain='com.microsoft',\n name=nms_node.name + '_score_gather')\n # output shape: [num_selected_indices]\n\n score_gather_unsqueeze = scope.get_unique_variable_name(operator.output_full_names[0] + '_score_gather_unsqueeze')\n attrs = {'axes': [1]}\n container.add_node(\"Unsqueeze\",\n score_gather,\n score_gather_unsqueeze,\n op_version=operator.target_opset,\n name=nms_node.name + '_score_gather_unsqueeze', **attrs)\n # output shape: [num_selected_indices, 1]\n\n\n top_k_var = scope.get_unique_variable_name('topK')\n container.add_initializer(top_k_var, onnx_proto.TensorProto.FLOAT,\n [1], [100.0])\n\n score_gather_shape = oopb.add_node('Shape',\n [score_gather],\n operator.inputs[1].full_name + '_score_gather_shape')\n attrs = {'to': 1}\n scope_gather_float = oopb.add_node('Cast',\n [score_gather_shape],\n operator.inputs[1].full_name + '_scope_gather_float', **attrs)\n top_k_min = oopb.add_node('Min',\n [scope_gather_float, top_k_var],\n operator.inputs[1].full_name + '_top_k_min')\n attrs = {'to': 7}\n top_k_min_int = oopb.add_node('Cast',\n [top_k_min],\n operator.inputs[1].full_name + '_top_k_min_int', **attrs)\n\n\n score_top_k_output_val = scope.get_unique_variable_name(operator.output_full_names[0] + '_score_top_k_output_val')\n # output shape: [num_top_K]\n score_top_k_output_idx = scope.get_unique_variable_name(operator.output_full_names[0] + '_score_top_k_output_idx')\n # output shape: [num_top_K]\n attrs = {'axis': 0}\n container.add_node('TopK', [score_gather, top_k_min_int], [score_top_k_output_val, score_top_k_output_idx],\n op_version=operator.target_opset,\n name=nms_node.name + '_topK', **attrs)\n\n class_idx_cast = scope.get_unique_variable_name(operator.output_full_names[0] + '_class_idx_cast')\n attrs = {'to': 1}\n container.add_node('Cast', class_idx_output, class_idx_cast, op_version=operator.target_opset,\n name=nms_node.name+'_class_idx_cast', **attrs)\n # output shape: [num_selected_indices, 1]\n\n concat_var = scope.get_unique_variable_name(operator.output_full_names[0] + '_concat_var')\n concat_node = next((nd_ for nd_ in operator.nodelist if nd_.type == 'Concat'), operator.nodelist[0])\n attrs = {'axis': 1}\n container.add_node(\"Concat\",\n [box_gather, class_idx_cast, score_gather_unsqueeze],\n concat_var,\n op_version=operator.target_opset,\n name=concat_node.name, **attrs)\n # output shape: [num_selected_indices, 6]\n\n all_gather = scope.get_unique_variable_name(operator.output_full_names[0] + '_all_gather')\n attrs = {'axis': 0}\n container.add_node(\"Gather\",\n [concat_var, score_top_k_output_idx],\n all_gather,\n op_version=operator.target_opset,\n name=nms_node.name + '_all_gather', **attrs)\n # output shape: [num_top_K, 6]\n padded_result = oopb.add_node('Pad',\n [all_gather,\n np.array([0, 0, DETECTION_MAX_INSTANCES, 0],\n dtype=np.int64)],\n nms_node.name + '_padded_result',\n op_domain='com.microsoft')\n detection_final = oopb.add_node('Slice',\n [padded_result,\n ('_start', oopb.int64, 
np.array([0], dtype='int64')),\n ('_end', oopb.int64, np.array([DETECTION_MAX_INSTANCES], dtype='int64')),\n ('_axes', oopb.int64, np.array([0], dtype='int64'))\n ],\n nms_node.name + '_detection_final'\n )\n\n attrs = {'axes': [0]}\n container.add_node(\"Unsqueeze\",\n detection_final,\n operator.output_full_names[0],\n op_version=operator.target_opset,\n name=nms_node.name + '_concat_unsqueeze', **attrs)\n # output shape: [1, num_top_K, 6]\n\n\n# This is for Pad opset 11 which is now a contrib op, TODO: need onnx schema update for Pad\ndef on_Pad(ctx, node, name, args):\n node.type = \"Pad\"\n node.domain = 'com.microsoft'\n mode = node.get_attr(\"mode\")\n if mode:\n mode = mode.s.decode(\"utf-8\").lower()\n node.set_attr(\"mode\", mode)\n if mode not in [None, \"constant\", \"reflect\"]:\n raise ValueError(mode + \" pad mode is not supported\")\n\n origin_dtype = ctx.get_dtype(node.output[0])\n cast_node = ctx.insert_new_node_on_input(node, \"Cast\", node.input[1])\n cast_node.set_attr(\"to\", onnx_pb.TensorProto.INT64)\n ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.INT64)\n ctx.copy_shape(node.name, cast_node.output[0])\n\n attrs = {'perm': [1, 0]}\n transpose_node = ctx.make_node(\"Transpose\", [cast_node.output[0]], name=tf2onnx.utils.make_name(node.name),\n attr=attrs)\n\n const_name = tf2onnx.utils.make_name(node.name)\n\n const_array = ctx.make_const(const_name, np.array([-1], dtype=np.int64))\n\n reshape = ctx.make_node(\"Reshape\", [transpose_node.output[0], const_array.output[0]])\n ctx.replace_input(node, node.input[1], reshape.output[0])\n\n if origin_dtype not in [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT,\n onnx_pb.TensorProto.DOUBLE]:\n cast_node = ctx.insert_new_node_on_input(node, \"Cast\", node.input[0])\n cast_node.set_attr(\"to\", onnx_pb.TensorProto.FLOAT)\n ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.FLOAT)\n ctx.copy_shape(node.name, cast_node.output[0])\n\n cast_back_node = ctx.insert_new_node_on_output(\"Cast\", node.output[0],\n name=tf2onnx.utils.make_name(node.name) + \"_castback\")\n cast_back_node.set_attr(\"to\", origin_dtype)\n ctx.set_dtype(cast_back_node.output[0], origin_dtype)\n ctx.copy_shape(node.name, cast_back_node.output[0])\n\n\ndef on_CropAndResize(ctx, node, name, args):\n node.type = \"CropAndResize\"\n node.domain = 'com.microsoft'\n mode = node.get_attr(\"method\")\n if mode:\n mode_value = helper.get_attribute_value(mode)\n del node.attr['method']\n node.set_attr(\"mode\", mode_value)\n\n transpose_node = ctx.insert_new_node_on_input(node, \"Transpose\", node.input[0])\n transpose_node.set_attr(\"perm\", [0, 3, 1, 2])\n ctx.set_dtype(transpose_node.output[0], onnx_pb.TensorProto.INT64)\n\n transpose_node_2 = ctx.insert_new_node_on_output(\"Transpose\", node.output[0],\n name=tf2onnx.utils.make_name(node.name) + \"_transpose_final\")\n transpose_node_2.set_attr(\"perm\", [0, 2, 3, 1])\n ctx.set_dtype(transpose_node_2.output[0], onnx_pb.TensorProto.INT64)\n\n\ndef on_GatherNd(ctx, node, name, args):\n node.type = \"GatherND\"\n node.domain = \"com.microsoft\"\n\n\ntf2onnx_contrib_op_conversion = {\n 'GatherNd': (on_GatherNd, []),\n 'CropAndResize': (on_CropAndResize, []),\n 'Pad': (on_Pad, []),\n 'PadV2': (on_Pad, [])\n }\n\n\nset_converter(DetectionLayer, convert_DetectionLayer)\nset_converter(BatchNorm, convert_BatchNorm)\n\n\n# Run detection\nclass_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 
'parking meter', 'bench', 'bird',\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\n 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n 'teddy bear', 'hair drier', 'toothbrush']\n\n\ndef generate_image(images, molded_images, windows, results):\n results_final = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks = \\\n model.unmold_detections(results[0][i], results[3][i], # detections[i], mrcnn_mask[i]\n image.shape, molded_images[i].shape,\n windows[i])\n results_final.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n r = results_final[i]\n visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'])\n return results_final\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\"Need an image file for object detection.\")\n exit(-1)\n\n model_file_name = './mrcnn.onnx'\n if not os.path.exists(model_file_name):\n # use opset 10 or later\n oml = keras2onnx.convert_keras(model.keras_model, target_opset=10, custom_op_conversions=tf2onnx_contrib_op_conversion)\n onnx.save_model(oml, model_file_name)\n\n # run with ONNXRuntime\n import onnxruntime\n filename = sys.argv[1]\n image = skimage.io.imread(filename)\n images = [image]\n\n sess = onnxruntime.InferenceSession(model_file_name)\n\n # preprocessing\n molded_images, image_metas, windows = model.mold_inputs(images)\n anchors = model.get_anchors(molded_images[0].shape)\n anchors = np.broadcast_to(anchors, (model.config.BATCH_SIZE,) + anchors.shape)\n\n results = \\\n sess.run(None, {\"input_image\": molded_images.astype(np.float32),\n \"input_anchors\": anchors,\n \"input_image_meta\": image_metas.astype(np.float32)})\n\n # postprocessing\n results_final = generate_image(images, molded_images, windows, results)\n",
"import numpy as np\nfrom ..proto import keras\nfrom ..common.onnx_ops import apply_add, apply_mul, apply_sub, apply_identity\nfrom ..common.onnx_ops import apply_mean, apply_max, OnnxOperatorBuilder\n\n_merge_layer_handlers = {keras.layers.Add: apply_add, keras.layers.Multiply: apply_mul,\n keras.layers.Subtract: apply_sub, keras.layers.Average: apply_mean,\n keras.layers.Maximum: apply_max}\n\n\ndef convert_keras_merge_layer(scope, operator, container):\n op = operator.raw_operator\n if isinstance(op, keras.layers.Subtract) and len(operator.inputs) > 2:\n raise RuntimeError(\n 'Expected two inputs but got %s. Their names are %s' % (len(operator.inputs), operator.input_full_names))\n\n apply_merge_operation = _merge_layer_handlers[type(op)]\n\n intermediate_tensor_name = None\n for i in range(len(operator.inputs) - 1):\n if i == 0:\n left_tensor_name = operator.inputs[0].full_name\n right_tensor_name = operator.inputs[1].full_name\n op_name = operator.full_name\n else:\n if intermediate_tensor_name is None:\n raise RuntimeError('Tensor name cannot be None')\n left_tensor_name = intermediate_tensor_name\n right_tensor_name = operator.inputs[i + 1].full_name\n op_name = scope.get_unique_operator_name('Merge')\n\n if (len(operator.inputs) == 2 and i == 0) or (len(operator.inputs) > 2 and i == len(operator.inputs) - 2):\n # At the last iteration, we need to put the result to Keras layer's output tensor\n intermediate_tensor_name = operator.outputs[0].full_name\n else:\n # Keep accumulate changes through iterations using buffer tensors\n intermediate_tensor_name = scope.get_unique_variable_name('intermediate_tensor')\n apply_merge_operation(scope, [left_tensor_name, right_tensor_name], intermediate_tensor_name, container)\n\n if operator.output_masks:\n # Keras merge layer compute mask\n # masks = [array_ops.expand_dims(m, axis=0) for m in mask if m is not None]\n # return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)\n oopb = OnnxOperatorBuilder(container, scope)\n expanded = []\n for idx_, i_ in enumerate(operator.input_masks):\n expanded.append(oopb.add_node('Unsqueeze', i_.full_name, i_.full_name + '_i' + str(idx_), axes=[0]))\n\n if len(expanded) > 1:\n concat = oopb.apply_concat(expanded, name=operator.full_name + '_concat')\n else:\n concat = expanded[0]\n cast = oopb.add_node('Cast', concat, name=operator.full_name + '_cast', to=1)\n reduced = oopb.add_node('ReduceSum', cast, name=operator.full_name + '_reduced', op_version=1, axes=[0], keepdims=0)\n oopb.add_node_with_output('Greater', [reduced, np.array([0], dtype=np.float32)],\n [operator.output_masks[0].full_name], name=operator.full_name + '_greater',\n op_version=7)\n"
] |
[
[
"numpy.array",
"numpy.broadcast_to"
],
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ngc7331/ArkGachaStatistics
|
[
"88450218371fe62efcd798acf4e2e7f06a3a55cc"
] |
[
"ArkGachaStatistics.py"
] |
[
"import argparse\nimport json\nimport os\nimport re\nimport time\nimport matplotlib.pyplot as plt\nimport msedge.selenium_tools\nimport pylab\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\n\nrec = []\nUID = ''\ncookies = []\nolddate = 0\n\nparser = argparse.ArgumentParser(description='Arknights Gacha Statistics - 一个明日方舟抽卡统计工具', add_help= True)\nparser.add_argument('-b', '--browser', choices=['chrome', 'edge'], default='chrome', help='设置使用的浏览器.')\nparser.add_argument('-d', '--debug', action='store_true', help='输出调试信息.')\nparser.add_argument('-e', '--export', action='store_true', help='直接从已有数据导出图片.')\nparser.add_argument('-f', '--file', metavar='filename', default='log', help='设置记录的文件名(默认为log.json).')\nparser.add_argument('-m', '--minimum-rarity', type=int, choices=range(3, 7), default=4, help='设置单角色统计最低星级(3~6的整数,默认为4).')\nparser.add_argument('-r', '--reset', action='store_true', help='清除历史记录.')\nparser.add_argument('-s', '--skip-inquiry', action='store_true', help='跳过从官网更新抽卡数据.')\nparser.add_argument('--skip-draw', action='store_true', help='跳过画图.')\nargs = parser.parse_args()\n\n# 指定默认字体\npylab.rcParams['font.sans-serif'] = ['SimHei']\npylab.rcParams['axes.unicode_minus'] = False\n\ndef debug(m):\n if (args.debug):\n print('[Debug] %s' % m)\n\ndef inquiry():\n global cookies, UID, rec\n\n #初始化浏览器\n if (args.browser == 'chrome'):\n options = webdriver.ChromeOptions()\n options.add_experimental_option('excludeSwitches', ['enable-logging'])\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n browser = webdriver.Chrome(executable_path='driver/chromedriver.exe', options=options)\n elif (args.browser == 'edge'):\n options = msedge.selenium_tools.EdgeOptions()\n options.use_chromium = True\n options.add_experimental_option('excludeSwitches', ['enable-logging'])\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n browser = msedge.selenium_tools.Edge(executable_path='driver/msedgedriver.exe', options=options)\n\n if (cookies):\n debug('Load cookies')\n debug(cookies)\n browser.get('https://ak.hypergryph.com/user/inquiryGacha')\n browser.delete_all_cookies()\n for cookie in cookies:\n browser.add_cookie(cookie)\n\n uid = ''\n while not uid:\n browser.get('https://ak.hypergryph.com/user/inquiryGacha')\n try:\n uid = browser.find_element_by_xpath('//*[@id=\"app\"]/div/div/div[3]/div[2]/div[2]/div[1]/span[2]').text\n valid = browser.find_element_by_xpath('//*[@id=\"app\"]/div/div/div[3]/div[2]/div[2]/div[1]/span[1]').text == 'UID'\n if (not valid):\n raise NoSuchElementException\n except NoSuchElementException:\n input('未登录!请手动登录后按回车继续')\n print('UID: %s' % uid)\n if (UID and UID != uid):\n print('UID与logs/%s.json中记录值(%s)不匹配,请检查' % (logfile, UID))\n exit()\n time.sleep(3)\n cookies = browser.get_cookies()\n i = 1\n new_rec = []\n loop = True\n while loop:\n print('------------\\nPage: %d' % i)\n lines = browser.find_elements_by_xpath('//*[@id=\"app\"]/div/div/div[3]/div[2]/div[2]/div[2]/div/table/tbody/tr')\n for line in lines:\n date, chars_l = line.find_elements_by_xpath('.//td')\n chars = chars_l.find_elements_by_xpath('.//li')\n if (time.mktime(time.strptime(date.text, '%Y-%m-%d %H:%M:%S')) <= olddate):\n loop = False\n break\n print(date.text, end=': ')\n for char in chars:\n char_class = char.get_attribute('class')\n if ('rarity-3' in char_class):\n rarity = 4\n elif ('rarity-4' in char_class):\n rarity = 5\n elif ('rarity-5' in char_class):\n rarity = 6\n else:\n rarity = 3\n print('%d★%s' 
% (rarity, char.text), end=' ')\n new_rec.append({\n 'date': date.text,\n 'name': char.text,\n 'rarity': rarity\n })\n print()\n next_page = browser.find_element_by_xpath('//*[@id=\"app\"]/div/div/div[3]/div[2]/div[2]/div[2]/ul/li[last()]/a')\n if (next_page.get_attribute('aria-disabled') == 'true' or not loop):\n break\n i += 1\n next_page.click()\n time.sleep(3)\n browser.close()\n #写入log\n print('Dump json')\n rec.extend(reversed(new_rec))\n with open('logs/%s.json' % logfile, 'w', encoding='UTF-8') as f:\n f.write(re.sub(\n '\\n\\s*?(\"date\":.*?)\\n\\s*?(\"name\":.*?)\\n\\s*?(\"rarity\":.*?)\\n\\s*?}',\n ' \\\\1 \\\\2 \\\\3 }',\n json.dumps(\n {'UID': uid, 'cookies': cookies, 'data': rec},\n indent=2,\n separators=(',', ': '),\n ensure_ascii=False)\n )\n )\n print('Done')\n time.sleep(1)\n return None\n\ndef draw():\n print('Draw picture')\n # 统计数据\n total = len(rec)\n count = [0, 0, 0, 0] # 3~6星计数\n trend = [[0], [0], [0], [0]] # 3~6星趋势\n chars = {} # 角色计数\n trend25 = [] #25抽分布\n tmp = [0, 0, 0, 0]\n for i in rec:\n name, rarity = i['name'], i['rarity']\n count[rarity-3] += 1\n for j in range(4):\n trend[j].append(trend[j][-1]+1 if rarity-3 == j else trend[j][-1])\n if (rarity >= args.minimum_rarity):\n chars.setdefault(name, 0)\n chars[name] += 1\n tmp[rarity-3] += 1\n if (sum(tmp) == 25 or i == rec[-1]):\n trend25.append(tmp)\n tmp = [0, 0, 0, 0]\n chars = sorted(chars.items(), key=lambda d:d[1], reverse=True)\n debug(count)\n debug(chars)\n debug(trend25)\n # 画图\n fig, axes = plt.subplots(\n 2, 2,\n figsize = (16, 9),\n gridspec_kw = dict(height_ratios=[9, 7], width_ratios=[2, 1])\n )\n colors = ['lightgrey', 'cyan', 'yellow', 'orange']\n labels = ['3★', '4★', '5★', '6★']\n plt.suptitle('Arknight Gacha Statistics', bbox={'facecolor':'0.9', 'pad':5})\n # 稀有度分布(饼图)\n axes[0, 0].pie(\n count,\n colors = colors,\n labels = labels,\n explode = [0.02, 0.02, 0.08, 0.16],\n pctdistance = 0.7,\n autopct = lambda x: '%d / %1.2f%%' % (round(x*total/100), x),\n wedgeprops = dict(edgecolor='black', linewidth=1)\n )\n axes[0, 0].text(-2.5, 0.4, '总计: %d抽' % total)\n axes[0, 0].text(-2.5, 0.2, '平均6★间距:%.2f' % (total/count[3]))\n axes[0, 0].text(-2.5, 0, '平均5★间距:%.2f' % (total/count[2]))\n # 稀有度累计趋势(堆积式折线图)\n axes[0, 1].set_title('稀有度累计趋势')\n axes[0, 1].stackplot(\n range(total+1), # x轴\n trend[3], trend[2], trend[1], trend[0], # 折线数据\n labels = reversed(labels), # 倒序排列 -> 高稀有度在下\n colors = reversed(colors)\n )\n axes[0, 1].legend(loc=2)\n # 角色统计(柱状图)\n axes[1, 0].set_title('%d★以上角色统计' % args.minimum_rarity)\n axes[1, 0].bar(\n [l[0] for l in chars], # 角色名 => 横轴\n [l[1] for l in chars] # 出货次数 => 纵轴\n )\n debug(len(chars))\n for tick in axes[1, 0].get_xticklabels(): # 旋转标签\n tick.set_rotation(45)\n tick.set_fontsize(max(20-0.26*len(chars), 6)) # 字体大小:对测试数据做线性拟合,最小为6\n # 每25(暂定)抽稀有度分布(堆积式柱状图)\n axes[1, 1].set_title('每25抽稀有度分布')\n for i in range(4):\n axes[1, 1].bar(\n range(len(trend25)),\n [l[i] for l in trend25],\n bottom = list(map(lambda l: sum(l[i+1:]), [l for l in trend25])),\n color = colors[i],\n tick_label = list(map(lambda x: str(x*25), range(len(trend25))))\n )\n # 保存&显示\n filename = '%s %s' % (logfile, rec[-1]['date'].replace(':', '-'))\n if (args.export):\n fig.savefig('logs/%s.jpg' % filename)\n print('Saved to logs/%s.jpg' % filename)\n return None\n fig.show()\n ans = input('是否保存图片Y/n? 
')\n if (ans.lower() == 'y' or not ans):\n fig.savefig('logs/%s.jpg' % filename)\n print('Saved to logs/%s.jpg' % filename)\n else:\n print('Not save')\n return None\n\nif (__name__ == '__main__'):\n logfile = args.file.replace('.json', '')\n try:\n with open('logs/%s.json' % logfile, 'r', encoding='UTF-8') as f:\n log = json.load(f)\n rec = log['data']\n UID = log['UID']\n cookies = log['cookies']\n olddate = time.mktime(time.strptime(rec[-1]['date'], '%Y-%m-%d %H:%M:%S'))\n except FileNotFoundError:\n if(not os.path.exists('logs')):\n os.mkdir('logs')\n if (args.skip_inquiry or args.export):\n print('错误:未找到指定的log文件,请检查-f参数,或不使用-s/-e')\n exit()\n except KeyError as e:\n debug(e)\n rec = log # 处理0422以前版本数据\n if (args.reset):\n try:\n for root, dirs, files in os.walk('logs', topdown=False):\n for name in files:\n debug('remove %s' % os.path.join(root, name))\n os.remove(os.path.join(root, name))\n except:\n print('没有文件需要删除')\n exit()\n if (not (args.skip_inquiry or args.export)):\n inquiry()\n if (not args.skip_draw):\n draw()\n print('All Done, exiting...')"
] |
[
[
"matplotlib.pyplot.suptitle"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
arpanmangal/action-detection
|
[
"fedad54b22a25b5fd12d3bc2cc48bda1a7d6bbfd"
] |
[
"load_binary_score.py"
] |
[
"import torch.utils.data as data\n\nimport os\nimport os.path\nfrom numpy.random import randint\nfrom ops.io import load_proposal_file\nfrom transforms import *\nfrom ops.utils import temporal_iou\n\n\n\nclass BinaryInstance:\n\n def __init__(self, start_frame, end_frame, video_frame_count,\n fps=1, label=None, overlap_self=None, iou=1.0):\n self.start_frame = start_frame\n self.end_frame = min(end_frame, video_frame_count)\n self._label = label\n self.fps = fps\n self.iou = iou\n self.coverage = (end_frame - start_frame) / video_frame_count\n\n self.overlap_self = overlap_self\n\n @property\n def start_time(self):\n return self.start_frame / self.fps\n \n @property\n def label(self):\n return self._lable if self._label is not None else -1\n\n\n\n\n\n\nclass BinaryVideoRecord:\n def __init__(self, prop_record):\n self._data = prop_record\n# print(prop_record)\n\n frame_count = int(self._data[1])\n \n # build instance record\n self.fps = 1\n self.gt = [\n BinaryInstance(int(x[1]), int(x[2]), frame_count, label=int(x[0]), iou = 1.0) for x in self._data[2]\n if int(x[2]) > int(x[1])\n ]\n self.gt = list(filter(lambda x: x.start_frame < frame_count, self.gt))\n \n self.proposals = [\n BinaryInstance(int(x[3]), int(x[4]), frame_count, label=int(x[0]), iou = float(x[1]),\n overlap_self = float(x[2])) for x in self._data[3] if int(x[4]) > int(x[3])\n ]\n\n self.proposals = list(filter(lambda x:x.start_frame < frame_count, self.proposals))\n\n @property\n def id(self):\n return self._data[0]\n\n @property\n def num_frames(self):\n return int(self._data[1])\n\n def get_fg(self, fg_thresh, with_gt=True):\n fg = [p for p in self.proposals if p.iou > fg_thresh]\n if with_gt:\n fg.extend(self.gt)\n return fg\n\n def get_bg(self, bg_thresh):\n bg = [p for p in self.proposals if p.iou < bg_thresh]\n return bg\n\n\n\nclass BinaryDataSet(data.Dataset):\n\n def __init__(self, root_path,\n prop_file = None,\n body_seg=5, video_centric=True,\n new_length=1, modality='RGB',\n image_tmpl='img_{:05d}.jpg', transform=None,\n random_shift=True, test_mode=False,\n prop_per_video=12, fg_ratio=3, bg_ratio=9,\n fg_iou_thresh=0.7,\n bg_iou_thresh=0.01,\n bg_coverage_thresh=0.02,\n gt_as_fg=True, test_interval=6, verbose=True,\n exclude_empty=True, epoch_multiplier=1):\n\n self.root_path = root_path\n self.prop_file = prop_file\n self.verbose = verbose\n\n self.body_seg = body_seg\n self.video_centric = video_centric\n self.exclude_empty = exclude_empty\n self.epoch_multiplier = epoch_multiplier\n\n self.new_length = new_length\n self.modality = modality\n self.image_tmpl = image_tmpl\n self.transform = transform\n self.random_shift = random_shift\n self.test_mode = test_mode\n self.test_interval = test_interval\n\n self.fg_iou_thresh = fg_iou_thresh\n self.bg_iou_thresh = bg_iou_thresh\n\n self.bg_coverage_thresh = bg_coverage_thresh\n self.starting_ratio = 0.5\n self.ending_ratio = 0.5\n\n self.gt_as_fg = gt_as_fg\n denum = fg_ratio + bg_ratio\n\n self.fg_per_video = int(prop_per_video * (fg_ratio / denum))\n self.bg_per_video = int(prop_per_video * (bg_ratio / denum))\n\n self._parse_prop_file()\n\n def _parse_prop_file(self):\n prop_info = load_proposal_file(self.prop_file)\n \n self.video_list = [BinaryVideoRecord(p) for p in prop_info]\n\n if self.exclude_empty:\n self.video_list = list(filter(lambda x: len(x.gt) > 0, self.video_list))\n\n self.video_dict = {v.id: v for v in self.video_list}\n\n # construct two pools:\n # 1. Foreground\n # 2. 
Background\n\n self.fg_pool = []\n self.bg_pool = []\n\n for v in self.video_list:\n self.fg_pool.extend([(v.id, prop) for prop in v.get_fg(self.fg_iou_thresh, self.gt_as_fg)])\n self.bg_pool.extend([(v.id, prop) for prop in v.get_bg(self.bg_iou_thresh)])\n\n if self.verbose:\n print(\"\"\"\n \n BinaryDataSet: Proposal file {prop_file} parse.\n\n There are {pnum} usable proposals from {vnum} videos.\n {fnum} foreground proposals\n {bnum} background proposals\n\n Sampling config:\n FG/BG: {fr}/{br}\n \n Epoch size muiltiplier: {em}\n \"\"\".format(prop_file=self.prop_file, pnum=len(self.fg_pool) + len(self.bg_pool),\n fnum=len(self.fg_pool), bnum=len(self.bg_pool),\n fr = self.fg_per_video, br=self.bg_per_video, vnum=len(self.video_dict),\n em=self.epoch_multiplier))\n else:\n print(\"\"\"\n BinaryDataset: proposal file {prop_file} parsed.\n \"\"\".format(prop_file=self.prop_file))\n # return self.video_list\n\n\n def __getitem__(self, index):\n real_index = index % len(self.video_list)\n if self.test_mode:\n return self.get_test_data(self.video_list[real_index], self.test_interval)\n else:\n return self.get_training_data(real_index)\n\n def _sample_frames(self, prop):\n start_frame = prop.start_frame + 1\n end_frame = prop.end_frame\n duration = end_frame - start_frame + 1\n sample_duration = duration / self.body_seg \n\n if sample_duration < 1:\n return start_frame + randint(prop.end_frame - prop.start_frame, size = self.body_seg)\n\n frame_indice = []\n split_stage = [int(np.round(i*sample_duration)) + start_frame for i in range(self.body_seg+1) ]\n \n for i in range(self.body_seg):\n # print(split_stage[i], split_stage[i+1])\n index = np.random.choice(range(split_stage[i],split_stage[i+1]), 1)\n frame_indice.extend(index)\n return frame_indice\n \n\n def _load_image(self, directory, idx):\n if self.modality == 'RGB' or self.modality == 'RGBDiff':\n return [Image.open(os.path.join(directory, self.image_tmpl.format(idx))).convert('RGB')]\n elif self.modality == 'Flow':\n x_img = Image.open(os.path.join(directory, self.image_tmpl.format('x', idx))).convert('L')\n y_img = Image.open(os.path.join(directory, self.image_tmpl.format('y', idx))).convert('L')\n\n return [x_img, y_img]\n \n\n def _load_prop_data(self, prop):\n\n # read frame count\n frame_cnt = self.video_dict[prop[0][0]].num_frames\n # frame_cnt = 1572 \n frame_selected = self._sample_frames(prop[0][1])\n frames = []\n for idx in frame_selected:\n for x in range(self.new_length):\n frames.extend(self._load_image(prop[0][0], min(frame_cnt, idx+x)))\n\n return frames, prop[1]\n # sample segment indices\n\n\n def _video_centric_sampling(self, video):\n \n fg = video.get_fg(self.fg_iou_thresh, self.gt_as_fg)\n bg = video.get_bg(self.bg_iou_thresh)\n \n def sample_video_proposals(proposal_type, video_id, video_pool, requested_num, dataset_pool):\n if len(video_pool) == 0:\n # if there is noting in the video pool, go fetch from the dataset pool\n return [(dataset_pool[x], proposal_type) for x in np.random.choice(len(dataset_pool), requested_num, replace=False)]\n else:\n replicate = len(video_pool) < requested_num\n idx = np.random.choice(len(video_pool), requested_num, replace = replicate)\n return [((video_id, video_pool[x]), proposal_type) for x in idx]\n\n out_props = []\n out_props.extend(sample_video_proposals(1, video.id, fg, self.fg_per_video, self.fg_pool)) # sample foreground\n out_props.extend(sample_video_proposals(0, video.id, bg, self.bg_per_video, self.bg_pool)) # sample background\n \n return out_props\n\n\n def 
get_training_data(self, index):\n\n video = self.video_list[index]\n props = self._video_centric_sampling(video)\n \n out_frames = []\n out_prop_len = []\n out_prop_type = []\n \n frames = []\n for idx, p in enumerate(props):\n prop_frames, prop_type = self._load_prop_data(p)\n processed_frames = self.transform(prop_frames)\n out_frames.append(processed_frames)\n out_prop_type.append(prop_type)\n\n out_prop_type = torch.from_numpy(np.array(out_prop_type))\n out_frames = torch.cat(out_frames)\n return out_frames, out_prop_type\n\n\n def get_test_data(self, video, test_interval, gen_batchsize=4):\n props = []\n video_id = video.id\n frame_cnt = video.num_frames\n\n frame_ticks = np.arange(0, frame_cnt - self.new_length, test_interval, dtype=np.int) + 1\n num_sampled_frames = len(frame_ticks)\n\n # avoid empty proposal list\n for i in frame_ticks:\n props.append(BinaryInstance(i, i+1, 1))\n\n proposal_tick_list = []\n\n for proposal in props:\n proposal_ticks = proposal.start_frame, proposal.end_frame\n proposal_tick_list.append(proposal_ticks)\n\n # load frames\n # Since there are many frames for each video during testing, instead of returning the read frames\n # we return a generator which gives the frames in samll batches, this lower the momeory burden\n # runtime overhead. Usually stting batchsize=4 would fit most cases.\n\n def frame_gen(batchsize):\n frames= []\n cnt = 0\n for idx, seg_ind in enumerate(frame_ticks):\n p = int(seg_ind)\n for x in range(self.new_length):\n frames.extend(self._load_image(video_id, min(frame_cnt, p+x)))\n cnt += 1\n\n if cnt % batchsize == 0:\n frames = self.transform(frames)\n yield frames\n frames = []\n \n if len(frames):\n frames = self.transform(frames)\n yield frames\n\n return frame_gen(gen_batchsize), len(frame_ticks)\n\n\n\n def __len__(self):\n return len(self.video_list) * self.epoch_multiplier \n"
] |
[
[
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
christophersampson/digit_recognition_demo
|
[
"2269354761d7ae64a3d165bb7fbfa8f023c0725f"
] |
[
"confusion_matrix.py"
] |
[
"\"\"\"\nhttp://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py\n\"\"\"\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import confusion_matrix\n\ndef plot_confusion_matrix(cm, classes,\n normalise=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n\n if normalise:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalise else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\ndef create_confusion_matrix(actual, predicted, class_names, normalise=False):\n\n cnf_matrix = confusion_matrix(actual,\n predicted)\n np.set_printoptions(precision=2)\n\n # Plot non-normalized confusion matrix\n plt.figure()\n plot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix',\n normalise=normalise)\n plt.show()"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"numpy.set_printoptions",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xternalz/SDPoint
|
[
"0013c5dafe80780ea749198ebc42824c3ed41e6c"
] |
[
"main.py"
] |
[
"import argparse\nimport os\nimport shutil\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport models\nimport utils.flops as flops\n\nmodel_names = sorted(name for name in models.__dict__\n\tif name.islower() and not name.startswith(\"__\")\n\tand callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training - Stochastic Downsampling')\nparser.add_argument('data', metavar='DIR',\n\t\t\t\t\thelp='path to dataset')\nparser.add_argument('--arch', '-a', metavar='ARCH', default='preresnet101',\n\t\t\t\t\tchoices=model_names,\n\t\t\t\t\thelp='model architecture: ' +\n\t\t\t\t\t\t' | '.join(model_names) +\n\t\t\t\t\t\t' (default: preresnet101)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n\t\t\t\t\thelp='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=115, type=int, metavar='N',\n\t\t\t\t\thelp='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n\t\t\t\t\thelp='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n\t\t\t\t\tmetavar='N', help='mini-batch size (default: 256)')\nparser.add_argument('-vb', '--val-batch-size', default=1024, type=int,\n\t\t\t\t\tmetavar='N', help='validation mini-batch size (default: 1024)')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n\t\t\t\t\tmetavar='LR', help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n\t\t\t\t\thelp='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n\t\t\t\t\tmetavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--print-freq', '-p', default=10, type=int,\n\t\t\t\t\tmetavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n\t\t\t\t\thelp='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n\t\t\t\t\thelp='evaluate model on validation set')\nparser.add_argument('--world-size', default=1, type=int,\n\t\t\t\t\thelp='number of distributed processes')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n\t\t\t\t\thelp='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='gloo', type=str,\n\t\t\t\t\thelp='distributed backend')\nparser.add_argument('--val-results-path', default='val_results.txt', type=str,\n\t\t\t\t\thelp='filename of the file for writing validation results')\n\nbest_prec1 = 0\n\n\ndef main():\n\tglobal args, best_prec1\n\targs = parser.parse_args()\n\n\targs.distributed = args.world_size > 1\n\n\tif args.distributed:\n\t\tdist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n\t\t\t\t\t\t\t\tworld_size=args.world_size)\n\n\t# create model\n\tprint(\"=> creating model '{}'\".format(args.arch))\n\tmodel = models.__dict__[args.arch]()\n\n\tif not args.distributed:\n\t\tmodel = torch.nn.DataParallel(model).cuda()\n\telse:\n\t\tmodel.cuda()\n\t\tmodel = torch.nn.parallel.DistributedDataParallel(model)\n\n\t# define loss function (criterion) and optimizer\n\tcriterion = 
nn.CrossEntropyLoss().cuda()\n\n\toptimizer = torch.optim.SGD(model.parameters(),\n\t\t\t\t\t\t\t\targs.lr, momentum=args.momentum,\n\t\t\t\t\t\t\t\tweight_decay=args.weight_decay)\n\n\t# optionally resume from a checkpoint\n\tif args.resume:\n\t\tif os.path.isfile(args.resume):\n\t\t\tprint(\"=> loading checkpoint '{}'\".format(args.resume))\n\t\t\tcheckpoint = torch.load(args.resume)\n\t\t\targs.start_epoch = checkpoint['epoch']\n\t\t\tmodel.load_state_dict(checkpoint['state_dict'])\n\t\t\toptimizer.load_state_dict(checkpoint['optimizer'])\n\t\t\tprint(\"=> loaded checkpoint '{}' (epoch {})\"\n\t\t\t\t .format(args.resume, checkpoint['epoch']))\n\t\telse:\n\t\t\tprint(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n\tcudnn.benchmark = True\n\n\t# Data loading code\n\ttraindir = os.path.join(args.data, 'train')\n\tvaldir = os.path.join(args.data, 'val')\n\tnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n\t\t\t\t\t\t\t\t\t std=[0.229, 0.224, 0.225])\n\n\ttrain_dataset = datasets.ImageFolder(\n\t\ttraindir,\n\t\ttransforms.Compose([\n\t\t\ttransforms.RandomResizedCrop(224),\n\t\t\ttransforms.RandomHorizontalFlip(),\n\t\t\ttransforms.ToTensor(),\n\t\t\tnormalize,\n\t\t]))\n\n\tif args.distributed:\n\t\ttrain_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n\telse:\n\t\ttrain_sampler = None\n\n\tif args.evaluate:\n\t\targs.batch_size = args.val_batch_size\n\n\ttrain_loader = torch.utils.data.DataLoader(\n\t\ttrain_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n\t\tnum_workers=args.workers, pin_memory=True, sampler=train_sampler)\n\n\tval_loader = torch.utils.data.DataLoader(\n\t\tdatasets.ImageFolder(valdir, transforms.Compose([\n\t\t\ttransforms.Resize(256),\n\t\t\ttransforms.CenterCrop(224),\n\t\t\ttransforms.ToTensor(),\n\t\t\tnormalize,\n\t\t])),\n\t\tbatch_size=args.batch_size, shuffle=False,\n\t\tnum_workers=args.workers, pin_memory=True)\n\n\tif args.evaluate:\n\t\tmodel.eval()\n\t\tval_results_file = open(args.val_results_path, 'w')\n\t\tval_results_file.write('blockID\\tratio\\tflops\\ttop1-acc\\ttop5-acc\\t\\n')\n\t\tfor i in [-1] + [model.module.blockID] + list(range(model.module.blockID)):\n\t\t\tfor r in [0.5, 0.75]:\n\t\t\t\tmodel_flops = flops.calculate(model, i, r)\n\t\t\t\ttop1, top5 = validate(train_loader, val_loader, model, criterion, i, r)\n\t\t\t\tval_results_file.write('{0}\\t{1}\\t{2}\\t{top1:.3f}\\t{top5:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\ti if i>-1 else 'nil', r if i>-1 else 'nil',\n\t\t\t\t\t\t\t\t\t\tmodel_flops, top1=top1, top5=top5))\n\t\t\t\tif i == -1:\n\t\t\t\t\tbreak\n\t\tval_results_file.close()\n\t\treturn\n\n\tfor epoch in range(args.start_epoch, args.epochs):\n\t\tif args.distributed:\n\t\t\ttrain_sampler.set_epoch(epoch)\n\t\tadjust_learning_rate(optimizer, epoch)\n\n\t\t# train for one epoch\n\t\ttrain(train_loader, model, criterion, optimizer, epoch)\n\n\t\tsave_checkpoint({\n\t\t\t'epoch': epoch + 1,\n\t\t\t'arch': args.arch,\n\t\t\t'state_dict': model.state_dict(),\n\t\t\t'optimizer' : optimizer.state_dict(),\n\t\t})\n\n\ndef train(train_loader, model, criterion, optimizer, epoch):\n\tbatch_time = AverageMeter()\n\tdata_time = AverageMeter()\n\tlosses = AverageMeter()\n\ttop1 = AverageMeter()\n\ttop5 = AverageMeter()\n\n\t# switch to train mode\n\tmodel.train()\n\n\tend = time.time()\n\tfor i, (input, target) in enumerate(train_loader):\n\t\t# measure data loading time\n\t\tdata_time.update(time.time() - end)\n\n\t\ttarget = target.cuda(non_blocking=True)\n\n\t\t# compute 
output\n\t\toutput = model(input)\n\t\tloss = criterion(output, target)\n\n\t\t# measure accuracy and record loss\n\t\tprec1, prec5 = accuracy(output, target, topk=(1, 5))\n\t\tlosses.update(loss.item(), input.size(0))\n\t\ttop1.update(prec1[0], input.size(0))\n\t\ttop5.update(prec5[0], input.size(0))\n\n\t\t# compute gradient and do SGD step\n\t\toptimizer.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\n\t\t# measure elapsed time\n\t\tbatch_time.update(time.time() - end)\n\t\tend = time.time()\n\n\t\tif i % args.print_freq == 0:\n\t\t\tprint('Epoch: [{0}][{1}/{2}]\\t'\n\t\t\t\t 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n\t\t\t\t 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n\t\t\t\t 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n\t\t\t\t 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n\t\t\t\t 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n\t\t\t\t epoch, i, len(train_loader), batch_time=batch_time,\n\t\t\t\t data_time=data_time, loss=losses, top1=top1, top5=top5))\n\n\ndef validate(train_loader, val_loader, model, criterion, blockID, ratio):\n\tbatch_time = AverageMeter()\n\tdata_time = AverageMeter()\n\tlosses = AverageMeter()\n\ttop1 = AverageMeter()\n\ttop5 = AverageMeter()\n\n\t# switch to train mode\n\tmodel.train()\n\n\twith torch.no_grad():\n\t\tend = time.time()\n\t\tfor i, (input, _) in enumerate(train_loader):\n\t\t\t# measure data loading time\n\t\t\tdata_time.update(time.time() - end)\n\n\t\t\tinput = input.cuda()\n\n\t\t\t# compute output\n\t\t\toutput = model(input, blockID=blockID, ratio=ratio)\n\n\t\t\t# measure elapsed time\n\t\t\tbatch_time.update(time.time() - end)\n\t\t\tend = time.time()\n\n\t\t\tif i % args.print_freq == 0:\n\t\t\t\tprint('Iteration: [{0}/{1}]\\t'\n\t\t\t\t\t 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n\t\t\t\t\t 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'.format(\n\t\t\t\t\t i, len(train_loader), batch_time=batch_time,\n\t\t\t\t\t data_time=data_time))\n\n\t# switch to evaluate mode\n\tmodel.eval()\n\n\twith torch.no_grad():\n\t\tend = time.time()\n\t\tfor i, (input, target) in enumerate(val_loader):\n\t\t\tinput = input.cuda()\n\t\t\ttarget = target.cuda(non_blocking=True)\n\n\t\t\t# compute output\n\t\t\toutput = model(input, blockID=blockID, ratio=ratio)\n\t\t\tloss = criterion(output, target)\n\n\t\t\t# measure accuracy and record loss\n\t\t\tprec1, prec5 = accuracy(output, target, topk=(1, 5))\n\t\t\tlosses.update(loss.item(), input.size(0))\n\t\t\ttop1.update(prec1[0], input.size(0))\n\t\t\ttop5.update(prec5[0], input.size(0))\n\n\t\t\t# measure elapsed time\n\t\t\tbatch_time.update(time.time() - end)\n\t\t\tend = time.time()\n\n\t\t\tif i % args.print_freq == 0:\n\t\t\t\tprint('Test: [{0}/{1}]\\t'\n\t\t\t\t\t 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n\t\t\t\t\t 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n\t\t\t\t\t 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n\t\t\t\t\t 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n\t\t\t\t\t i, len(val_loader), batch_time=batch_time, loss=losses,\n\t\t\t\t\t top1=top1, top5=top5))\n\n\t\tprint(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'\n\t\t\t .format(top1=top1, top5=top5))\n\n\treturn top1.avg, top5.avg\n\n\ndef save_checkpoint(state, filename='checkpoint.pth.tar'):\n\ttorch.save(state, filename)\n\n\nclass AverageMeter(object):\n\t\"\"\"Computes and stores the average and current value\"\"\"\n\tdef __init__(self):\n\t\tself.reset()\n\n\tdef reset(self):\n\t\tself.val = 0\n\t\tself.avg = 0\n\t\tself.sum = 0\n\t\tself.count = 0\n\n\tdef 
update(self, val, n=1):\n\t\tself.val = val\n\t\tself.sum += val * n\n\t\tself.count += n\n\t\tself.avg = self.sum / self.count\n\n\ndef adjust_learning_rate(optimizer, epoch):\n\t\"\"\"Sets the learning rate to the initial LR decayed by 10 after the 40th, 75th, and 105th epochs\"\"\"\n\tlr = args.lr\n\tfor e in [40,75,105]:\n\t\tif epoch >= e:\n\t\t\tlr *= 0.1\n\tfor param_group in optimizer.param_groups:\n\t\tparam_group['lr'] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n\t\"\"\"Computes the precision@k for the specified values of k\"\"\"\n\twith torch.no_grad():\n\t\tmaxk = max(topk)\n\t\tbatch_size = target.size(0)\n\n\t\t_, pred = output.topk(maxk, 1, True, True)\n\t\tpred = pred.t()\n\t\tcorrect = pred.eq(target.view(1, -1).expand_as(pred))\n\n\t\tres = []\n\t\tfor k in topk:\n\t\t\tcorrect_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n\t\t\tres.append(correct_k.mul_(100.0 / batch_size))\n\t\treturn res\n\n\nif __name__ == '__main__':\n\tmain()\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.load",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.nn.DataParallel",
"torch.nn.parallel.DistributedDataParallel",
"torch.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
danielmk/pyDentateeLife2020
|
[
"b4a9f2beaa0c74dbc9583e2cf228856612596f8a",
"b4a9f2beaa0c74dbc9583e2cf228856612596f8a",
"b4a9f2beaa0c74dbc9583e2cf228856612596f8a",
"b4a9f2beaa0c74dbc9583e2cf228856612596f8a",
"df8f67d4523ce463701c5e5675e74e309dd151e7"
] |
[
"deprecated_nets/net_focal2.py",
"deprecated_nets/net_focal.py",
"analysis/model_get_perc_active.py",
"analysis/obsolete/patterns_get_sqrt-diff-norm_outputs.py",
"deprecated_nets/net_global.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nThis module implements the class StandardNetwork.\nStandardNetwork creates a ring network as defined in Santhakumar et al. 2005\nwith some changes as in Yim et al. 2015.\nSee StandardNetwork docstring for details.\nCreated on Tue Nov 28 13:01:38 2017\n\n@author: DanielM\n\"\"\"\n\nfrom neuron import h, gui\nimport ouropy\nimport numpy as np\nfrom granulecell import GranuleCell\nfrom mossycell_cat import MossyCell\nfrom basketcell import BasketCell\nfrom hippcell import HippCell\n\n\nclass TunedNetwork(ouropy.gennetwork.GenNetwork):\n \"\"\" This model implements the ring model from Santhakumar et al. 2005.\n with some changes as in Yim et al. 2015.\n It features inhibition but omits the MC->GC connection.\n \"\"\"\n\n name = \"TunedNetwork\"\n def __init__(self, seed=None, temporal_patterns=np.array([]),\n spatial_patterns_gcs=np.array([]),\n spatial_patterns_bcs=np.array([])):\n self.init_params = locals()\n self.init_params['self'] = str(self.init_params['self'])\n # Setup cells\n self.mk_population(GranuleCell, 2000)\n self.mk_population(MossyCell, 60)\n self.mk_population(BasketCell, 24)\n self.mk_population(HippCell, 24)\n\n # Set seed for reproducibility\n if seed:\n self.set_numpy_seed(seed)\n\n # Setup recordings\n self.populations[0].record_aps()\n self.populations[1].record_aps()\n self.populations[2].record_aps()\n self.populations[3].record_aps()\n\n temporal_patterns = np.array(temporal_patterns)\n print(np.shape(temporal_patterns))\n #temporal_patterns = np.atleast_2d(temporal_patterns)\n if type(spatial_patterns_gcs) == np.ndarray and type(temporal_patterns) == np.ndarray:\n #spatial_patterns_gcs = np.atleast_2d(spatial_patterns_gcs)\n for pat in range(len(spatial_patterns_gcs)):\n # PP -> GC\n #Original\n ouropy.gennetwork.PerforantPathPoissonTmgsyn(self.populations[0],\n temporal_patterns[pat],\n spatial_patterns_gcs[pat],\n 'midd', 5.5, 0, 1, 0, 0, 1.25*10**(-3))\n\n if type(spatial_patterns_bcs) == np.ndarray and type(temporal_patterns) == np.ndarray:\n #spatial_patterns_bcs = np.atleast_2d(spatial_patterns_bcs)\n for pat in range(len(spatial_patterns_bcs)):\n # PP -> BC\n ouropy.gennetwork.PerforantPathPoissonTmgsyn(self.populations[2],\n temporal_patterns[pat],\n spatial_patterns_bcs[pat],\n 'ddend', 6.3, 0, 1, 0, 0, 1*10**(-3))\n\n # GC -> MC\n ouropy.gennetwork.tmgsynConnection(self.populations[0], self.populations[1],\n 12, 'proxd',\n 1, 6.2, 500, 0.1, 0, 0, 10, 1.5, 0.2*10**(-2) * 10)\n\n # GC -> BC\n #Weight x4, target_pool = 2\n ouropy.gennetwork.tmgsynConnection(self.populations[0], self.populations[2],\n 8, 'proxd',\n 1, 0.6, 500, 0.1, 0, 0, 10, 0.8, 18.8*10**(-2))\n\n # GC -> HC\n # Divergence x4; Weight doubled; Connected randomly.\n ouropy.gennetwork.tmgsynConnection(self.populations[0], self.populations[3],\n 24, 'proxd',\n 12, 0.6, 500, 0.1, 0, 0, 10, 1.5, 1.5*10**(-2))\n\n # MC -> MC\n ouropy.gennetwork.tmgsynConnection(self.populations[1], self. populations[1],\n 24, 'proxd',\n 3, 2.2, 0, 1, 0, 0, 10, 2, 0.5*10**(-3))\n\n # MC -> BC\n ouropy.gennetwork.tmgsynConnection(self.populations[1], self.populations[2],\n 12, 'proxd',\n 1, 0.1, 0, 1, 0, 0, 10, 3, 0.3*10**(-3))\n\n # MC -> HC\n ouropy.gennetwork.tmgsynConnection(self.populations[1], self.populations[3],\n 20, 'midd',\n 2, 3.6, 0, 1, 0, 0, 10, 3, 0.2*10**(-3))\n\n # BC -> GC\n # Nr. 
synapses x3; Weight *1/4; changed from 5.5 to 20 (Hefft & Jonas, 2005)\n ouropy.gennetwork.tmgsynConnection(self.populations[2], self.populations[0],\n 400, 'soma',\n 400, 20, 0, 1, 0, -70, 10, 0.85, 1.2*10**(-3))\n\n # We reseed here to make sure that those connections are consistent\n # between this and net_global. The only connection that differs between\n # net_tuned and net_global will be the BC -> GC connection.\n if seed:\n self.set_numpy_seed(seed)\n\n # BC -> MC \n ouropy.gennetwork.tmgsynConnection(self.populations[2], self.populations[1],\n 28, 'proxd',\n 3, 3.3, 0, 1, 0, -70, -10, 1.5, 1.5*10**(-3))\n\n # BC -> BC\n ouropy.gennetwork.tmgsynConnection(self.populations[2], self.populations[2],\n 12,'proxd',\n 2, 1.8, 0,1,0,-70, -10, 0.8, 7.6*10**(-3))\n\n # HC -> GC\n # Weight x10; Nr synapses x4; changed from 6 to 20 (Hefft & Jonas, 2005)\n ouropy.gennetwork.tmgsynConnection(self.populations[3], self.populations[0],\n 1000, 'dd',\n 640, 20, 0, 1, 0, -70, 10, 1.6, 0.6*10**(-2))\n\n # HC -> MC\n ouropy.gennetwork.tmgsynConnection(self.populations[3], self.populations[1],\n 60, ['mid1d', 'mid2d'],\n 4, 6, 0, 1, 0, -70, 10, 1, 1.5*10**(-3))\n\n # HC -> BC\n ouropy.gennetwork.tmgsynConnection(self.populations[3], self.populations[2],\n 24, 'ddend',\n 4, 5.8, 0, 1, 0, -70, 10, 1.6, 0.5*10**(-3))\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nThis module implements the class StandardNetwork.\nStandardNetwork creates a ring network as defined in Santhakumar et al. 2005\nwith some changes as in Yim et al. 2015.\nSee StandardNetwork docstring for details.\nCreated on Tue Nov 28 13:01:38 2017\n\n@author: DanielM\n\"\"\"\n\nfrom neuron import h, gui\nimport ouropy\nimport numpy as np\nfrom granulecell import GranuleCell\nfrom mossycell_cat import MossyCell\nfrom basketcell import BasketCell\nfrom hippcell import HippCell\n\n\nclass TunedNetwork(ouropy.gennetwork.GenNetwork):\n \"\"\" This model implements the ring model from Santhakumar et al. 2005.\n with some changes as in Yim et al. 2015.\n It features inhibition but omits the MC->GC connection.\n \"\"\"\n\n name = \"TunedNetwork\"\n def __init__(self, seed=None, temporal_patterns=np.array([]),\n spatial_patterns_gcs=np.array([]),\n spatial_patterns_bcs=np.array([])):\n self.init_params = locals()\n self.init_params['self'] = str(self.init_params['self'])\n # Setup cells\n self.mk_population(GranuleCell, 2000)\n self.mk_population(MossyCell, 60)\n self.mk_population(BasketCell, 24)\n self.mk_population(HippCell, 24)\n\n # Set seed for reproducibility\n if seed:\n self.set_numpy_seed(seed)\n\n # Setup recordings\n self.populations[0].record_aps()\n self.populations[1].record_aps()\n self.populations[2].record_aps()\n self.populations[3].record_aps()\n\n temporal_patterns = np.array(temporal_patterns)\n print(np.shape(temporal_patterns))\n #temporal_patterns = np.atleast_2d(temporal_patterns)\n if type(spatial_patterns_gcs) == np.ndarray and type(temporal_patterns) == np.ndarray:\n #spatial_patterns_gcs = np.atleast_2d(spatial_patterns_gcs)\n for pat in range(len(spatial_patterns_gcs)):\n # PP -> GC\n #Original\n ouropy.gennetwork.PerforantPathPoissonTmgsyn(self.populations[0],\n temporal_patterns[pat],\n spatial_patterns_gcs[pat],\n 'midd', 5.5, 0, 1, 0, 0, 1.25*10**(-3))\n\n if type(spatial_patterns_bcs) == np.ndarray and type(temporal_patterns) == np.ndarray:\n #spatial_patterns_bcs = np.atleast_2d(spatial_patterns_bcs)\n for pat in range(len(spatial_patterns_bcs)):\n # PP -> BC\n ouropy.gennetwork.PerforantPathPoissonTmgsyn(self.populations[2],\n temporal_patterns[pat],\n spatial_patterns_bcs[pat],\n 'ddend', 6.3, 0, 1, 0, 0, 1*10**(-3))\n\n # GC -> MC\n ouropy.gennetwork.tmgsynConnection(self.populations[0], self.populations[1],\n 12, 'proxd',\n 1, 6.2, 500, 0.1, 0, 0, 10, 1.5, 0.2*10**(-2) * 10)\n\n # GC -> BC\n #Weight x4, target_pool = 2\n ouropy.gennetwork.tmgsynConnection(self.populations[0], self.populations[2],\n 8, 'proxd',\n 1, 0.6, 500, 0.1, 0, 0, 10, 0.8, 18.8*10**(-2))\n\n # GC -> HC\n # Divergence x4; Weight doubled; Connected randomly.\n ouropy.gennetwork.tmgsynConnection(self.populations[0], self.populations[3],\n 24, 'proxd',\n 12, 0.6, 500, 0.1, 0, 0, 10, 1.5, 1.5*10**(-2))\n\n # MC -> MC\n ouropy.gennetwork.tmgsynConnection(self.populations[1], self. populations[1],\n 24, 'proxd',\n 3, 2.2, 0, 1, 0, 0, 10, 2, 0.5*10**(-3))\n\n # MC -> BC\n ouropy.gennetwork.tmgsynConnection(self.populations[1], self.populations[2],\n 12, 'proxd',\n 1, 0.1, 0, 1, 0, 0, 10, 3, 0.3*10**(-3))\n\n # MC -> HC\n ouropy.gennetwork.tmgsynConnection(self.populations[1], self.populations[3],\n 20, 'midd',\n 2, 3.6, 0, 1, 0, 0, 10, 3, 0.2*10**(-3))\n\n # BC -> GC\n # Nr. 
synapses x3; Weight *1/4; changed from 5.5 to 20 (Hefft & Jonas, 2005)\n ouropy.gennetwork.tmgsynConnection(self.populations[2], self.populations[0],\n 400, 'soma',\n 400, 20, 0, 1, 0, -70, 10, 0.85, 1.2*10**(-3))\n\n # We reseed here to make sure that those connections are consistent\n # between this and net_global. The only connection that differs between\n # net_tuned and net_global will be the BC -> GC connection.\n if seed:\n self.set_numpy_seed(seed)\n\n # BC -> MC \n ouropy.gennetwork.tmgsynConnection(self.populations[2], self.populations[1],\n 28, 'proxd',\n 3, 3.3, 0, 1, 0, -70, -10, 1.5, 1.5*10**(-3))\n\n # BC -> BC\n ouropy.gennetwork.tmgsynConnection(self.populations[2], self.populations[2],\n 12,'proxd',\n 2, 1.8, 0,1,0,-70, -10, 0.8, 7.6*10**(-3))\n\n # HC -> GC\n # Weight x10; Nr synapses x4; changed from 6 to 20 (Hefft & Jonas, 2005)\n ouropy.gennetwork.tmgsynConnection(self.populations[3], self.populations[0],\n 2000, 'dd',\n 640, 20, 0, 1, 0, -70, 10, 1.6, 0.6*10**(-2))\n\n # HC -> MC\n ouropy.gennetwork.tmgsynConnection(self.populations[3], self.populations[1],\n 60, ['mid1d', 'mid2d'],\n 4, 6, 0, 1, 0, -70, 10, 1, 1.5*10**(-3))\n\n # HC -> BC\n ouropy.gennetwork.tmgsynConnection(self.populations[3], self.populations[2],\n 24, 'ddend',\n 4, 5.8, 0, 1, 0, -70, 10, 1.6, 0.5*10**(-3))\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 11 10:25:41 2018\n\n@author: daniel\n\"\"\"\nimport shelve\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n#Home PC\n#directory = \"C:\\\\Users\\\\daniel\\\\repos\\\\pyDentate\\paradigm_pattern-separation_saves_2018-03-11\\\\\"\n#Office PC\n#directory = \"Y:\\\\DanielM\\\\023_Dentate Gyrus Model\\\\paradigm_spatial-inhibition\\\\\"\n#Dropbox\n\n#data_files = [f for f in os.listdir(data_path) if os.path.isfile(os.path.join(data_path, f)) and '.pydd' in f and not '.npz' in f]\n#data_files.sort()\n\ndef get_perc_active_n_aps(file_name):\n curr_data = shelve.open(data_path + x)\n active_gcs = np.array(np.argwhere(np.array(curr_data[curr_data.keys()[0]]['populations'][0]['ap_number']) > 0),dtype = int).flatten()\n active_mcs = np.array(np.argwhere(np.array(curr_data[curr_data.keys()[0]]['populations'][1]['ap_number']) > 0),dtype = int).flatten()\n active_bcs = np.array(np.argwhere(np.array(curr_data[curr_data.keys()[0]]['populations'][2]['ap_number']) > 0),dtype = int).flatten()\n active_hcs = np.array(np.argwhere(np.array(curr_data[curr_data.keys()[0]]['populations'][3]['ap_number']) > 0),dtype = int).flatten()\n\n n_aps_avg_gcs=np.array(curr_data[curr_data.keys()[0]]['populations'][0]['ap_number'])[active_gcs].mean()\n n_aps_avg_mcs=np.array(curr_data[curr_data.keys()[0]]['populations'][1]['ap_number'])[active_mcs].mean()\n n_aps_avg_bcs=np.array(curr_data[curr_data.keys()[0]]['populations'][2]['ap_number'])[active_bcs].mean()\n n_aps_avg_hcs=np.array(curr_data[curr_data.keys()[0]]['populations'][3]['ap_number'])[active_hcs].mean()\n\n curr_data.close()\n perc_active_gcs = (len(active_gcs) / 2000.0)*100\n perc_active_mcs = (len(active_mcs) / 60.0)*100\n perc_active_bcs = (len(active_bcs) / 24.0)*100\n perc_active_hcs = (len(active_hcs) / 24.0)*100\n\n return [perc_active_gcs, perc_active_mcs, perc_active_bcs, perc_active_hcs], [n_aps_avg_gcs, n_aps_avg_mcs, n_aps_avg_bcs, n_aps_avg_hcs]\n\n\nif __name__ == '__main__':\n data_path = \"Z:\\\\pyDentate\\\\pyDentateData\\\\pattern_separation_data_local_input_revised\\\\seed10000\\\\scale1000\\\\net_tunedrev\\\\\"\n # file_name = \"net_tunedrev.TunedNetwork_run_scale_000_1000.pydd\"\n data_files = [f for f in os.listdir(data_path) if os.path.isfile(os.path.join(data_path, f)) and '.pydd' in f and 'spike_data' in f]\n perc_active_list = []\n avg_n_aps_list = []\n for x in data_files:\n print(x)\n perc_active, n_cells = get_perc_active_n_aps(data_path+x)\n perc_active_list.append(perc_active)\n avg_n_aps_list.append(n_cells)\n \n np.savetxt(data_path + \"perc_active_cells.txt\", np.array(perc_active_list), delimiter='\\t')\n np.savetxt(data_path + \"avg_n_aps.txt\", np.array(avg_n_aps_list), delimiter='\\t')\n \n\"\"\"\ndata = shelve.open(data_path + file_name)\nperc_active_gcs_list = []\nperc_active_mcs_list = []\nperc_active_bcs_list = []\nperc_active_hcs_list = []\n\nn_aps_avg_gcs_list = []\nn_aps_avg_mcs_list = []\nn_aps_avg_bcs_list = []\nn_aps_avg_hcs_list = []\n\nn_aps_std_gcs_list = []\nn_aps_std_mcs_list = []\nn_aps_std_bcs_list = []\nn_aps_std_hcs_list = []\n\n# Get to BasketCell Connection\nfor x in data_files:\n curr_data = shelve.open(data_path + x)\n active_gcs = np.array(np.argwhere(np.array(curr_data[curr_data.keys()[0]]['populations'][0]['ap_number']) > 0),dtype = int).flatten()\n active_mcs = np.array(np.argwhere(np.array(curr_data[curr_data.keys()[0]]['populations'][1]['ap_number']) > 0),dtype = int).flatten()\n active_bcs = 
np.array(np.argwhere(np.array(curr_data[curr_data.keys()[0]]['populations'][2]['ap_number']) > 0),dtype = int).flatten()\n active_hcs = np.array(np.argwhere(np.array(curr_data[curr_data.keys()[0]]['populations'][3]['ap_number']) > 0),dtype = int).flatten()\n\n n_aps_avg_gcs_list.append(np.array(curr_data[curr_data.keys()[0]]['populations'][0]['ap_number'])[active_gcs].mean())\n n_aps_std_gcs_list.append(np.array(curr_data[curr_data.keys()[0]]['populations'][0]['ap_number'])[active_gcs].std())\n\n n_aps_avg_mcs_list.append(np.array(curr_data[curr_data.keys()[0]]['populations'][1]['ap_number'])[active_mcs].mean())\n n_aps_std_mcs_list.append(np.array(curr_data[curr_data.keys()[0]]['populations'][1]['ap_number'])[active_mcs].std())\n\n n_aps_avg_bcs_list.append(np.array(curr_data[curr_data.keys()[0]]['populations'][2]['ap_number'])[active_bcs].mean())\n n_aps_std_bcs_list.append(np.array(curr_data[curr_data.keys()[0]]['populations'][2]['ap_number'])[active_bcs].std())\n \n n_aps_avg_hcs_list.append(np.array(curr_data[curr_data.keys()[0]]['populations'][3]['ap_number'])[active_hcs].mean())\n n_aps_std_hcs_list.append(np.array(curr_data[curr_data.keys()[0]]['populations'][3]['ap_number'])[active_hcs].std())\n\n perc_active_gcs_list.append((len(active_gcs) / 2000.0)*100)\n perc_active_mcs_list.append((len(active_mcs) / 60.0)*100)\n perc_active_bcs_list.append((len(active_bcs) / 24.0)*100)\n perc_active_hcs_list.append((len(active_hcs) / 24.0)*100)\n \nn_active_gcs_array = np.array(perc_active_gcs_list)\nn_active_mcs_array = np.array(perc_active_mcs_list)\nn_active_bcs_array = np.array(perc_active_bcs_list)\nn_active_hcs_array = np.array(perc_active_hcs_list)\n\"\"\"",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 20 11:35:03 2018\n\n@author: DanielM\n\"\"\"\n\nimport os\nimport numpy as np\nimport shelve\nimport os\nimport analysis_main\n\n# Setup some parameters given by paradigm_frequency_inhibition.py\ndata_path = \"C:\\\\Users\\\\Daniel\\\\pyDentateData\\\\pattern_separation_data_local_input_revised\\\\seed10000\\\\scale1000\\\\net_globalrev\\\\\"\nsave_path = data_path\ndata_files = [f for f in os.listdir(data_path) if os.path.isfile(os.path.join(data_path, f)) and '.npz' in f and 'spike_data' in f and 'convolved' in f]\ndata_files.sort()\n\ndata_files = data_files[0:25]\n\ncorr_matrix = np.empty((len(data_files), len(data_files)))\n\ndata_list = []\nfor x in data_files:\n data_list.append(np.load(data_path + x)['arr_0'])\n\nrow_idx_start = 0\nrow_idx_stop = 25\n# 376\nlen_bins = 6000\n\nfor row_idx, x in enumerate(data_list[row_idx_start:row_idx_stop]):\n for col_idx, y in enumerate(data_list[row_idx+row_idx_start:len(data_list)]):\n corr_matrix[row_idx+row_idx_start,col_idx+row_idx+row_idx_start]=analysis_main.sqrt_diff_norm(x,y,len_bins)\n \nnp.savetxt(save_path + \"1_sqrt-diff-norm_matrix_len-bin_\" + str(len_bins) + \".txt\", corr_matrix, delimiter=\"\\t\")",
"# -*- coding: utf-8 -*-\n\"\"\"\nThis module implements the class StandardNetwork.\nStandardNetwork creates a ring network as defined in Santhakumar et al. 2005\nwith some changes as in Yim et al. 2015.\nSee StandardNetwork docstring for details.\nCreated on Tue Nov 28 13:01:38 2017\n\n@author: DanielM\n\"\"\"\n\nfrom neuron import h, gui\nimport ouropy\nimport numpy as np\nfrom granulecell import GranuleCell\nfrom mossycell_cat import MossyCell\nfrom basketcell import BasketCell\nfrom hippcell import HippCell\n\n\nclass TunedNetwork(ouropy.gennetwork.GenNetwork):\n \"\"\" This model implements the ring model from Santhakumar et al. 2005.\n with some changes as in Yim et al. 2015.\n It features inhibition but omits the MC->GC connection.\n \"\"\"\n name = \"TunedNetwork\"\n\n def __init__(self, seed=None, temporal_patterns=np.array([]),\n spatial_patterns_gcs=np.array([]),\n spatial_patterns_bcs=np.array([])):\n self.init_params = locals()\n self.init_params['self'] = str(self.init_params['self'])\n # Setup cells\n self.mk_population(GranuleCell, 2000)\n self.mk_population(MossyCell, 60)\n self.mk_population(BasketCell, 24)\n self.mk_population(HippCell, 24)\n\n # Set seed for reproducibility\n if seed:\n self.set_numpy_seed(seed)\n\n # Setup recordings\n self.populations[0].record_aps()\n self.populations[1].record_aps()\n self.populations[2].record_aps()\n self.populations[3].record_aps()\n\n temporal_patterns = np.array(temporal_patterns)\n print(np.shape(temporal_patterns))\n #temporal_patterns = np.atleast_2d(temporal_patterns)\n if type(spatial_patterns_gcs) == np.ndarray and type(temporal_patterns) == np.ndarray:\n #spatial_patterns_gcs = np.atleast_2d(spatial_patterns_gcs)\n for pat in range(len(spatial_patterns_gcs)):\n # PP -> GC\n #Original\n ouropy.gennetwork.PerforantPathPoissonTmgsyn(self.populations[0],\n temporal_patterns[pat],\n spatial_patterns_gcs[pat],\n 'midd', 5.5, 0, 1, 0, 0, 1.25*10**(-3))\n\n if type(spatial_patterns_bcs) == np.ndarray and type(temporal_patterns) == np.ndarray:\n #spatial_patterns_bcs = np.atleast_2d(spatial_patterns_bcs)\n for pat in range(len(spatial_patterns_bcs)):\n # PP -> BC\n ouropy.gennetwork.PerforantPathPoissonTmgsyn(self.populations[2],\n temporal_patterns[pat],\n spatial_patterns_bcs[pat],\n 'ddend', 6.3, 0, 1, 0, 0, 1*10**(-3))\n\n # GC -> MC\n ouropy.gennetwork.tmgsynConnection(self.populations[0], self.populations[1],\n 12, 'proxd',\n 1, 6.2, 500, 0.1, 0, 0, 10, 1.5, 0.2*10**(-2) * 10)\n\n # GC -> BC\n #Weight x4, target_pool = 2\n ouropy.gennetwork.tmgsynConnection(self.populations[0], self.populations[2],\n 8, 'proxd',\n 1, 0.6, 500, 0.1, 0, 0, 10, 0.8, 18.8*10**(-2))\n\n # GC -> HC\n # Divergence x4; Weight doubled; Connected randomly.\n ouropy.gennetwork.tmgsynConnection(self.populations[0], self.populations[3],\n 24, 'proxd',\n 12, 0.6, 500, 0.1, 0, 0, 10, 1.5, 1.5*10**(-2))\n\n # MC -> MC\n ouropy.gennetwork.tmgsynConnection(self.populations[1], self. populations[1],\n 24, 'proxd',\n 3, 2.2, 0, 1, 0, 0, 10, 2, 0.5*10**(-3))\n\n # MC -> BC\n ouropy.gennetwork.tmgsynConnection(self.populations[1], self.populations[2],\n 12, 'proxd',\n 1, 0.1, 0, 1, 0, 0, 10, 3, 0.3*10**(-3))\n\n # MC -> HC\n ouropy.gennetwork.tmgsynConnection(self.populations[1], self.populations[3],\n 20, 'midd',\n 2, 3.6, 0, 1, 0, 0, 10, 3, 0.2*10**(-3))\n\n # BC -> GC\n # Nr. 
synapses x3; Weight *1/4; changed from 5.5 to 20 (Hefft & Jonas, 2005)\n ouropy.gennetwork.tmgsynConnection(self.populations[2], self.populations[0],\n 2000, 'soma',\n 400, 20, 0, 1, 0, -70, 10, 0.85, 1.2*10**(-3))\n\n # We reseed here to make sure that those connections are consistent\n # between this and net_global\n if seed:\n self.set_numpy_seed(seed)\n\n # BC -> MC \n ouropy.gennetwork.tmgsynConnection(self.populations[2], self.populations[1],\n 28, 'proxd',\n 3, 3.3, 0, 1, 0, -70, -10, 1.5, 1.5*10**(-3))\n\n # BC -> BC\n ouropy.gennetwork.tmgsynConnection(self.populations[2], self.populations[2],\n 12,'proxd',\n 2, 1.8, 0,1,0,-70, -10, 0.8, 7.6*10**(-3))\n\n # HC -> GC\n # Weight x10; Nr synapses x4; changed from 6 to 20 (Hefft & Jonas, 2005)\n ouropy.gennetwork.tmgsynConnection(self.populations[3], self.populations[0],\n 2000, 'dd',\n 640, 20, 0, 1, 0, -70, 10, 1.6, 0.6*10**(-2))\n\n # HC -> MC\n ouropy.gennetwork.tmgsynConnection(self.populations[3], self.populations[1],\n 60, ['mid1d', 'mid2d'],\n 4, 6, 0, 1, 0, -70, 10, 1, 1.5*10**(-3))\n\n # HC -> BC\n ouropy.gennetwork.tmgsynConnection(self.populations[3], self.populations[2],\n 24, 'ddend',\n 4, 5.8, 0, 1, 0, -70, 10, 1.6, 0.5*10**(-3))\n"
] |
[
[
"numpy.array",
"numpy.shape"
],
[
"numpy.array",
"numpy.shape"
],
[
"numpy.array"
],
[
"numpy.load"
],
[
"numpy.array",
"numpy.shape"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
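Note: the apis column of the record above only lists the top-level numpy entry points detected in these files (numpy.array, numpy.shape, numpy.load); the analysis scripts in the code column combine them with np.argwhere and boolean indexing to compute percent-active cells and mean spike counts per population. The following standalone sketch (synthetic values, not part of the dataset row above) shows that pattern in isolation:

# Standalone sketch, not taken from the repository above: the numpy pattern the
# pyDentate analysis entries rely on -- select "active" cells from an ap_number
# array, then report percent-active and the mean spike count among them.
# The ap_number values here are made up for illustration.
import numpy as np

ap_number = np.array([0, 3, 0, 1, 5, 0, 2, 0])   # spikes per cell (synthetic)
active = np.argwhere(ap_number > 0).flatten()     # indices of cells that fired
perc_active = 100.0 * len(active) / len(ap_number)
mean_aps = ap_number[active].mean()               # mean spikes among active cells
print(perc_active, mean_aps)                      # -> 50.0 2.75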
dreaming-coder/DeepLab
|
[
"3020544e2f9e139dde7bd04f6ff59e6f44d49c6e",
"3020544e2f9e139dde7bd04f6ff59e6f44d49c6e",
"3020544e2f9e139dde7bd04f6ff59e6f44d49c6e",
"3020544e2f9e139dde7bd04f6ff59e6f44d49c6e"
] |
[
"src/model/PredRNNpp/run.py",
"src/model/RadarNet/ConvRLSTM.py",
"src/model/MIM/MIMBlock.py",
"src/model/MIM/project_run.py"
] |
[
"from pathlib import Path\nfrom typing import List\n\nfrom torch import optim, nn\nfrom torch.utils.data import DataLoader\n\nfrom PredRNNpp import PredRNNpp\nfrom data.RadarDataset2 import RadarDataset2\nfrom util import TrainingTemplate\nfrom util.TestingTemplate import TestingTemplate\nfrom util.transforms.patch import reshape_patch, reshape_patch_back\n\n# 定义模型参数\nin_channels: int = 1\nhidden_channels_list: List[int] = [128, 128, 128]\nkernel_size_list: List[int] = [3, 3, 3]\nghu_hidden_channels: int = 128\nghu_kernel_size: int = 5\n\n# 定义训练参数\nmax_epochs: int = 2000\nlearning_rate: float = 1e-3\nbatch_size: int = 8\npatch: int = 4\ndevice: str = \"cuda:1\"\ntest_frequency: int = 1\nout_len: int = 20\nstart_save = 0\n\n\nclass PredRNNTrainer(TrainingTemplate):\n def check_data(self, data):\n inputs, labels = data\n # 这里是去除输入中的标签,以及多余的标签,因为有时候需要其他的,就放一起了\n inputs = inputs[:, :10]\n labels = labels[:, :10]\n # patch 分割,编写增加 batch_size 的 trick\n inputs = reshape_patch(inputs, patch_size=patch)\n labels = reshape_patch(labels, patch_size=patch)\n # 转换运算设备\n inputs = inputs.to(self.device)\n labels = labels.to(self.device)\n return inputs, labels\n\n\nclass PredRNNTester(TestingTemplate):\n def check_data(self, data):\n inputs, labels = data\n\n # patch 分割,编写增加 batch_size 的 trick\n inputs = reshape_patch(inputs, patch_size=patch)\n labels = reshape_patch(labels, patch_size=patch)\n # 转换运算设备\n inputs = inputs.to(self.device)\n labels = labels.to(self.device)\n return inputs, labels\n\n def check_data_back(self, labels, outputs):\n labels = reshape_patch_back(labels, patch_size=patch)\n outputs = reshape_patch_back(outputs, patch_size=patch)\n return labels, outputs\n\n\ndef train():\n # 加载数据集\n train_set = RadarDataset2(\"train\")\n test_set = RadarDataset2(\"test_real_sequences\")\n train_loader = DataLoader(dataset=train_set, batch_size=batch_size, num_workers=4, shuffle=True, drop_last=True,\n pin_memory=True)\n test_loader = DataLoader(dataset=test_set, batch_size=batch_size, num_workers=4, shuffle=False, drop_last=True,\n pin_memory=True)\n\n # 创建网络模型\n model = PredRNNpp(in_channels=in_channels * patch ** 2, hidden_channels_list=hidden_channels_list,\n ghu_hidden_channels=ghu_hidden_channels, kernel_size_list=kernel_size_list,\n ghu_kernel_size=ghu_kernel_size).to(device)\n\n # 定义损失函数\n criterion = nn.MSELoss()\n\n # 定义优化器\n optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n # 定义参数保存位置`\n to_save = Path(__file__).parent.joinpath(\"results\")\n if not to_save.exists():\n to_save.mkdir()\n\n # 定义训练器\n trainer = PredRNNTrainer(model=model, train_loader=train_loader, test_loader=test_loader, criterion=criterion,\n optimizer=optimizer, lr_scheduler=None, max_epochs=max_epochs, device=device,\n to_save=to_save, test_frequency=test_frequency, start_save=start_save, visualize=False)\n\n # 训练\n trainer.run()\n\n\ndef valid():\n test_set = RadarDataset2(\"test_real_sequences\")\n test_loader = DataLoader(dataset=test_set, batch_size=batch_size, num_workers=4, shuffle=False, drop_last=True,\n pin_memory=True)\n\n # 创建网络模型\n model = PredRNNpp(in_channels=in_channels * patch ** 2, hidden_channels_list=hidden_channels_list,\n ghu_hidden_channels=ghu_hidden_channels, kernel_size_list=kernel_size_list,\n ghu_kernel_size=ghu_kernel_size).to(device)\n\n # 定义参数保存位置`\n to_save = Path(__file__).parent.joinpath(\"results\")\n\n # 定义训练器\n tester = PredRNNTester(model=model, test_loader=test_loader, device=device, to_save=to_save)\n tester.run(out_len=out_len)\n\n\nif __name__ == '__main__':\n 
valid()\n",
"r\"\"\"\n《Convolutional Refine LSTM》\n\"\"\"\nimport warnings\n\nimport torch\nfrom torch import nn\n\n__all__ = [\"ConvRLSTM\"]\n\n\nclass ConvRLSTM(nn.Module):\n def __init__(self, in_channels: int, hidden_channels: int, kernel_size: int):\n r\"\"\"\n :param in_channels: 输入的通道数\n :param hidden_channels: 隐藏层通道数\n :param kernel_size: 卷积核尺寸\n \"\"\"\n warnings.warn(\"一次改动太多不好写,暂时放弃\", DeprecationWarning)\n super(ConvRLSTM, self).__init__()\n self.in_channels = in_channels\n self.hidden_channels = hidden_channels\n\n padding = kernel_size // 2\n\n self.conv_x = nn.Conv2d(\n in_channels=in_channels, out_channels=hidden_channels * 4,\n kernel_size=kernel_size, stride=1, padding=padding\n )\n\n self.conv_m = nn.Conv2d(\n in_channels=hidden_channels, out_channels=hidden_channels * 4,\n kernel_size=kernel_size, stride=1, padding=padding\n )\n\n def forward(self, x, m, c):\n x_concat = self.conv_x(x)\n m_concat = self.conv_m(m)\n\n x_concat = torch.layer_norm(x_concat, x_concat.shape[1:])\n m_concat = torch.layer_norm(m_concat, m_concat.shape[1:])\n\n f_x, u_x, r_x, o_x = torch.split(x_concat, self.hidden_channels, dim=1)\n f_m, u_m, r_m, o_m = torch.split(m_concat, self.hidden_channels, dim=1)\n\n f = torch.sigmoid(f_x + f_m)\n u = torch.tanh(u_x + u_m)\n r = torch.sigmoid(r_x + r_m)\n g = r * (1 - torch.pow(1 - f, 2)) + (1 - r) * torch.pow(f, 2)\n c = g * c + (1 - g) * u\n o = torch.sigmoid(o_x + o_m)\n m = o * torch.tanh(c)\n\n return m, c\n",
"r\"\"\"\nMIM Block 的实现\n\"\"\"\nimport torch\nfrom torch import nn, Tensor\nfrom typing import Tuple\n\n__all__ = [\"MIMBlock\"]\n\n\nclass MIMBlock(nn.Module):\n\n def __init__(self, in_channels: int, hidden_channels: int, kernel_size: int, forget_bias: float = 0.01):\n r\"\"\"\n :param in_channels: 输入通道数\n :param hidden_channels: 隐藏层通道数\n :param kernel_size: 卷积核尺寸\n :param forget_bias: 偏移量\n \"\"\"\n super().__init__()\n\n self.in_channels = in_channels\n self.hidden_channels = hidden_channels\n self.forget_bias = forget_bias\n\n padding = kernel_size // 2\n\n self.mim_n = MIMN(in_channels=in_channels, hidden_channels=hidden_channels, kernel_size=kernel_size,\n forget_bias=forget_bias)\n\n self.mim_s = MIMS(hidden_channels=hidden_channels, kernel_size=kernel_size, forget_bias=forget_bias)\n\n self.conv_x = nn.Conv2d(\n in_channels=in_channels, out_channels=hidden_channels * 6,\n kernel_size=kernel_size, padding=padding, stride=1\n )\n\n self.conv_h = nn.Conv2d(\n in_channels=hidden_channels, out_channels=hidden_channels * 3,\n kernel_size=kernel_size, padding=padding, stride=1\n )\n\n self.conv_m = nn.Conv2d(\n in_channels=hidden_channels, out_channels=hidden_channels * 3,\n kernel_size=kernel_size, padding=padding, stride=1\n )\n\n self.conv_o_c = nn.Conv2d(\n in_channels=hidden_channels, out_channels=hidden_channels,\n kernel_size=kernel_size, padding=padding, stride=1\n )\n\n self.conv_o_m = nn.Conv2d(\n in_channels=hidden_channels, out_channels=hidden_channels,\n kernel_size=kernel_size, padding=padding, stride=1\n )\n\n self.conv1x1 = nn.Conv2d(\n in_channels=hidden_channels * 2, out_channels=hidden_channels,\n kernel_size=1, padding=0, stride=1\n )\n\n def forward(self, x_: Tensor, x: Tensor, c: Tensor, h: Tensor,\n m: Tensor, n: Tensor, s: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n r\"\"\"\n :param x_: h_{t-1}^{l-1}\n :param x: h_{t-1}^l\n :param c: cell记忆信息\n :param h: 时间方向记忆Tensor\n :param m: 空间方向记忆Tensor\n :param n: MIM-N 记忆Tensor\n :param s: MIM-S 记忆Tensor\n :return: 更新后的 c, h, m, n, s\n \"\"\"\n x_concat = self.conv_x(x)\n h_concat = self.conv_h(h)\n m_concat = self.conv_m(m)\n\n x_concat = torch.layer_norm(x_concat, x_concat.shape[1:])\n h_concat = torch.layer_norm(h_concat, h_concat.shape[1:])\n m_concat = torch.layer_norm(m_concat, m_concat.shape[1:])\n\n g_x, i_x, gg_x, ii_x, ff_x, o_x = torch.split(x_concat, self.hidden_channels, dim=1)\n g_h, i_h, o_h = torch.split(h_concat, self.hidden_channels, dim=1)\n gg_m, ii_m, ff_m = torch.split(m_concat, self.hidden_channels, dim=1)\n\n g = torch.tanh(g_x + g_h)\n i = torch.sigmoid(i_x + i_h)\n\n h_diff = x - x_\n n, d = self.mim_n(h_diff, n)\n s, t = self.mim_s(d, c, s)\n\n c = t + i * g\n\n gg = torch.tanh(gg_x + gg_m)\n ii = torch.sigmoid(ii_x + ii_m)\n ff = torch.sigmoid(ff_x + ff_m + self.forget_bias)\n\n m = ff * m + ii * gg\n\n o = torch.sigmoid(o_x + o_h + self.conv_o_c(c) + self.conv_o_m(m))\n\n states = torch.cat([c, m], dim=1)\n\n h = o * torch.tanh(self.conv1x1(states))\n\n return c, h, m, n, s\n\n\nclass MIMN(nn.Module):\n\n def __init__(self, in_channels: int, hidden_channels: int, kernel_size: int, forget_bias: float = 0.01):\n r\"\"\"\n :param in_channels: 输入通道数\n :param hidden_channels: 隐藏层通道数\n :param kernel_size: 卷积核尺寸\n :param forget_bias: 偏移量\n \"\"\"\n super().__init__()\n\n self.in_channels = in_channels\n self.hidden_channels = hidden_channels\n self.forget_bias = forget_bias\n\n padding = kernel_size // 2\n\n self.conv_h_diff = nn.Conv2d(\n in_channels=in_channels, 
out_channels=hidden_channels * 4,\n kernel_size=kernel_size, padding=padding, stride=1\n )\n\n self.conv_n = nn.Conv2d(\n in_channels=hidden_channels, out_channels=hidden_channels * 3,\n kernel_size=kernel_size, padding=padding, stride=1\n )\n\n self.conv_w_no = nn.Conv2d(\n in_channels=hidden_channels, out_channels=hidden_channels,\n kernel_size=kernel_size, padding=padding, stride=1\n )\n\n def forward(self, h_diff: Tensor, n: Tensor) -> Tuple[Tensor, Tensor]:\n r\"\"\"\n :param h_diff: 输入的隐藏层的差分值\n :param n: 状态Tensor\n :return: n, d\n \"\"\"\n h_diff_concat = self.conv_h_diff(h_diff)\n h_diff_concat = torch.layer_norm(h_diff_concat, h_diff_concat.shape[1:])\n n_concat = self.conv_n(n)\n n_concat = torch.layer_norm(n_concat, n_concat.shape[1:])\n\n g_h, i_h, f_h, o_h = torch.split(h_diff_concat, self.hidden_channels, dim=1)\n g_n, i_n, f_n = torch.split(n_concat, self.hidden_channels, dim=1)\n\n g = torch.tanh(g_h + g_n)\n i = torch.sigmoid(i_h + i_n)\n f = torch.sigmoid(f_h + f_n + self.forget_bias)\n\n n = f * n + i * g\n\n o_n = self.conv_w_no(n)\n o_n = torch.layer_norm(o_n, o_n.shape[1:])\n o = torch.sigmoid(o_h + o_n)\n d = o * torch.tanh(n)\n\n return n, d\n\n\nclass MIMS(nn.Module):\n\n def __init__(self, hidden_channels: int, kernel_size: int, forget_bias: float = 0.01):\n r\"\"\"\n :param hidden_channels: 通道数\n :param kernel_size: 卷积核尺寸\n :param forget_bias: 偏移量\n \"\"\"\n super().__init__()\n\n self.hidden_channels = hidden_channels\n self.forget_bias = forget_bias\n\n padding = kernel_size // 2\n\n self.conv_d = nn.Conv2d(\n in_channels=hidden_channels, out_channels=hidden_channels * 4,\n kernel_size=kernel_size, padding=padding, stride=1\n )\n self.conv_c = nn.Conv2d(\n in_channels=hidden_channels, out_channels=hidden_channels * 4,\n kernel_size=kernel_size, padding=padding, stride=1\n )\n\n self.conv_w_so = nn.Conv2d(\n in_channels=hidden_channels, out_channels=hidden_channels,\n kernel_size=kernel_size, padding=padding, stride=1\n )\n\n def forward(self, d: Tensor, c: Tensor, s: Tensor) -> Tuple[Tensor, Tensor]:\n r\"\"\"\n :param d: 差分信息\n :param c: 状态记忆Tensor\n :param s: MIMS记忆Tensor\n :return: s, t\n \"\"\"\n d_concat = self.conv_d(d)\n c_concat = self.conv_c(c)\n\n d_concat = torch.layer_norm(d_concat, d_concat.shape[1:])\n c_concat = torch.layer_norm(c_concat, c_concat.shape[1:])\n\n g_d, i_d, f_d, o_d = torch.split(d_concat, self.hidden_channels, dim=1)\n g_c, i_c, f_c, o_c = torch.split(c_concat, self.hidden_channels, dim=1)\n\n g = torch.tanh(g_d + g_c)\n i = torch.sigmoid(i_d + i_c)\n f = torch.sigmoid(f_d + f_c + self.forget_bias)\n\n s = f * s + i * g\n\n o_s = self.conv_w_so(s)\n o_s = torch.layer_norm(o_s, o_s.shape[1:])\n\n o = torch.sigmoid(o_d + o_c + o_s)\n\n t = o * torch.tanh(s)\n\n return s, t\n",
"from pathlib import Path\nfrom typing import List\n\nfrom torch import optim, nn\nfrom torch.utils.data import DataLoader\n\nfrom MIM import MIM\nfrom data.RadarDataset import RadarDataset\nfrom util import TrainingTemplate\nfrom util.TestingTemplate import TestingTemplate\nfrom util.transforms.patch import reshape_patch, reshape_patch_back\n\n# 定义模型参数\nin_channels: int = 1\nhidden_channels_list: List[int] = [32, 32, 32, 32]\nkernel_size_list: List[int] = [3, 3, 3, 3]\n\n# 定义训练参数\nmax_epochs: int = 2000\nlearning_rate: float = 1e-4\nbatch_size: int = 2\npatch: int = 4\ndevice: str = \"cuda:0\"\ntest_frequency: int = 1\nout_len: int = 20\nstart_save = 0\n\n\nclass MIMTrainer(TrainingTemplate):\n def check_data(self, data):\n inputs, labels = data\n # 这里是去除输入中的标签,以及多余的标签,因为有时候需要其他的,就放一起了\n inputs = inputs[:, :10]\n labels = labels[:, :10]\n # patch 分割,编写增加 batch_size 的 trick\n inputs = reshape_patch(inputs, patch_size=patch)\n labels = reshape_patch(labels, patch_size=patch)\n # 转换运算设备\n inputs = inputs.to(self.device)\n labels = labels.to(self.device)\n return inputs, labels\n\n\nclass MIMTester(TestingTemplate):\n def check_data(self, data):\n inputs, labels = data\n # patch 分割,编写增加 batch_size 的 trick\n inputs = reshape_patch(inputs, patch_size=patch)\n labels = reshape_patch(labels, patch_size=patch)\n # 转换运算设备\n inputs = inputs.to(self.device)\n labels = labels.to(self.device)\n return inputs, labels\n\n def check_data_back(self, labels, outputs):\n labels = reshape_patch_back(labels, patch_size=patch)\n outputs = reshape_patch_back(outputs, patch_size=patch)\n return labels, outputs\n\n\ndef train():\n # 加载数据集\n train_set = RadarDataset(\"train\")\n test_set = RadarDataset(\"test_real_sequences\")\n train_loader = DataLoader(dataset=train_set, batch_size=batch_size, num_workers=4, shuffle=True, drop_last=True,\n pin_memory=True)\n test_loader = DataLoader(dataset=test_set, batch_size=batch_size, num_workers=4, shuffle=False, drop_last=True,\n pin_memory=True)\n\n # 创建网络模型\n model = MIM(in_channels=in_channels * patch ** 2, hidden_channels_list=hidden_channels_list,\n kernel_size_list=kernel_size_list).to(device)\n\n # 定义损失函数\n criterion = nn.MSELoss()\n\n # 定义优化器\n optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n # 定义参数保存位置`\n to_save = Path(__file__).parent.joinpath(\"results2\")\n if not to_save.exists():\n to_save.mkdir()\n\n # 定义训练器\n trainer = MIMTrainer(model=model, train_loader=train_loader, test_loader=test_loader, criterion=criterion,\n optimizer=optimizer, lr_scheduler=None, max_epochs=max_epochs, device=device,\n to_save=to_save, test_frequency=test_frequency, start_save=start_save, visualize=False)\n\n # 训练\n trainer.run()\n\n\ndef valid():\n test_set = RadarDataset(\"test_real_sequences\")\n test_loader = DataLoader(dataset=test_set, batch_size=batch_size, num_workers=4, shuffle=False, drop_last=True,\n pin_memory=True)\n\n # 创建网络模型\n model = MIM(in_channels=in_channels * patch ** 2, hidden_channels_list=hidden_channels_list,\n kernel_size_list=kernel_size_list).to(device)\n\n # 定义参数保存位置`\n to_save = Path(__file__).parent.joinpath(\"results2\")\n\n # 定义训练器\n tester = MIMTester(model=model, test_loader=test_loader, device=device, to_save=to_save)\n tester.run(out_len=out_len)\n\n\nif __name__ == '__main__':\n train()\n"
] |
[
[
"torch.utils.data.DataLoader",
"torch.nn.MSELoss"
],
[
"torch.sigmoid",
"torch.nn.Conv2d",
"torch.tanh",
"torch.layer_norm",
"torch.split",
"torch.pow"
],
[
"torch.sigmoid",
"torch.cat",
"torch.nn.Conv2d",
"torch.tanh",
"torch.layer_norm",
"torch.split"
],
[
"torch.utils.data.DataLoader",
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
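Note: the torch APIs recorded for the record above (nn.Conv2d, torch.split, torch.layer_norm, torch.sigmoid, torch.tanh, torch.cat, torch.pow, DataLoader, MSELoss) all come from the gated recurrent cells and training scripts in its code column. The sketch below (standalone, with illustrative shapes and channel counts, not code from the repository) exercises the core gate-update pattern those cells share:

# Standalone sketch: a minimal gated state update in the style of the
# ConvRLSTM/MIM cells listed in the code column. Channel counts and input
# shapes are arbitrary choices for the example.
import torch
from torch import nn

hidden = 8
conv_x = nn.Conv2d(in_channels=1, out_channels=hidden * 3, kernel_size=3, padding=1)

x = torch.randn(2, 1, 16, 16)             # (batch, channels, height, width)
c = torch.zeros(2, hidden, 16, 16)        # cell state

x_concat = conv_x(x)                      # project input to 3*hidden channels
x_concat = torch.layer_norm(x_concat, x_concat.shape[1:])
g_x, i_x, f_x = torch.split(x_concat, hidden, dim=1)

g = torch.tanh(g_x)                       # candidate update
i = torch.sigmoid(i_x)                    # input gate
f = torch.sigmoid(f_x + 0.01)             # forget gate with a small bias
c = f * c + i * g                         # updated cell state
print(c.shape)                            # torch.Size([2, 8, 16, 16])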
jasonzutty/ezCGP
|
[
"ee44425e4835f9d7ca5651bbea3340cf9466840a"
] |
[
"codes/block_definitions/block_arguments.py"
] |
[
"'''\nroot/code/block_definitions/block_arguments.py\n\nOverview:\nIn the first ever iteration of creating a CGP framework, it was suggested by Dr. Greg Rohling to remove hyperparamters from the genome and keep them in their own space and perform basic GA on them instead; that way, all the primitives only deal with data manipulation or classification rather than also including basic operations like addition or multiplication to evolve hyperparamter floats or booleans. The problem with this is that it leaves a lot of room for experimentation with how exactly we build out these list of arguments; so a lot more work has to be done to optimize what arguments are introduced and how they can be evolved with GA.\n\nEach block of each individual will have a .args attribute in it's genetic material which will be a random list of arguments/hyperparameters. What this class does, is define the scope of that list: how many arguments, which data types and how much of each. It also specifies the argument data type at each index of the .args list so that every individual will share the same data type but different value at an index.\n\nThe way this class works is that the user is expected to provide 2 things:\n* arg_count: int to be the length of the .arg list\n* arg_dict: dict with keys being the argument data type, and value being the percent share of .args that will be that arg data type\nThen init_from_weight_dict() will take those, run it through tools.build_weights to clean up the weight values, and then send to set_arg_types() to fill a list to .arg_types. That list will be the uninstantiated version of .args that all individuals will share to initialize their own .args at their init.\n\nRules:\nBe sure to follow the format to init the abstract class, assign the arg_count and arg dictionary, and then run init_from_weight_dict.\n'''\n\n### packages\nfrom typing import List\nimport inspect\nimport numpy as np\n\n### sys relative to root dir\nimport sys\nfrom os.path import dirname, realpath\nsys.path.append(dirname(dirname(dirname(realpath(__file__)))))\n\n### absolute imports wrt root\nfrom codes.block_definitions.utilities import tools\nfrom codes.block_definitions.utilities import argument_types\nfrom codes.utilities.custom_logging import ezLogging\n\n\n\nclass BlockArguments_Abstract():\n '''\n Note that this is not an ABC so the user isn't expected to write in their own methods for a new class.\n '''\n def __init__(self):\n ezLogging.debug(\"%s-%s - Initialize BlockArguments_Abstract Class\" % (None, None))\n self.arg_count = 0\n self.each_type = []\n self.each_weight = []\n self.arg_types = []\n\n\n def init_from_weight_dict(self, weight_dict):\n '''\n This should be the most common method used to initialize this block def.\n It takes in a dictionary of arg data types (key) and the percent (value)\n of the .args that it should populate, and then passes that to set_arg_types\n to fill in .arg_types.\n Note how we first pass it into tools.build_weights. 
This is because we allow the\n user to assign a weight of '1' in their weight_dict; tools.build_weights goes and\n assigns the 'real' weights of each type after accounting for the '1' weights which\n allow for equal distribution of weight after all non-1 weights have been set.\n '''\n ezLogging.debug(\"%s-%s - Inside init_from_weight_dict; weight_dict: %s\" % (None, None, weight_dict))\n args, weights = tools.build_weights(weight_dict)\n self.each_type = args\n self.each_weight = weights\n self.set_arg_types()\n\n\n def get_all_classes(self, module='argument_types'):\n '''\n This is just a helper function to set_equal_weights() to grab all the classes declared in\n the given module. Pretty sexy but will likely never get used.\n '''\n ezLogging.debug(\"%s-%s - Inside get_all_classes; module: %s\" % (None, None, module))\n vals = inspect.getmembers(globals()[module], inspect.isclass)\n # vals will be a list of tuples (name, value)...we want the value\n all_classes = []\n for name, value in vals:\n all_classes.append(value)\n\n return all_classes\n\n\n def set_equal_weights(self, module='argument_types'):\n '''\n if a user was sort of lazy and just wanted to use all possible arguments and\n give them equal weights, then they would just call this function from their\n inherited class' init method.\n this returns a weight_dict, and then the user would pass that to init_from_Weight_dict()\n to complete the init\n '''\n ezLogging.debug(\"%s-%s - Inside set_all_equal_weights; module: %s\" % (None, None, module))\n weight_dict = {}\n for arg_type in self.get_all_classes(module):\n weight_dict[arg_type] = 1\n\n return weight_dict\n\n\n def set_arg_types(self):\n '''\n given a list of unique argument data types and another list giving the percent share of the\n arg_count, this method will fill out a list of .arg_types to be used to initialize the .arg\n of blocks/individuals.\n '''\n ezLogging.debug(\"%s-%s - Inside set_arg_types\" % (None, None))\n start_point = 0\n end_point = 0\n self.arg_types = [None]*self.arg_count\n for arg_class, arg_weight in zip(self.each_type, self.each_weight):\n end_point += int(arg_weight*self.arg_count)\n for arg_index in range(start_point, end_point):\n self.arg_types[arg_index] = arg_class\n start_point = end_point\n\n if end_point != self.arg_count:\n # prob some rounding errors then\n sorted_byweight = np.argsort(self.each_weight)[::-1] # sort then reverse to go from largest to smallest\n for i, arg_index in enumerate(range(end_point, self.arg_count)):\n arg_class = self.each_type[sorted_byweight[i]]\n self.arg_types[arg_index] = arg_class\n else:\n pass\n\n\n\nclass BlockArguments_Size50(BlockArguments_Abstract):\n '''\n super simple implementation...50 args: 25 ints, and 25 power of 2s\n '''\n def __init__(self):\n ezLogging.debug(\"%s-%s - Initialize BlockArgumentsSize50 Class\" % (None, None))\n BlockArguments_Abstract.__init__(self)\n self.arg_count = 50\n arg_dict = {argument_types.ArgumentType_Ints: 1,\n argument_types.ArgumentType_Pow2: 1}\n self.init_from_weight_dict(arg_dict)\n\n\n\nclass BlockArguments_NoArgs(BlockArguments_Abstract):\n '''\n this will be used for when our operators/primitives should never need arguments,\n so we would never populate .args and it will stay an empty list\n '''\n def __init__(self):\n ezLogging.debug(\"%s-%s - Initialize BlockArgumentsNoArgs Class\" % (None, None))\n BlockArguments_Abstract.__init__(self)\n\n\n\nclass BlockArguments_SmallFloatOnly(BlockArguments_Abstract):\n '''\n this will be used for when our 
operators/primitives should never need arguments,\n so we would never populate .args and it will stay an empty list\n '''\n def __init__(self):\n ezLogging.debug(\"%s-%s - Initialize BlockArgumentsSmallFloatOnly Class\" % (None, None))\n BlockArguments_Abstract.__init__(self)\n self.arg_count = 20\n arg_dict = {argument_types.ArgumentType_SmallFloats: 1}\n self.init_from_weight_dict(arg_dict)\n\n\n\nclass BlockArguments_Gaussian(BlockArguments_Abstract):\n '''\n floats 0-100 for peak location\n ints 0-100 for curve intensity\n floats 0-1 for the std\n '''\n def __init__(self):\n ezLogging.debug(\"%s-%s - Initialize BlockArguments_Gaussian Class\" % (None, None))\n BlockArguments_Abstract.__init__(self)\n self.arg_count = 10*3*10 # 10 curves, 3 args each, x10\n arg_dict = {argument_types.ArgumentType_Float0to100: 1,\n argument_types.ArgumentType_Int0to100: 1,\n argument_types.ArgumentType_Float0to1: 1}\n self.init_from_weight_dict(arg_dict)\n\n\n\nclass BlockArguments_DataAugmentation(BlockArguments_Abstract):\n '''\n usage tally:\n argument_types.ArgumentType_LimitedFloat0to1 - lllll lllll lllll ll\n argument_types.ArgumentType_Int1to10 - lll\n argument_types.ArgumentType_Int0to25 - ll\n argument_types.ArgumentType_Float0to10 - lllll l\n argument_types.ArgumentType_Bool - l\n '''\n def __init__(self):\n ezLogging.debug(\"%s-%s - Initialize BlockArguments_DataAugmentation Class\" % (None, None))\n BlockArguments_Abstract.__init__(self)\n self.arg_count = 30*3\n arg_dict = {argument_types.ArgumentType_LimitedFloat0to1: 0.5, # 17/30 of all args\n argument_types.ArgumentType_Int1to10: 1,\n argument_types.ArgumentType_Int0to25: 1,\n argument_types.ArgumentType_Float0to10: 0.2, # 6/30\n argument_types.ArgumentType_Bool: 1}\n self.init_from_weight_dict(arg_dict)\n\n\n\nclass BlockArguments_DataPreprocessing(BlockArguments_Abstract):\n '''\n usage tally:\n argument_types.ArgumentType_FilterSize - llll\n argument_types.ArgumentType_Bool - l\n argument_types.ArgumentType_Int1to10 - l\n argument_types.ArgumentType_Float0to100- lll\n argument_types.ArgumentType_LimitedFloat0to1 - ll\n argument_types.ArgumentType_Int0to25 - ll\n '''\n def __init__(self):\n ezLogging.debug(\"%s-%s - Initialize BlockArguments_DataPreprocessing Class\" % (None, None))\n BlockArguments_Abstract.__init__(self)\n self.arg_count = 13*3\n arg_dict = {argument_types.ArgumentType_FilterSize: 0.33, # 4/13\n argument_types.ArgumentType_Bool: 1,\n argument_types.ArgumentType_Int1to10: 1,\n argument_types.ArgumentType_Float0to100: 0.25, # 3/13\n argument_types.ArgumentType_LimitedFloat0to1: 0.15, #2/13\n argument_types.ArgumentType_Int0to25: 1}\n self.init_from_weight_dict(arg_dict)\n\n\n\nclass BlockArguments_TransferLearning(BlockArguments_Abstract):\n '''\n usage tally:\n argument_types.ArgumentType_Int0to25 - l\n '''\n def __init__(self):\n ezLogging.debug(\"%s-%s - Initialize BlockArguments_TransferLearning Class\" % (None, None))\n BlockArguments_Abstract.__init__(self)\n self.arg_count = 1*3\n arg_dict = {argument_types.ArgumentType_Int0to25: 1}\n self.init_from_weight_dict(arg_dict)\n\n\n\nclass BlockArguments_TFKeras(BlockArguments_Abstract):\n '''\n usage tally:\n argument_types.ArgumentType_Pow2 - llll\n argument_types.ArgumentType_TFFilterSize - llll\n argument_types.ArgumentType_TFActivation - llll\n '''\n def __init__(self):\n ezLogging.debug(\"%s-%s - Initialize BlockArguments_TFKeras Class\" % (None, None))\n BlockArguments_Abstract.__init__(self)\n self.arg_count = 12*3\n arg_dict = 
{argument_types.ArgumentType_Pow2: 1,\n argument_types.ArgumentType_TFFilterSize: 1,\n argument_types.ArgumentType_TFActivation: 1}\n self.init_from_weight_dict(arg_dict)\n\n\n\n"
] |
[
[
"numpy.argsort"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
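Note: numpy.argsort, the single API recorded for the record above, appears in BlockArguments_Abstract.set_arg_types, where rounding leftovers from the per-type quotas are handed to the highest-weighted argument types. The following standalone sketch (made-up weights and type names, not code from the repository) shows that remainder-assignment pattern:

# Standalone sketch of the argsort-based remainder assignment used in
# set_arg_types. Weights and type names are illustrative only.
import numpy as np

arg_count = 10
each_type = ["IntArg", "FloatArg", "BoolArg"]
each_weight = [0.45, 0.35, 0.2]

arg_types = []
for arg_class, weight in zip(each_type, each_weight):
    arg_types += [arg_class] * int(weight * arg_count)

# int() truncation can leave slots unfilled; give them to the largest weights first.
by_weight_desc = np.argsort(each_weight)[::-1]
for i in range(arg_count - len(arg_types)):
    arg_types.append(each_type[by_weight_desc[i]])

print(len(arg_types), arg_types)   # 10 entries; leftovers go to the largest-weight type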
Chaanks/speechbrain
|
[
"6447bde54f6e3fb07fdb934ab535f17cadfbad53",
"6447bde54f6e3fb07fdb934ab535f17cadfbad53"
] |
[
"speechbrain/processing/multi_mic.py",
"speechbrain/processing/decomposition.py"
] |
[
"\"\"\"Multi-microphone components.\n\nThis library contains functions for multi-microphone signal processing.\n\nExample\n-------\n>>> import torch\n>>>\n>>> from speechbrain.dataio.dataio import read_audio\n>>> from speechbrain.processing.features import STFT, ISTFT\n>>> from speechbrain.processing.multi_mic import Covariance\n>>> from speechbrain.processing.multi_mic import GccPhat, SrpPhat, Music\n>>> from speechbrain.processing.multi_mic import DelaySum, Mvdr, Gev\n>>>\n>>> xs_speech = read_audio(\n... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'\n... )\n>>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]\n>>> xs_noise_diff = read_audio('tests/samples/multi-mic/noise_diffuse.flac')\n>>> xs_noise_diff = xs_noise_diff.unsqueeze(0)\n>>> xs_noise_loc = read_audio('tests/samples/multi-mic/noise_0.70225_-0.70225_0.11704.flac')\n>>> xs_noise_loc = xs_noise_loc.unsqueeze(0)\n>>> fs = 16000 # sampling rate\n\n>>> ss = xs_speech\n>>> nn_diff = 0.05 * xs_noise_diff\n>>> nn_loc = 0.05 * xs_noise_loc\n>>> xs_diffused_noise = ss + nn_diff\n>>> xs_localized_noise = ss + nn_loc\n\n>>> # Delay-and-Sum Beamforming with GCC-PHAT localization\n>>> stft = STFT(sample_rate=fs)\n>>> cov = Covariance()\n>>> gccphat = GccPhat()\n>>> delaysum = DelaySum()\n>>> istft = ISTFT(sample_rate=fs)\n\n>>> Xs = stft(xs_diffused_noise)\n>>> Ns = stft(nn_diff)\n>>> XXs = cov(Xs)\n>>> NNs = cov(Ns)\n>>> tdoas = gccphat(XXs)\n>>> Ys_ds = delaysum(Xs, tdoas)\n>>> ys_ds = istft(Ys_ds)\n\n>>> # Mvdr Beamforming with SRP-PHAT localization\n>>> mvdr = Mvdr()\n>>> mics = torch.zeros((4,3), dtype=torch.float)\n>>> mics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])\n>>> mics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])\n>>> mics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])\n>>> mics[3,:] = torch.FloatTensor([+0.05, +0.05, +0.00])\n>>> srpphat = SrpPhat(mics=mics)\n>>> doas = srpphat(XXs)\n>>> Ys_mvdr = mvdr(Xs, NNs, doas, doa_mode=True, mics=mics, fs=fs)\n>>> ys_mvdr = istft(Ys_mvdr)\n\n>>> # Mvdr Beamforming with MUSIC localization\n>>> music = Music(mics=mics)\n>>> doas = music(XXs)\n>>> Ys_mvdr2 = mvdr(Xs, NNs, doas, doa_mode=True, mics=mics, fs=fs)\n>>> ys_mvdr2 = istft(Ys_mvdr2)\n\n>>> # GeV Beamforming\n>>> gev = Gev()\n>>> Xs = stft(xs_localized_noise)\n>>> Ss = stft(ss)\n>>> Ns = stft(nn_loc)\n>>> SSs = cov(Ss)\n>>> NNs = cov(Ns)\n>>> Ys_gev = gev(Xs, SSs, NNs)\n>>> ys_gev = istft(Ys_gev)\n\nAuthors:\n * William Aris\n * Francois Grondin\n\n\"\"\"\n\nimport torch\nfrom packaging import version\nimport speechbrain.processing.decomposition as eig\n\n\nclass Covariance(torch.nn.Module):\n \"\"\"Computes the covariance matrices of the signals.\n\n Arguments:\n ----------\n average : bool\n Informs the module if it should return an average\n (computed on the time dimension) of the covariance\n matrices. The Default value is True.\n\n Example\n -------\n >>> import torch\n >>> from speechbrain.dataio.dataio import read_audio\n >>> from speechbrain.processing.features import STFT\n >>> from speechbrain.processing.multi_mic import Covariance\n >>>\n >>> xs_speech = read_audio(\n ... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'\n ... 
)\n >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]\n >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')\n >>> xs_noise = xs_noise.unsqueeze(0)\n >>> xs = xs_speech + 0.05 * xs_noise\n >>> fs = 16000\n\n >>> stft = STFT(sample_rate=fs)\n >>> cov = Covariance()\n >>>\n >>> Xs = stft(xs)\n >>> XXs = cov(Xs)\n >>> XXs.shape\n torch.Size([1, 1001, 201, 2, 10])\n \"\"\"\n\n def __init__(self, average=True):\n\n super().__init__()\n self.average = average\n\n def forward(self, Xs):\n \"\"\" This method uses the utility function _cov to compute covariance\n matrices. Therefore, the result has the following format:\n (batch, time_step, n_fft/2 + 1, 2, n_mics + n_pairs).\n\n The order on the last dimension corresponds to the triu_indices for a\n square matrix. For instance, if we have 4 channels, we get the following\n order: (0, 0), (0, 1), (0, 2), (0, 3), (1, 1), (1, 2), (1, 3), (2, 2), (2, 3)\n and (3, 3). Therefore, XXs[..., 0] corresponds to channels (0, 0) and XXs[..., 1]\n corresponds to channels (0, 1).\n\n Arguments:\n ----------\n Xs : tensor\n A batch of audio signals in the frequency domain.\n The tensor must have the following format:\n (batch, time_step, n_fft/2 + 1, 2, n_mics)\n \"\"\"\n\n XXs = Covariance._cov(Xs=Xs, average=self.average)\n return XXs\n\n @staticmethod\n def _cov(Xs, average=True):\n \"\"\" Computes the covariance matrices (XXs) of the signals. The result will\n have the following format: (batch, time_step, n_fft/2 + 1, 2, n_mics + n_pairs).\n\n Arguments:\n ----------\n Xs : tensor\n A batch of audio signals in the frequency domain.\n The tensor must have the following format:\n (batch, time_step, n_fft/2 + 1, 2, n_mics)\n\n average : boolean\n Informs the function if it should return an average\n (computed on the time dimension) of the covariance\n matrices. Default value is True.\n \"\"\"\n\n # Get useful dimensions\n n_mics = Xs.shape[4]\n\n # Formatting the real and imaginary parts\n Xs_re = Xs[..., 0, :].unsqueeze(4)\n Xs_im = Xs[..., 1, :].unsqueeze(4)\n\n # Computing the covariance\n Rxx_re = torch.matmul(Xs_re, Xs_re.transpose(3, 4)) + torch.matmul(\n Xs_im, Xs_im.transpose(3, 4)\n )\n\n Rxx_im = torch.matmul(Xs_re, Xs_im.transpose(3, 4)) - torch.matmul(\n Xs_im, Xs_re.transpose(3, 4)\n )\n\n # Selecting the upper triangular part of the covariance matrices\n idx = torch.triu_indices(n_mics, n_mics)\n\n XXs_re = Rxx_re[..., idx[0], idx[1]]\n XXs_im = Rxx_im[..., idx[0], idx[1]]\n\n XXs = torch.stack((XXs_re, XXs_im), 3)\n\n # Computing the average if desired\n if average is True:\n n_time_frames = XXs.shape[1]\n XXs = torch.mean(XXs, 1, keepdim=True)\n XXs = XXs.repeat(1, n_time_frames, 1, 1, 1)\n\n return XXs\n\n\nclass DelaySum(torch.nn.Module):\n \"\"\"Performs delay and sum beamforming by using the TDOAs and\n the first channel as a reference.\n\n Example\n -------\n >>> import torch\n\n >>> from speechbrain.dataio.dataio import read_audio\n >>> from speechbrain.processing.features import STFT, ISTFT\n >>> from speechbrain.processing.multi_mic import Covariance\n >>> from speechbrain.processing.multi_mic import GccPhat, DelaySum\n >>>\n >>> xs_speech = read_audio(\n ... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'\n ... )\n >>> xs_speech = xs_speech. 
unsqueeze(0) # [batch, time, channel]\n >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')\n >>> xs_noise = xs_noise.unsqueeze(0) #[batch, time, channels]\n >>> fs = 16000\n >>> xs = xs_speech + 0.05 * xs_noise\n >>>\n >>> stft = STFT(sample_rate=fs)\n >>> cov = Covariance()\n >>> gccphat = GccPhat()\n >>> delaysum = DelaySum()\n >>> istft = ISTFT(sample_rate=fs)\n >>>\n >>> Xs = stft(xs)\n >>> XXs = cov(Xs)\n >>> tdoas = gccphat(XXs)\n >>> Ys = delaysum(Xs, tdoas)\n >>> ys = istft(Ys)\n \"\"\"\n\n def __init__(self):\n\n super().__init__()\n\n def forward(\n self,\n Xs,\n localization_tensor,\n doa_mode=False,\n mics=None,\n fs=None,\n c=343.0,\n ):\n \"\"\"This method computes a steering vector by using the TDOAs/DOAs and\n then calls the utility function _delaysum to perform beamforming.\n The result has the following format: (batch, time_step, n_fft, 2, 1).\n\n Arguments\n ---------\n Xs : tensor\n A batch of audio signals in the frequency domain.\n The tensor must have the following format:\n (batch, time_step, n_fft/2 + 1, 2, n_mics)\n localization_tensor : tensor\n A tensor containing either time differences of arrival (TDOAs)\n (in samples) for each timestamp or directions of arrival (DOAs)\n (xyz coordinates in meters). If localization_tensor represents\n TDOAs, then its format is (batch, time_steps, n_mics + n_pairs).\n If localization_tensor represents DOAs, then its format is\n (batch, time_steps, 3)\n doa_mode : bool\n The user needs to set this parameter to True if localization_tensor\n represents DOAs instead of TDOAs. Its default value is set to False.\n mics : tensor\n The cartesian position (xyz coordinates in meters) of each microphone.\n The tensor must have the following format (n_mics, 3). This\n parameter is only mandatory when localization_tensor represents\n DOAs.\n fs : int\n The sample rate in Hertz of the signals. This parameter is only\n mandatory when localization_tensor represents DOAs.\n c : float\n The speed of sound in the medium. The speed is expressed in meters\n per second and the default value of this parameter is 343 m/s. This\n parameter is only used when localization_tensor represents DOAs.\n \"\"\"\n\n # Get useful dimensions\n n_fft = Xs.shape[2]\n localization_tensor = localization_tensor.to(Xs.device)\n # Convert the tdoas to taus\n if doa_mode:\n taus = doas2taus(doas=localization_tensor, mics=mics, fs=fs, c=c)\n\n else:\n taus = tdoas2taus(tdoas=localization_tensor)\n\n # Generate the steering vector\n As = steering(taus=taus, n_fft=n_fft)\n\n # Apply delay and sum\n Ys = DelaySum._delaysum(Xs=Xs, As=As)\n\n return Ys\n\n @staticmethod\n def _delaysum(Xs, As):\n \"\"\"Perform delay and sum beamforming. The result has\n the following format: (batch, time_step, n_fft, 2, 1).\n\n Arguments\n ---------\n Xs : tensor\n A batch of audio signals in the frequency domain.\n The tensor must have the following format:\n (batch, time_step, n_fft/2 + 1, 2, n_mics)\n As : tensor\n The steering vector to point in the direction of\n the target source. 
The tensor must have the format\n (batch, time_step, n_fft/2 + 1, 2, n_mics)\n \"\"\"\n\n # Get useful dimensions\n n_mics = Xs.shape[4]\n\n # Generate unmixing coefficients\n Ws_re = As[..., 0, :] / n_mics\n Ws_im = -1 * As[..., 1, :] / n_mics\n\n # Get input signal\n Xs_re = Xs[..., 0, :]\n Xs_im = Xs[..., 1, :]\n\n # Applying delay and sum\n Ys_re = torch.sum((Ws_re * Xs_re - Ws_im * Xs_im), dim=3, keepdim=True)\n Ys_im = torch.sum((Ws_re * Xs_im + Ws_im * Xs_re), dim=3, keepdim=True)\n\n # Assembling the result\n Ys = torch.stack((Ys_re, Ys_im), 3)\n\n return Ys\n\n\nclass Mvdr(torch.nn.Module):\n \"\"\"Perform minimum variance distortionless response (MVDR) beamforming\n by using an input signal in the frequency domain, its covariance matrices\n and tdoas (to compute a steering vector).\n\n Example\n -------\n >>> import torch\n\n >>> from speechbrain.dataio.dataio import read_audio\n >>> from speechbrain.processing.features import STFT, ISTFT\n >>> from speechbrain.processing.multi_mic import Covariance\n >>> from speechbrain.processing.multi_mic import GccPhat, DelaySum\n >>>\n >>> xs_speech = read_audio(\n ... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'\n ... )\n >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channel]\n >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')\n >>> xs_noise = xs_noise.unsqueeze(0) #[batch, time, channels]\n >>> fs = 16000\n >>> xs = xs_speech + 0.05 * xs_noise\n >>>\n >>> stft = STFT(sample_rate=fs)\n >>> cov = Covariance()\n >>> gccphat = GccPhat()\n >>> mvdr = Mvdr()\n >>> istft = ISTFT(sample_rate=fs)\n >>>\n >>> Xs = stft(xs)\n >>> Ns = stft(xs_noise)\n >>> XXs = cov(Xs)\n >>> NNs = cov(Ns)\n >>> tdoas = gccphat(XXs)\n >>> Ys = mvdr(Xs, NNs, tdoas)\n >>> ys = istft(Ys)\n \"\"\"\n\n def __init__(self, eps=1e-20):\n\n super().__init__()\n\n self.eps = eps\n\n def forward(\n self,\n Xs,\n NNs,\n localization_tensor,\n doa_mode=False,\n mics=None,\n fs=None,\n c=343.0,\n ):\n \"\"\"This method computes a steering vector before using the\n utility function _mvdr to perform beamforming. The result has\n the following format: (batch, time_step, n_fft, 2, 1).\n\n Arguments\n ---------\n Xs : tensor\n A batch of audio signals in the frequency domain.\n The tensor must have the following format:\n (batch, time_step, n_fft/2 + 1, 2, n_mics)\n NNs : tensor\n The covariance matrices of the noise signal. The tensor must\n have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs)\n localization_tensor : tensor\n A tensor containing either time differences of arrival (TDOAs)\n (in samples) for each timestamp or directions of arrival (DOAs)\n (xyz coordinates in meters). If localization_tensor represents\n TDOAs, then its format is (batch, time_steps, n_mics + n_pairs).\n If localization_tensor represents DOAs, then its format is\n (batch, time_steps, 3)\n doa_mode : bool\n The user needs to set this parameter to True if localization_tensor\n represents DOAs instead of TDOAs. Its default value is set to False.\n mics : tensor\n The cartesian position (xyz coordinates in meters) of each microphone.\n The tensor must have the following format (n_mics, 3). This\n parameter is only mandatory when localization_tensor represents\n DOAs.\n fs : int\n The sample rate in Hertz of the signals. This parameter is only\n mandatory when localization_tensor represents DOAs.\n c : float\n The speed of sound in the medium. The speed is expressed in meters\n per second and the default value of this parameter is 343 m/s. 
This\n parameter is only used when localization_tensor represents DOAs.\n \"\"\"\n # Get useful dimensions\n n_fft = Xs.shape[2]\n localization_tensor = localization_tensor.to(Xs.device)\n NNs = NNs.to(Xs.device)\n if mics is not None:\n mics = mics.to(Xs.device)\n\n # Convert the tdoas to taus\n if doa_mode:\n taus = doas2taus(doas=localization_tensor, mics=mics, fs=fs, c=c)\n\n else:\n taus = tdoas2taus(tdoas=localization_tensor)\n\n # Generate the steering vector\n As = steering(taus=taus, n_fft=n_fft)\n\n # Perform mvdr\n Ys = Mvdr._mvdr(Xs=Xs, NNs=NNs, As=As)\n\n return Ys\n\n @staticmethod\n def _mvdr(Xs, NNs, As, eps=1e-20):\n \"\"\"Perform minimum variance distortionless response beamforming.\n\n Arguments\n ---------\n Xs : tensor\n A batch of audio signals in the frequency domain.\n The tensor must have the following format:\n (batch, time_step, n_fft/2 + 1, 2, n_mics).\n NNs : tensor\n The covariance matrices of the noise signal. The tensor must\n have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).\n As : tensor\n The steering vector to point in the direction of\n the target source. The tensor must have the format\n (batch, time_step, n_fft/2 + 1, 2, n_mics).\n \"\"\"\n\n # Get unique covariance values to reduce the number of computations\n NNs_val, NNs_idx = torch.unique(NNs, return_inverse=True, dim=1)\n\n # Inverse covariance matrices\n NNs_inv = eig.inv(NNs_val)\n\n # Capture real and imaginary parts, and restore time steps\n NNs_inv_re = NNs_inv[..., 0][:, NNs_idx]\n NNs_inv_im = NNs_inv[..., 1][:, NNs_idx]\n\n # Decompose steering vector\n AsC_re = As[..., 0, :].unsqueeze(4)\n AsC_im = 1.0 * As[..., 1, :].unsqueeze(4)\n AsT_re = AsC_re.transpose(3, 4)\n AsT_im = -1.0 * AsC_im.transpose(3, 4)\n\n # Project\n NNs_inv_AsC_re = torch.matmul(NNs_inv_re, AsC_re) - torch.matmul(\n NNs_inv_im, AsC_im\n )\n NNs_inv_AsC_im = torch.matmul(NNs_inv_re, AsC_im) + torch.matmul(\n NNs_inv_im, AsC_re\n )\n\n # Compute the gain\n alpha = 1.0 / (\n torch.matmul(AsT_re, NNs_inv_AsC_re)\n - torch.matmul(AsT_im, NNs_inv_AsC_im)\n )\n\n # Get the unmixing coefficients\n Ws_re = torch.matmul(NNs_inv_AsC_re, alpha).squeeze(4)\n Ws_im = -torch.matmul(NNs_inv_AsC_im, alpha).squeeze(4)\n\n # Applying MVDR\n Xs_re = Xs[..., 0, :]\n Xs_im = Xs[..., 1, :]\n\n Ys_re = torch.sum((Ws_re * Xs_re - Ws_im * Xs_im), dim=3, keepdim=True)\n Ys_im = torch.sum((Ws_re * Xs_im + Ws_im * Xs_re), dim=3, keepdim=True)\n\n Ys = torch.stack((Ys_re, Ys_im), -2)\n\n return Ys\n\n\nclass Gev(torch.nn.Module):\n \"\"\"Generalized EigenValue decomposition (GEV) Beamforming.\n\n Example\n -------\n >>> from speechbrain.dataio.dataio import read_audio\n >>> import torch\n >>>\n >>> from speechbrain.processing.features import STFT, ISTFT\n >>> from speechbrain.processing.multi_mic import Covariance\n >>> from speechbrain.processing.multi_mic import Gev\n >>>\n >>> xs_speech = read_audio(\n ... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'\n ... 
)\n >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]\n >>> xs_noise = read_audio('tests/samples/multi-mic/noise_0.70225_-0.70225_0.11704.flac')\n >>> xs_noise = xs_noise.unsqueeze(0)\n >>> fs = 16000\n >>> ss = xs_speech\n >>> nn = 0.05 * xs_noise\n >>> xs = ss + nn\n >>>\n >>> stft = STFT(sample_rate=fs)\n >>> cov = Covariance()\n >>> gev = Gev()\n >>> istft = ISTFT(sample_rate=fs)\n >>>\n >>> Ss = stft(ss)\n >>> Nn = stft(nn)\n >>> Xs = stft(xs)\n >>>\n >>> SSs = cov(Ss)\n >>> NNs = cov(Nn)\n >>>\n >>> Ys = gev(Xs, SSs, NNs)\n >>> ys = istft(Ys)\n \"\"\"\n\n def __init__(self):\n\n super().__init__()\n\n def forward(self, Xs, SSs, NNs):\n \"\"\" This method uses the utility function _gev to perform generalized\n eigenvalue decomposition beamforming. Therefore, the result has\n the following format: (batch, time_step, n_fft, 2, 1).\n\n Arguments\n ---------\n Xs : tensor\n A batch of audio signals in the frequency domain.\n The tensor must have the following format:\n (batch, time_step, n_fft/2 + 1, 2, n_mics).\n SSs : tensor\n The covariance matrices of the target signal. The tensor must\n have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).\n NNs : tensor\n The covariance matrices of the noise signal. The tensor must\n have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).\n \"\"\"\n\n Ys = Gev._gev(Xs=Xs, SSs=SSs, NNs=NNs)\n\n return Ys\n\n @staticmethod\n def _gev(Xs, SSs, NNs):\n \"\"\" Perform generalized eigenvalue decomposition beamforming. The result\n has the following format: (batch, time_step, n_fft, 2, 1).\n\n Arguments\n ---------\n Xs : tensor\n A batch of audio signals in the frequency domain.\n The tensor must have the following format:\n (batch, time_step, n_fft/2 + 1, 2, n_mics).\n SSs : tensor\n The covariance matrices of the target signal. The tensor must\n have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).\n NNs : tensor\n The covariance matrices of the noise signal. The tensor must\n have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).\n \"\"\"\n\n # Putting on the right device\n SSs = SSs.to(Xs.device)\n NNs = NNs.to(Xs.device)\n\n # Get useful dimensions\n n_mics = Xs.shape[4]\n n_mics_pairs = SSs.shape[4]\n\n # Computing the eigenvectors\n SSs_NNs = torch.cat((SSs, NNs), dim=4)\n SSs_NNs_val, SSs_NNs_idx = torch.unique(\n SSs_NNs, return_inverse=True, dim=1\n )\n\n SSs = SSs_NNs_val[..., range(0, n_mics_pairs)]\n NNs = SSs_NNs_val[..., range(n_mics_pairs, 2 * n_mics_pairs)]\n NNs = eig.pos_def(NNs)\n Vs, Ds = eig.gevd(SSs, NNs)\n\n # Beamforming\n F_re = Vs[..., (n_mics - 1), 0]\n F_im = Vs[..., (n_mics - 1), 1]\n\n # Normalize\n F_norm = 1.0 / (\n torch.sum(F_re ** 2 + F_im ** 2, dim=3, keepdim=True) ** 0.5\n ).repeat(1, 1, 1, n_mics)\n F_re *= F_norm\n F_im *= F_norm\n\n Ws_re = F_re[:, SSs_NNs_idx]\n Ws_im = F_im[:, SSs_NNs_idx]\n\n Xs_re = Xs[..., 0, :]\n Xs_im = Xs[..., 1, :]\n\n Ys_re = torch.sum((Ws_re * Xs_re - Ws_im * Xs_im), dim=3, keepdim=True)\n Ys_im = torch.sum((Ws_re * Xs_im + Ws_im * Xs_re), dim=3, keepdim=True)\n\n # Assembling the output\n Ys = torch.stack((Ys_re, Ys_im), 3)\n\n return Ys\n\n\nclass GccPhat(torch.nn.Module):\n \"\"\"Generalized Cross-Correlation with Phase Transform localization.\n\n Arguments\n ---------\n tdoa_max : int\n Specifies a range to search for delays. For example, if\n tdoa_max = 10, the method will restrict its search for delays\n between -10 and 10 samples. This parameter is optional and its\n default value is None. 
When tdoa_max is None, the method will\n search for delays between -n_fft/2 and n_fft/2 (full range).\n eps : float\n A small value to avoid divisions by 0 with the phase transformation.\n The default value is 1e-20.\n\n Example\n -------\n >>> import torch\n\n >>> from speechbrain.dataio.dataio import read_audio\n >>> from speechbrain.processing.features import STFT, ISTFT\n >>> from speechbrain.processing.multi_mic import Covariance\n >>> from speechbrain.processing.multi_mic import GccPhat, DelaySum\n >>>\n >>> xs_speech = read_audio(\n ... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'\n ... )\n >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channel]\n >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')\n >>> xs_noise = xs_noise.unsqueeze(0) #[batch, time, channels]\n >>> fs = 16000\n >>> xs = xs_speech + 0.05 * xs_noise\n >>>\n >>> stft = STFT(sample_rate=fs)\n >>> cov = Covariance()\n >>> gccphat = GccPhat()\n >>> Xs = stft(xs)\n >>> XXs = cov(Xs)\n >>> tdoas = gccphat(XXs)\n \"\"\"\n\n def __init__(self, tdoa_max=None, eps=1e-20):\n\n super().__init__()\n self.tdoa_max = tdoa_max\n self.eps = eps\n\n def forward(self, XXs):\n \"\"\" Perform generalized cross-correlation with phase transform localization\n by using the utility function _gcc_phat and by extracting the delays (in samples)\n before performing a quadratic interpolation to improve the accuracy.\n The result has the format: (batch, time_steps, n_mics + n_pairs).\n\n The order on the last dimension corresponds to the triu_indices for a\n square matrix. For instance, if we have 4 channels, we get the following\n order: (0, 0), (0, 1), (0, 2), (0, 3), (1, 1), (1, 2), (1, 3), (2, 2), (2, 3)\n and (3, 3). Therefore, delays[..., 0] corresponds to channels (0, 0) and delays[..., 1]\n corresponds to channels (0, 1).\n\n Arguments:\n ----------\n XXs : tensor\n The covariance matrices of the input signal. The tensor must\n have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).\n \"\"\"\n\n xxs = GccPhat._gcc_phat(XXs=XXs, eps=self.eps)\n delays = GccPhat._extract_delays(xxs=xxs, tdoa_max=self.tdoa_max)\n tdoas = GccPhat._interpolate(xxs=xxs, delays=delays)\n return tdoas\n\n @staticmethod\n def _gcc_phat(XXs, eps=1e-20):\n \"\"\" Evaluate GCC-PHAT for each timestamp. It returns the result in the time\n domain. The result has the format: (batch, time_steps, n_fft, n_mics + n_pairs).\n\n Arguments\n ---------\n XXs : tensor\n The covariance matrices of the input signal. The tensor must\n have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).\n eps : float\n A small value to avoid divisions by 0 with the phase transform. 
The\n default value is 1e-20.\n \"\"\"\n\n # Get useful dimensions\n n_samples = (XXs.shape[2] - 1) * 2\n\n # Extracting the tensors needed\n XXs_val, XXs_idx = torch.unique(XXs, return_inverse=True, dim=4)\n\n XXs_re = XXs_val[..., 0, :]\n XXs_im = XXs_val[..., 1, :]\n\n # Applying the phase transform\n XXs_abs = torch.sqrt(XXs_re ** 2 + XXs_im ** 2) + eps\n XXs_re_phat = XXs_re / XXs_abs\n XXs_im_phat = XXs_im / XXs_abs\n XXs_phat = torch.stack((XXs_re_phat, XXs_im_phat), 4)\n\n # Returning in the temporal domain\n XXs_phat = XXs_phat.transpose(2, 3)\n\n if version.parse(torch.__version__) >= version.parse(\"1.8.0\"):\n XXs_phat = torch.complex(XXs_phat[..., 0], XXs_phat[..., 1])\n xxs = torch.fft.irfft(XXs_phat, n=n_samples)\n else:\n xxs = torch.irfft(XXs_phat, signal_ndim=1, signal_sizes=[n_samples])\n\n xxs = xxs[..., XXs_idx, :]\n\n # Formatting the output\n xxs = xxs.transpose(2, 3)\n\n return xxs\n\n @staticmethod\n def _extract_delays(xxs, tdoa_max=None):\n \"\"\" Extract the rounded delays from the cross-correlation for each timestamp.\n The result has the format: (batch, time_steps, n_mics + n_pairs).\n\n Arguments\n ---------\n xxs : tensor\n The correlation signals obtained after a gcc-phat operation. The tensor\n must have the format (batch, time_steps, n_fft, n_mics + n_pairs).\n tdoa_max : int\n Specifies a range to search for delays. For example, if\n tdoa_max = 10, the method will restrict its search for delays\n between -10 and 10 samples. This parameter is optional and its\n default value is None. When tdoa_max is None, the method will\n search for delays between -n_fft/2 and +n_fft/2 (full range).\n \"\"\"\n\n # Get useful dimensions\n n_fft = xxs.shape[2]\n\n # If no tdoa specified, cover the whole frame\n if tdoa_max is None:\n tdoa_max = torch.div(n_fft, 2, rounding_mode=\"floor\")\n\n # Splitting the GCC-PHAT values to search in the range\n slice_1 = xxs[..., 0:tdoa_max, :]\n slice_2 = xxs[..., -tdoa_max:, :]\n\n xxs_sliced = torch.cat((slice_1, slice_2), 2)\n\n # Extracting the delays in the range\n _, delays = torch.max(xxs_sliced, 2)\n\n # Adjusting the delays that were affected by the slicing\n offset = n_fft - xxs_sliced.shape[2]\n idx = delays >= slice_1.shape[2]\n delays[idx] += offset\n\n # Centering the delays around 0\n delays[idx] -= n_fft\n\n return delays\n\n @staticmethod\n def _interpolate(xxs, delays):\n \"\"\"Perform quadratic interpolation on the cross-correlation to\n improve the tdoa accuracy. The result has the format:\n (batch, time_steps, n_mics + n_pairs)\n\n Arguments\n ---------\n xxs : tensor\n The correlation signals obtained after a gcc-phat operation. The tensor\n must have the format (batch, time_steps, n_fft, n_mics + n_pairs).\n delays : tensor\n The rounded tdoas obtained by selecting the sample with the highest\n amplitude. 
The tensor must have the format\n (batch, time_steps, n_mics + n_pairs).\n \"\"\"\n\n # Get useful dimensions\n n_fft = xxs.shape[2]\n\n # Get the max amplitude and its neighbours\n tp = torch.fmod((delays - 1) + n_fft, n_fft).unsqueeze(2)\n y1 = torch.gather(xxs, 2, tp).squeeze(2)\n tp = torch.fmod(delays + n_fft, n_fft).unsqueeze(2)\n y2 = torch.gather(xxs, 2, tp).squeeze(2)\n tp = torch.fmod((delays + 1) + n_fft, n_fft).unsqueeze(2)\n y3 = torch.gather(xxs, 2, tp).squeeze(2)\n\n # Add a fractional part to the initially rounded delay\n delays_frac = delays + (y1 - y3) / (2 * y1 - 4 * y2 + 2 * y3)\n\n return delays_frac\n\n\nclass SrpPhat(torch.nn.Module):\n \"\"\"Steered-Response Power with Phase Transform Localization.\n\n Arguments\n ---------\n mics : tensor\n The cartesian coordinates (xyz) in meters of each microphone.\n The tensor must have the following format (n_mics, 3).\n space : string\n If this parameter is set to 'sphere', the localization will\n be done in 3D by searching in a sphere of possible doas. If\n it set to 'circle', the search will be done in 2D by searching\n in a circle. By default, this parameter is set to 'sphere'.\n Note: The 'circle' option isn't implemented yet.\n sample_rate : int\n The sample rate in Hertz of the signals to perform SRP-PHAT on.\n By default, this parameter is set to 16000 Hz.\n speed_sound : float\n The speed of sound in the medium. The speed is expressed in meters\n per second and the default value of this parameter is 343 m/s.\n eps : float\n A small value to avoid errors like division by 0. The default value\n of this parameter is 1e-20.\n\n Example\n -------\n >>> import torch\n\n >>> from speechbrain.dataio.dataio import read_audio\n >>> from speechbrain.processing.features import STFT\n >>> from speechbrain.processing.multi_mic import Covariance\n >>> from speechbrain.processing.multi_mic import SrpPhat\n\n >>> xs_speech = read_audio('tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac')\n >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')\n >>> fs = 16000\n\n >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]\n >>> xs_noise = xs_noise.unsqueeze(0)\n\n >>> ss1 = xs_speech\n >>> ns1 = 0.05 * xs_noise\n >>> xs1 = ss1 + ns1\n\n >>> ss2 = xs_speech\n >>> ns2 = 0.20 * xs_noise\n >>> xs2 = ss2 + ns2\n\n >>> ss = torch.cat((ss1,ss2), dim=0)\n >>> ns = torch.cat((ns1,ns2), dim=0)\n >>> xs = torch.cat((xs1,xs2), dim=0)\n\n >>> mics = torch.zeros((4,3), dtype=torch.float)\n >>> mics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])\n >>> mics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])\n >>> mics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])\n >>> mics[3,:] = torch.FloatTensor([+0.05, +0.05, +0.00])\n\n >>> stft = STFT(sample_rate=fs)\n >>> cov = Covariance()\n >>> srpphat = SrpPhat(mics=mics)\n\n >>> Xs = stft(xs)\n >>> XXs = cov(Xs)\n >>> doas = srpphat(XXs)\n \"\"\"\n\n def __init__(\n self,\n mics,\n space=\"sphere\",\n sample_rate=16000,\n speed_sound=343.0,\n eps=1e-20,\n ):\n\n super().__init__()\n\n # Generate the doas\n if space == \"sphere\":\n self.doas = sphere()\n\n if space == \"circle\":\n pass\n\n # Generate associated taus with the doas\n self.taus = doas2taus(\n self.doas, mics=mics, fs=sample_rate, c=speed_sound\n )\n\n # Save epsilon\n self.eps = eps\n\n def forward(self, XXs):\n \"\"\" Perform SRP-PHAT localization on a signal by computing a steering\n vector and then by using the utility function _srp_phat to extract the doas.\n The result is a tensor containing the 
directions of arrival (xyz coordinates\n (in meters) in the direction of the sound source). The output tensor\n has the format (batch, time_steps, 3).\n\n This localization method uses Global Coherence Field (GCF):\n https://www.researchgate.net/publication/221491705_Speaker_localization_based_on_oriented_global_coherence_field\n\n Arguments\n ---------\n XXs : tensor\n The covariance matrices of the input signal. The tensor must\n have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).\n \"\"\"\n # Get useful dimensions\n n_fft = XXs.shape[2]\n\n # Generate the steering vector\n As = steering(self.taus.to(XXs.device), n_fft)\n\n # Perform srp-phat\n doas = SrpPhat._srp_phat(XXs=XXs, As=As, doas=self.doas, eps=self.eps)\n\n return doas\n\n @staticmethod\n def _srp_phat(XXs, As, doas, eps=1e-20):\n \"\"\"Perform srp-phat to find the direction of arrival\n of the sound source. The result is a tensor containing the directions\n of arrival (xyz coordinates (in meters) in the direction of the sound source).\n The output tensor has the format: (batch, time_steps, 3).\n\n Arguments\n ---------\n XXs : tensor\n The covariance matrices of the input signal. The tensor must\n have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).\n As : tensor\n The steering vector that cover the all the potential directions\n of arrival. The tensor must have the format\n (n_doas, n_fft/2 + 1, 2, n_mics).\n doas : tensor\n All the possible directions of arrival that will be scanned. The\n tensor must have the format (n_doas, 3).\n \"\"\"\n\n # Putting on the right device\n As = As.to(XXs.device)\n doas = doas.to(XXs.device)\n\n # Get useful dimensions\n n_mics = As.shape[3]\n\n # Get the indices for the pairs of microphones\n idx = torch.triu_indices(n_mics, n_mics)\n\n # Generate the demixing vector from the steering vector\n As_1_re = As[:, :, 0, idx[0, :]]\n As_1_im = As[:, :, 1, idx[0, :]]\n As_2_re = As[:, :, 0, idx[1, :]]\n As_2_im = As[:, :, 1, idx[1, :]]\n Ws_re = As_1_re * As_2_re + As_1_im * As_2_im\n Ws_im = As_1_re * As_2_im - As_1_im * As_2_re\n Ws_re = Ws_re.reshape(Ws_re.shape[0], -1)\n Ws_im = Ws_im.reshape(Ws_im.shape[0], -1)\n\n # Get unique covariance values to reduce the number of computations\n XXs_val, XXs_idx = torch.unique(XXs, return_inverse=True, dim=1)\n\n # Perform the phase transform\n XXs_re = XXs_val[:, :, :, 0, :]\n XXs_im = XXs_val[:, :, :, 1, :]\n XXs_re = XXs_re.reshape((XXs_re.shape[0], XXs_re.shape[1], -1))\n XXs_im = XXs_im.reshape((XXs_im.shape[0], XXs_im.shape[1], -1))\n XXs_abs = torch.sqrt(XXs_re ** 2 + XXs_im ** 2) + eps\n XXs_re_norm = XXs_re / XXs_abs\n XXs_im_norm = XXs_im / XXs_abs\n\n # Project on the demixing vectors, and keep only real part\n Ys_A = torch.matmul(XXs_re_norm, Ws_re.transpose(0, 1))\n Ys_B = torch.matmul(XXs_im_norm, Ws_im.transpose(0, 1))\n Ys = Ys_A - Ys_B\n\n # Get maximum points\n _, doas_idx = torch.max(Ys, dim=2)\n\n # Repeat for each frame\n doas = (doas[doas_idx, :])[:, XXs_idx, :]\n\n return doas\n\n\nclass Music(torch.nn.Module):\n \"\"\"Multiple Signal Classification (MUSIC) localization.\n\n Arguments\n ---------\n mics : tensor\n The cartesian coordinates (xyz) in meters of each microphone.\n The tensor must have the following format (n_mics, 3).\n space : string\n If this parameter is set to 'sphere', the localization will\n be done in 3D by searching in a sphere of possible doas. If\n it set to 'circle', the search will be done in 2D by searching\n in a circle. 
By default, this parameter is set to 'sphere'.\n Note: The 'circle' option isn't implemented yet.\n sample_rate : int\n The sample rate in Hertz of the signals to perform SRP-PHAT on.\n By default, this parameter is set to 16000 Hz.\n speed_sound : float\n The speed of sound in the medium. The speed is expressed in meters\n per second and the default value of this parameter is 343 m/s.\n eps : float\n A small value to avoid errors like division by 0. The default value\n of this parameter is 1e-20.\n n_sig : int\n An estimation of the number of sound sources. The default value is set\n to one source.\n\n Example\n -------\n >>> import torch\n\n >>> from speechbrain.dataio.dataio import read_audio\n >>> from speechbrain.processing.features import STFT\n >>> from speechbrain.processing.multi_mic import Covariance\n >>> from speechbrain.processing.multi_mic import SrpPhat\n\n >>> xs_speech = read_audio('tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac')\n >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')\n >>> fs = 16000\n\n >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]\n >>> xs_noise = xs_noise.unsqueeze(0)\n\n >>> ss1 = xs_speech\n >>> ns1 = 0.05 * xs_noise\n >>> xs1 = ss1 + ns1\n\n >>> ss2 = xs_speech\n >>> ns2 = 0.20 * xs_noise\n >>> xs2 = ss2 + ns2\n\n >>> ss = torch.cat((ss1,ss2), dim=0)\n >>> ns = torch.cat((ns1,ns2), dim=0)\n >>> xs = torch.cat((xs1,xs2), dim=0)\n\n >>> mics = torch.zeros((4,3), dtype=torch.float)\n >>> mics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])\n >>> mics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])\n >>> mics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])\n >>> mics[3,:] = torch.FloatTensor([+0.05, +0.05, +0.00])\n\n >>> stft = STFT(sample_rate=fs)\n >>> cov = Covariance()\n >>> music = Music(mics=mics)\n\n >>> Xs = stft(xs)\n >>> XXs = cov(Xs)\n >>> doas = music(XXs)\n \"\"\"\n\n def __init__(\n self,\n mics,\n space=\"sphere\",\n sample_rate=16000,\n speed_sound=343.0,\n eps=1e-20,\n n_sig=1,\n ):\n\n super().__init__()\n\n # Generate the doas\n if space == \"sphere\":\n self.doas = sphere()\n\n if space == \"circle\":\n pass\n\n # Generate associated taus with the doas\n self.taus = doas2taus(\n self.doas, mics=mics, fs=sample_rate, c=speed_sound\n )\n\n # Save epsilon\n self.eps = eps\n\n # Save number of signals\n self.n_sig = n_sig\n\n def forward(self, XXs):\n \"\"\"Perform MUSIC localization on a signal by computing a steering\n vector and then by using the utility function _music to extract the doas.\n The result is a tensor containing the directions of arrival (xyz coordinates\n (in meters) in the direction of the sound source). The output tensor\n has the format (batch, time_steps, 3).\n\n Arguments\n ---------\n XXs : tensor\n The covariance matrices of the input signal. The tensor must\n have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).\n \"\"\"\n\n # Get useful dimensions\n n_fft = XXs.shape[2]\n\n # Generate the steering vector\n As = steering(self.taus.to(XXs.device), n_fft)\n\n # Perform music\n doas = Music._music(\n XXs=XXs, As=As, doas=self.doas, n_sig=self.n_sig, eps=self.eps\n )\n\n return doas\n\n @staticmethod\n def _music(XXs, As, doas, n_sig, eps=1e-20):\n \"\"\"Perform multiple signal classification to find the\n direction of arrival of the sound source. The result\n has the format: (batch, time_steps, 3).\n\n Arguments\n ---------\n XXs : tensor\n The covariance matrices of the input signal. 
The tensor must\n have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).\n As : tensor\n The steering vector that covers the all the potential directions\n of arrival. The tensor must have the format.\n (n_doas, n_fft/2 + 1, 2, n_mics).\n doas : tensor\n All the possible directions of arrival that will be scanned. The\n tensor must have the format (n_doas, 3).\n n_sig : int\n The number of signals in the signal + noise subspace (default is 1).\n \"\"\"\n\n # Putting on the right device\n As = As.to(XXs.device)\n doas = doas.to(XXs.device)\n\n # Collecting data\n n_mics = As.shape[3]\n n_doas = As.shape[0]\n n_bins = As.shape[2]\n svd_range = n_mics - n_sig\n\n # Get unique values to reduce computations\n XXs_val, XXs_idx = torch.unique(XXs, return_inverse=True, dim=1)\n\n # Singular value decomposition\n Us, _ = eig.svdl(XXs_val)\n\n # Format for the projection\n Us = Us.unsqueeze(2).repeat(1, 1, n_doas, 1, 1, 1, 1)\n Us_re = Us[..., range(0, svd_range), 0]\n Us_im = Us[..., range(0, svd_range), 1]\n\n # Fixing the format of the steering vector\n As = (\n As.unsqueeze(0)\n .unsqueeze(0)\n .unsqueeze(6)\n .permute(0, 1, 2, 3, 6, 5, 4)\n )\n As = As.repeat(Us.shape[0], Us.shape[1], 1, 1, 1, 1, 1)\n\n As_re = As[..., 0]\n As_im = As[..., 1]\n\n # Applying MUSIC's formula\n As_mm_Us_re = torch.matmul(As_re, Us_re) + torch.matmul(As_im, Us_im)\n As_mm_Us_im = torch.matmul(As_re, Us_im) - torch.matmul(As_im, Us_re)\n\n As_mm_Us_abs = torch.sqrt(As_mm_Us_re ** 2 + As_mm_Us_im ** 2)\n As_mm_Us_sum = torch.sum(As_mm_Us_abs, dim=5)\n\n As_As_abs = torch.sum(As_re ** 2, dim=5) + torch.sum(As_im ** 2, dim=5)\n\n Ps = (As_As_abs / (As_mm_Us_sum + eps)).squeeze(4)\n\n Ys = torch.sum(Ps, dim=3) / n_bins\n\n # Get maximum points\n _, doas_idx = torch.max(Ys, dim=2)\n\n doas = (doas[doas_idx, :])[:, XXs_idx, :]\n\n return doas\n\n\ndef doas2taus(doas, mics, fs, c=343.0):\n \"\"\"This function converts directions of arrival (xyz coordinates\n expressed in meters) in time differences of arrival (expressed in\n samples). The result has the following format: (batch, time_steps, n_mics).\n\n Arguments\n ---------\n doas : tensor\n The directions of arrival expressed with cartesian coordinates (xyz)\n in meters. The tensor must have the following format: (batch, time_steps, 3).\n mics : tensor\n The cartesian position (xyz) in meters of each microphone.\n The tensor must have the following format (n_mics, 3).\n fs : int\n The sample rate in Hertz of the signals.\n c : float\n The speed of sound in the medium. The speed is expressed in meters\n per second and the default value of this parameter is 343 m/s.\n\n Example\n -------\n >>> import torch\n\n >>> from speechbrain.dataio.dataio import read_audio\n >>> from speechbrain.processing.multi_mic import sphere, doas2taus\n\n >>> xs = read_audio('tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac')\n >>> xs = xs.unsqueeze(0) # [batch, time, channels]\n >>> fs = 16000\n >>> mics = torch.zeros((4,3), dtype=torch.float)\n >>> mics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])\n >>> mics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])\n >>> mics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])\n >>> mics[3,:] = torch.FloatTensor([+0.05, +0.05, +0.00])\n\n >>> doas = sphere()\n >>> taus = doas2taus(doas, mics, fs)\n \"\"\"\n\n taus = (fs / c) * torch.matmul(doas.to(mics.device), mics.transpose(0, 1))\n\n return taus\n\n\ndef tdoas2taus(tdoas):\n \"\"\" This function selects the tdoas of each channel and put them\n in a tensor. 
The result has the following format:\n (batch, time_steps, n_mics).\n\n Arguments:\n ----------\n tdoas : tensor\n The time difference of arrival (TDOA) (in samples) for\n each timestamp. The tensor has the format\n (batch, time_steps, n_mics + n_pairs).\n\n Example\n -------\n >>> import torch\n >>> from speechbrain.dataio.dataio import read_audio\n >>> from speechbrain.processing.features import STFT\n >>> from speechbrain.processing.multi_mic import Covariance\n >>> from speechbrain.processing.multi_mic import GccPhat, tdoas2taus\n >>>\n >>> xs_speech = read_audio(\n ... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'\n ... )\n >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')\n >>> xs = xs_speech + 0.05 * xs_noise\n >>> xs = xs.unsqueeze(0)\n >>> fs = 16000\n >>>\n >>> stft = STFT(sample_rate=fs)\n >>> cov = Covariance()\n >>> gccphat = GccPhat()\n >>>\n >>> Xs = stft(xs)\n >>> XXs = cov(Xs)\n >>> tdoas = gccphat(XXs)\n >>> taus = tdoas2taus(tdoas)\n \"\"\"\n\n n_pairs = tdoas.shape[len(tdoas.shape) - 1]\n n_channels = int(((1 + 8 * n_pairs) ** 0.5 - 1) / 2)\n taus = tdoas[..., range(0, n_channels)]\n\n return taus\n\n\ndef steering(taus, n_fft):\n \"\"\" This function computes a steering vector by using the time differences\n of arrival for each channel (in samples) and the number of bins (n_fft).\n The result has the following format: (batch, time_step, n_fft/2 + 1, 2, n_mics).\n\n Arguments:\n ----------\n taus : tensor\n The time differences of arrival for each channel. The tensor must have\n the following format: (batch, time_steps, n_mics).\n\n n_fft : int\n The number of bins resulting of the STFT. It is assumed that the\n argument \"onesided\" was set to True for the STFT.\n\n Example:\n --------f\n >>> import torch\n >>> from speechbrain.dataio.dataio import read_audio\n >>> from speechbrain.processing.features import STFT\n >>> from speechbrain.processing.multi_mic import Covariance\n >>> from speechbrain.processing.multi_mic import GccPhat, tdoas2taus, steering\n >>>\n >>> xs_speech = read_audio(\n ... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'\n ... )\n >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')\n >>> xs = xs_speech + 0.05 * xs_noise\n >>> xs = xs.unsqueeze(0) # [batch, time, channels]\n >>> fs = 16000\n\n >>> stft = STFT(sample_rate=fs)\n >>> cov = Covariance()\n >>> gccphat = GccPhat()\n >>>\n >>> Xs = stft(xs)\n >>> n_fft = Xs.shape[2]\n >>> XXs = cov(Xs)\n >>> tdoas = gccphat(XXs)\n >>> taus = tdoas2taus(tdoas)\n >>> As = steering(taus, n_fft)\n \"\"\"\n\n # Collecting useful numbers\n pi = 3.141592653589793\n\n frame_size = int((n_fft - 1) * 2)\n\n # Computing the different parts of the steering vector\n omegas = 2 * pi * torch.arange(0, n_fft, device=taus.device) / frame_size\n omegas = omegas.repeat(taus.shape + (1,))\n taus = taus.unsqueeze(len(taus.shape)).repeat(\n (1,) * len(taus.shape) + (n_fft,)\n )\n\n # Assembling the steering vector\n a_re = torch.cos(-omegas * taus)\n a_im = torch.sin(-omegas * taus)\n a = torch.stack((a_re, a_im), len(a_re.shape))\n a = a.transpose(len(a.shape) - 3, len(a.shape) - 1).transpose(\n len(a.shape) - 3, len(a.shape) - 2\n )\n\n return a\n\n\ndef sphere(levels_count=4):\n \"\"\" This function generates cartesian coordinates (xyz) for a set\n of points forming a 3D sphere. The coordinates are expressed in\n meters and can be used as doas. 
The result has the format:\n (n_points, 3).\n\n Arguments\n ---------\n levels_count : int\n A number proportional to the number of points that the user\n wants to generate.\n - If levels_count = 1, then the sphere will have 42 points\n - If levels_count = 2, then the sphere will have 162 points\n - If levels_count = 3, then the sphere will have 642 points\n - If levels_count = 4, then the sphere will have 2562 points\n - If levels_count = 5, then the sphere will have 10242 points\n - ...\n By default, levels_count is set to 4.\n\n Example\n -------\n >>> import torch\n >>> from speechbrain.processing.multi_mic import sphere\n >>> doas = sphere()\n \"\"\"\n\n # Generate points at level 0\n\n h = (5.0 ** 0.5) / 5.0\n r = (2.0 / 5.0) * (5.0 ** 0.5)\n pi = 3.141592654\n\n pts = torch.zeros((12, 3), dtype=torch.float)\n pts[0, :] = torch.FloatTensor([0, 0, 1])\n pts[11, :] = torch.FloatTensor([0, 0, -1])\n pts[range(1, 6), 0] = r * torch.sin(2.0 * pi * torch.arange(0, 5) / 5.0)\n pts[range(1, 6), 1] = r * torch.cos(2.0 * pi * torch.arange(0, 5) / 5.0)\n pts[range(1, 6), 2] = h\n pts[range(6, 11), 0] = (\n -1.0 * r * torch.sin(2.0 * pi * torch.arange(0, 5) / 5.0)\n )\n pts[range(6, 11), 1] = (\n -1.0 * r * torch.cos(2.0 * pi * torch.arange(0, 5) / 5.0)\n )\n pts[range(6, 11), 2] = -1.0 * h\n\n # Generate triangles at level 0\n\n trs = torch.zeros((20, 3), dtype=torch.long)\n\n trs[0, :] = torch.LongTensor([0, 2, 1])\n trs[1, :] = torch.LongTensor([0, 3, 2])\n trs[2, :] = torch.LongTensor([0, 4, 3])\n trs[3, :] = torch.LongTensor([0, 5, 4])\n trs[4, :] = torch.LongTensor([0, 1, 5])\n\n trs[5, :] = torch.LongTensor([9, 1, 2])\n trs[6, :] = torch.LongTensor([10, 2, 3])\n trs[7, :] = torch.LongTensor([6, 3, 4])\n trs[8, :] = torch.LongTensor([7, 4, 5])\n trs[9, :] = torch.LongTensor([8, 5, 1])\n\n trs[10, :] = torch.LongTensor([4, 7, 6])\n trs[11, :] = torch.LongTensor([5, 8, 7])\n trs[12, :] = torch.LongTensor([1, 9, 8])\n trs[13, :] = torch.LongTensor([2, 10, 9])\n trs[14, :] = torch.LongTensor([3, 6, 10])\n\n trs[15, :] = torch.LongTensor([11, 6, 7])\n trs[16, :] = torch.LongTensor([11, 7, 8])\n trs[17, :] = torch.LongTensor([11, 8, 9])\n trs[18, :] = torch.LongTensor([11, 9, 10])\n trs[19, :] = torch.LongTensor([11, 10, 6])\n\n # Generate next levels\n\n for levels_index in range(0, levels_count):\n\n # 0\n # / \\\n # A---B\n # / \\ / \\\n # 1---C---2\n\n trs_count = trs.shape[0]\n subtrs_count = trs_count * 4\n\n subtrs = torch.zeros((subtrs_count, 6), dtype=torch.long)\n\n subtrs[0 * trs_count + torch.arange(0, trs_count), 0] = trs[:, 0]\n subtrs[0 * trs_count + torch.arange(0, trs_count), 1] = trs[:, 0]\n subtrs[0 * trs_count + torch.arange(0, trs_count), 2] = trs[:, 0]\n subtrs[0 * trs_count + torch.arange(0, trs_count), 3] = trs[:, 1]\n subtrs[0 * trs_count + torch.arange(0, trs_count), 4] = trs[:, 2]\n subtrs[0 * trs_count + torch.arange(0, trs_count), 5] = trs[:, 0]\n\n subtrs[1 * trs_count + torch.arange(0, trs_count), 0] = trs[:, 0]\n subtrs[1 * trs_count + torch.arange(0, trs_count), 1] = trs[:, 1]\n subtrs[1 * trs_count + torch.arange(0, trs_count), 2] = trs[:, 1]\n subtrs[1 * trs_count + torch.arange(0, trs_count), 3] = trs[:, 1]\n subtrs[1 * trs_count + torch.arange(0, trs_count), 4] = trs[:, 1]\n subtrs[1 * trs_count + torch.arange(0, trs_count), 5] = trs[:, 2]\n\n subtrs[2 * trs_count + torch.arange(0, trs_count), 0] = trs[:, 2]\n subtrs[2 * trs_count + torch.arange(0, trs_count), 1] = trs[:, 0]\n subtrs[2 * trs_count + torch.arange(0, trs_count), 2] = trs[:, 1]\n subtrs[2 * 
trs_count + torch.arange(0, trs_count), 3] = trs[:, 2]\n subtrs[2 * trs_count + torch.arange(0, trs_count), 4] = trs[:, 2]\n subtrs[2 * trs_count + torch.arange(0, trs_count), 5] = trs[:, 2]\n\n subtrs[3 * trs_count + torch.arange(0, trs_count), 0] = trs[:, 0]\n subtrs[3 * trs_count + torch.arange(0, trs_count), 1] = trs[:, 1]\n subtrs[3 * trs_count + torch.arange(0, trs_count), 2] = trs[:, 1]\n subtrs[3 * trs_count + torch.arange(0, trs_count), 3] = trs[:, 2]\n subtrs[3 * trs_count + torch.arange(0, trs_count), 4] = trs[:, 2]\n subtrs[3 * trs_count + torch.arange(0, trs_count), 5] = trs[:, 0]\n\n subtrs_flatten = torch.cat(\n (subtrs[:, [0, 1]], subtrs[:, [2, 3]], subtrs[:, [4, 5]]), axis=0\n )\n subtrs_sorted, _ = torch.sort(subtrs_flatten, axis=1)\n\n index_max = torch.max(subtrs_sorted)\n\n subtrs_scalar = (\n subtrs_sorted[:, 0] * (index_max + 1) + subtrs_sorted[:, 1]\n )\n\n unique_scalar, unique_indices = torch.unique(\n subtrs_scalar, return_inverse=True\n )\n\n unique_values = torch.zeros(\n (unique_scalar.shape[0], 2), dtype=unique_scalar.dtype\n )\n\n unique_values[:, 0] = torch.div(\n unique_scalar, index_max + 1, rounding_mode=\"floor\"\n )\n unique_values[:, 1] = unique_scalar - unique_values[:, 0] * (\n index_max + 1\n )\n\n trs = torch.transpose(torch.reshape(unique_indices, (3, -1)), 0, 1)\n\n pts = pts[unique_values[:, 0], :] + pts[unique_values[:, 1], :]\n pts /= torch.repeat_interleave(\n torch.unsqueeze(torch.sum(pts ** 2, axis=1) ** 0.5, 1), 3, 1\n )\n\n return pts\n",
"\"\"\"\nGeneralized Eigenvalue Decomposition.\n\nThis library contains different methods to adjust the format of\ncomplex Hermitian matrices and find their eigenvectors and\neigenvalues.\n\nAuthors\n * William Aris 2020\n * Francois Grondin 2020\n\"\"\"\n\nimport torch\n\n\ndef gevd(a, b=None):\n \"\"\"This method computes the eigenvectors and the eigenvalues\n of complex Hermitian matrices. The method finds a solution to\n the problem AV = BVD where V are the eigenvectors and D are\n the eigenvalues.\n\n The eigenvectors returned by the method (vs) are stored in a tensor\n with the following format (*,C,C,2).\n\n The eigenvalues returned by the method (ds) are stored in a tensor\n with the following format (*,C,C,2).\n\n Arguments\n ---------\n a : tensor\n A first input matrix. It is equivalent to the matrix A in the\n equation in the description above. The tensor must have the\n following format: (*,2,C+P).\n\n b : tensor\n A second input matrix. It is equivalent tot the matrix B in the\n equation in the description above. The tensor must have the\n following format: (*,2,C+P).\n This argument is optional and its default value is None. If\n b == None, then b is replaced by the identity matrix in the\n computations.\n\n Example\n -------\n\n Suppose we would like to compute eigenvalues/eigenvectors on the\n following complex Hermitian matrix:\n\n A = [ 52 34 + 37j 16 + j28 ;\n 34 - 37j 125 41 + j3 ;\n 16 - 28j 41 - j3 62 ]\n\n >>> a = torch.FloatTensor([[52,34,16,125,41,62],[0,37,28,0,3,0]])\n >>> vs, ds = gevd(a)\n\n This corresponds to:\n\n D = [ 20.9513 0 0 ;\n 0 43.9420 0 ;\n 0 0 174.1067 ]\n\n V = [ 0.085976 - 0.85184j -0.24620 + 0.12244j -0.24868 - 0.35991j ;\n -0.16006 + 0.20244j 0.37084 + 0.40173j -0.79175 - 0.087312j ;\n -0.43990 + 0.082884j -0.36724 - 0.70045j -0.41728 + 0 j ]\n\n where\n\n A = VDV^-1\n\n \"\"\"\n\n # Dimensions\n D = a.dim()\n P = a.shape[D - 1]\n C = int(round(((1 + 8 * P) ** 0.5 - 1) / 2))\n\n # Converting the input matrices to block matrices\n ash = f(a)\n\n if b is None:\n\n b = torch.zeros(a.shape, dtype=a.dtype, device=a.device)\n ids = torch.triu_indices(C, C)\n b[..., 0, ids[0] == ids[1]] = 1.0\n\n bsh = f(b)\n\n # Performing the Cholesky decomposition\n lsh = torch.linalg.cholesky(bsh)\n lsh_inv = torch.inverse(lsh)\n lsh_inv_T = torch.transpose(lsh_inv, D - 2, D - 1)\n\n # Computing the matrix C\n csh = torch.matmul(lsh_inv, torch.matmul(ash, lsh_inv_T))\n\n # Performing the eigenvalue decomposition\n es, ysh = torch.linalg.eigh(csh, UPLO=\"U\")\n\n # Collecting the eigenvalues\n dsh = torch.zeros(\n a.shape[slice(0, D - 2)] + (2 * C, 2 * C),\n dtype=a.dtype,\n device=a.device,\n )\n dsh[..., range(0, 2 * C), range(0, 2 * C)] = es\n\n # Collecting the eigenvectors\n vsh = torch.matmul(lsh_inv_T, ysh)\n\n # Converting the block matrices to full complex matrices\n vs = ginv(vsh)\n ds = ginv(dsh)\n\n return vs, ds\n\n\ndef svdl(a):\n \"\"\" Singular Value Decomposition (Left Singular Vectors).\n\n This function finds the eigenvalues and eigenvectors of the\n input multiplied by its transpose (a x a.T).\n\n The function will return (in this order):\n 1. The eigenvalues in a tensor with the format (*,C,C,2)\n 2. The eigenvectors in a tensor with the format (*,C,C,2)\n\n Arguments:\n ----------\n a : tensor\n A complex input matrix to work with. 
The tensor must have\n the following format: (*,2,C+P).\n\n Example:\n --------\n >>> import torch\n\n >>> from speechbrain.processing.features import STFT\n >>> from speechbrain.processing.multi_mic import Covariance\n >>> from speechbrain.processing.decomposition import svdl\n >>> from speechbrain.dataio.dataio import read_audio_multichannel\n\n >>> xs_speech = read_audio_multichannel(\n ... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'\n ... )\n >>> xs_noise = read_audio_multichannel('tests/samples/multi-mic/noise_diffuse.flac')\n >>> xs = xs_speech + 0.05 * xs_noise\n >>> xs = xs.unsqueeze(0).float()\n >>>\n >>> stft = STFT(sample_rate=16000)\n >>> cov = Covariance()\n >>>\n >>> Xs = stft(xs)\n >>> XXs = cov(Xs)\n >>> us, ds = svdl(XXs)\n \"\"\"\n\n # Dimensions\n D = a.dim()\n P = a.shape[D - 1]\n C = int(round(((1 + 8 * P) ** 0.5 - 1) / 2))\n\n # Computing As * As_T\n ash = f(a)\n ash_T = torch.transpose(ash, -2, -1)\n\n ash_mm_ash_T = torch.matmul(ash, ash_T)\n\n # Finding the eigenvectors and eigenvalues\n es, ush = torch.linalg.eigh(ash_mm_ash_T, UPLO=\"U\")\n\n # Collecting the eigenvalues\n dsh = torch.zeros(ush.shape, dtype=es.dtype, device=es.device)\n dsh[..., range(0, 2 * C), range(0, 2 * C)] = torch.sqrt(es)\n\n # Converting the block matrices to full complex matrices\n us = ginv(ush)\n ds = ginv(dsh)\n\n return us, ds\n\n\ndef f(ws):\n \"\"\"Transform 1.\n\n This method takes a complex Hermitian matrix represented by its\n upper triangular part and converts it to a block matrix\n representing the full original matrix with real numbers.\n The output tensor will have the following format:\n (*,2C,2C)\n\n Arguments\n ---------\n ws : tensor\n An input matrix. The tensor must have the following format:\n (*,2,C+P)\n \"\"\"\n\n # Dimensions\n D = ws.dim()\n ws = ws.transpose(D - 2, D - 1)\n P = ws.shape[D - 2]\n C = int(round(((1 + 8 * P) ** 0.5 - 1) / 2))\n\n # Output matrix\n wsh = torch.zeros(\n ws.shape[0 : (D - 2)] + (2 * C, 2 * C),\n dtype=ws.dtype,\n device=ws.device,\n )\n ids = torch.triu_indices(C, C)\n wsh[..., ids[1] * 2, ids[0] * 2] = ws[..., 0]\n wsh[..., ids[0] * 2, ids[1] * 2] = ws[..., 0]\n wsh[..., ids[1] * 2 + 1, ids[0] * 2 + 1] = ws[..., 0]\n wsh[..., ids[0] * 2 + 1, ids[1] * 2 + 1] = ws[..., 0]\n wsh[..., ids[0] * 2, ids[1] * 2 + 1] = -1 * ws[..., 1]\n wsh[..., ids[1] * 2 + 1, ids[0] * 2] = -1 * ws[..., 1]\n wsh[..., ids[0] * 2 + 1, ids[1] * 2] = ws[..., 1]\n wsh[..., ids[1] * 2, ids[0] * 2 + 1] = ws[..., 1]\n\n return wsh\n\n\ndef finv(wsh):\n \"\"\" Inverse transform 1\n\n This method takes a block matrix representing a complex Hermitian\n matrix and converts it to a complex matrix represented by its\n upper triangular part. The result will have the following format:\n (*,2,C+P)\n\n Arguments\n ---------\n wsh : tensor\n An input matrix. The tensor must have the following format:\n (*,2C,2C)\n \"\"\"\n\n # Dimensions\n D = wsh.dim()\n C = int(wsh.shape[D - 1] / 2)\n P = int(C * (C + 1) / 2)\n\n # Output matrix\n ws = torch.zeros(\n wsh.shape[0 : (D - 2)] + (2, P), dtype=wsh.dtype, device=wsh.device\n )\n ids = torch.triu_indices(C, C)\n ws[..., 0, :] = wsh[..., ids[0] * 2, ids[1] * 2]\n ws[..., 1, :] = -1 * wsh[..., ids[0] * 2, ids[1] * 2 + 1]\n\n return ws\n\n\ndef g(ws):\n \"\"\"Transform 2.\n\n This method takes a full complex matrix and converts it to a block\n matrix. The result will have the following format:\n (*,2C,2C).\n\n Arguments\n ---------\n ws : tensor\n An input matrix. 
The tensor must have the following format:\n (*,C,C,2)\n \"\"\"\n\n # Dimensions\n D = ws.dim()\n C = ws.shape[D - 2]\n\n # Output matrix\n wsh = torch.zeros(\n ws.shape[0 : (D - 3)] + (2 * C, 2 * C),\n dtype=ws.dtype,\n device=ws.device,\n )\n wsh[..., slice(0, 2 * C, 2), slice(0, 2 * C, 2)] = ws[..., 0]\n wsh[..., slice(1, 2 * C, 2), slice(1, 2 * C, 2)] = ws[..., 0]\n wsh[..., slice(0, 2 * C, 2), slice(1, 2 * C, 2)] = -1 * ws[..., 1]\n wsh[..., slice(1, 2 * C, 2), slice(0, 2 * C, 2)] = ws[..., 1]\n\n return wsh\n\n\ndef ginv(wsh):\n \"\"\"Inverse transform 2.\n\n This method takes a complex Hermitian matrix represented by a block\n matrix and converts it to a full complex complex matrix. The\n result will have the following format:\n (*,C,C,2)\n\n Arguments\n ---------\n wsh : tensor\n An input matrix. The tensor must have the following format:\n (*,2C,2C)\n \"\"\"\n\n # Extracting data\n D = wsh.dim()\n C = int(wsh.shape[D - 1] / 2)\n\n # Output matrix\n ws = torch.zeros(\n wsh.shape[0 : (D - 2)] + (C, C, 2), dtype=wsh.dtype, device=wsh.device\n )\n ws[..., 0] = wsh[..., slice(0, 2 * C, 2), slice(0, 2 * C, 2)]\n ws[..., 1] = wsh[..., slice(1, 2 * C, 2), slice(0, 2 * C, 2)]\n\n return ws\n\n\ndef pos_def(ws, alpha=0.001, eps=1e-20):\n \"\"\"Diagonal modification.\n\n This method takes a complex Hermitian matrix represented by its upper\n triangular part and adds the value of its trace multiplied by alpha\n to the real part of its diagonal. The output will have the format:\n (*,2,C+P)\n\n Arguments\n ---------\n ws : tensor\n An input matrix. The tensor must have the following format:\n (*,2,C+P)\n\n alpha : float\n A coefficient to multiply the trace. The default value is 0.001.\n\n eps : float\n A small value to increase the real part of the diagonal. The\n default value is 1e-20.\n \"\"\"\n\n # Extracting data\n D = ws.dim()\n P = ws.shape[D - 1]\n C = int(round(((1 + 8 * P) ** 0.5 - 1) / 2))\n\n # Finding the indices of the diagonal\n ids_triu = torch.triu_indices(C, C)\n ids_diag = torch.eq(ids_triu[0, :], ids_triu[1, :])\n\n # Computing the trace\n trace = torch.sum(ws[..., 0, ids_diag], D - 2)\n trace = trace.view(trace.shape + (1,))\n trace = trace.repeat((1,) * (D - 2) + (C,))\n\n # Adding the trace multiplied by alpha to the diagonal\n ws_pf = ws.clone()\n ws_pf[..., 0, ids_diag] += alpha * trace + eps\n\n return ws_pf\n\n\ndef inv(x):\n \"\"\"Inverse Hermitian Matrix.\n\n This method finds the inverse of a complex Hermitian matrix\n represented by its upper triangular part. The result will have\n the following format: (*, C, C, 2).\n\n Arguments\n ---------\n x : tensor\n An input matrix to work with. The tensor must have the\n following format: (*, 2, C+P)\n\n Example\n -------\n >>> import torch\n >>>\n >>> from speechbrain.dataio.dataio import read_audio\n >>> from speechbrain.processing.features import STFT\n >>> from speechbrain.processing.multi_mic import Covariance\n >>> from speechbrain.processing.decomposition import inv\n >>>\n >>> xs_speech = read_audio(\n ... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'\n ... 
)\n >>> xs_noise = read_audio('tests/samples/multi-mic/noise_0.70225_-0.70225_0.11704.flac')\n >>> xs = xs_speech + 0.05 * xs_noise\n >>> xs = xs.unsqueeze(0).float()\n >>>\n >>> stft = STFT(sample_rate=16000)\n >>> cov = Covariance()\n >>>\n >>> Xs = stft(xs)\n >>> XXs = cov(Xs)\n >>> XXs_inv = inv(XXs)\n \"\"\"\n\n # Dimensions\n d = x.dim()\n p = x.shape[-1]\n n_channels = int(round(((1 + 8 * p) ** 0.5 - 1) / 2))\n\n # Output matrix\n ash = f(pos_def(x))\n ash_inv = torch.inverse(ash)\n as_inv = finv(ash_inv)\n\n indices = torch.triu_indices(n_channels, n_channels)\n\n x_inv = torch.zeros(\n x.shape[slice(0, d - 2)] + (n_channels, n_channels, 2),\n dtype=x.dtype,\n device=x.device,\n )\n\n x_inv[..., indices[1], indices[0], 0] = as_inv[..., 0, :]\n x_inv[..., indices[1], indices[0], 1] = -1 * as_inv[..., 1, :]\n x_inv[..., indices[0], indices[1], 0] = as_inv[..., 0, :]\n x_inv[..., indices[0], indices[1], 1] = as_inv[..., 1, :]\n\n return x_inv\n"
] |
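A note on the `GccPhat._interpolate` step in the multi_mic code quoted in the cell above: the fractional delay is obtained by standard parabolic (quadratic) peak interpolation around the argmax of the cross-correlation. The short sketch below is my own toy illustration of that formula, not part of the dataset; the helper name `parabolic_refine` and the sample values are invented for the example.

```python
# Standalone sketch of the parabolic peak interpolation used by GccPhat._interpolate:
# the integer argmax of the cross-correlation is refined with its two neighbours to
# obtain a sub-sample delay estimate.
import torch

def parabolic_refine(y1, y2, y3, delay):
    # y2 is the peak sample, y1 and y3 its left/right neighbours; the returned value
    # is the delay shifted to the vertex of the parabola fitted through the 3 points.
    return delay + (y1 - y3) / (2 * y1 - 4 * y2 + 2 * y3)

# Toy cross-correlation whose true peak lies between samples 4 and 5.
xx = torch.tensor([0.0, 0.1, 0.3, 0.7, 1.0, 0.9, 0.4, 0.1])
delay = torch.argmax(xx)  # integer peak at index 4
frac = parabolic_refine(xx[delay - 1], xx[delay], xx[delay + 1], delay.float())
print(frac)  # tensor(4.2500), a fractional (sub-sample) TDOA estimate
```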
[
[
"torch.mean",
"torch.fmod",
"torch.max",
"torch.zeros",
"torch.sin",
"torch.cat",
"torch.sum",
"torch.unique",
"torch.FloatTensor",
"torch.complex",
"torch.irfft",
"torch.sqrt",
"torch.reshape",
"torch.sort",
"torch.triu_indices",
"torch.arange",
"torch.cos",
"torch.div",
"torch.LongTensor",
"torch.fft.irfft",
"torch.stack",
"torch.matmul",
"torch.gather"
],
[
"torch.linalg.cholesky",
"torch.transpose",
"torch.zeros",
"torch.sqrt",
"torch.eq",
"torch.sum",
"torch.linalg.eigh",
"torch.inverse",
"torch.matmul",
"torch.triu_indices"
]
] |
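The `torch.linalg.cholesky` / `torch.linalg.eigh` calls listed above belong to the quoted decomposition module, which handles complex Hermitian matrices by expanding them into real block matrices before applying real-valued linear-algebra routines. The sketch below is a simplified, self-contained illustration of that idea, not the module's actual layout: the quoted `f` transform interleaves real and imaginary rows, while this sketch uses the equivalent contiguous [[A, -B], [B, A]] form; the function name `complex_to_blocks` and the 2x2 example matrix are my own.

```python
# Block-matrix representation of a complex Hermitian matrix A + jB as the real
# symmetric matrix [[A, -B], [B, A]], so real-valued routines such as
# torch.linalg.cholesky and torch.linalg.eigh can operate on it.
import torch

def complex_to_blocks(m):
    # Expand a (C, C) complex matrix into its (2C, 2C) real block form.
    a, b = m.real, m.imag
    top = torch.cat((a, -b), dim=1)
    bottom = torch.cat((b, a), dim=1)
    return torch.cat((top, bottom), dim=0)

m = torch.tensor([[2.0 + 0.0j, 1.0 - 1.0j],
                  [1.0 + 1.0j, 3.0 + 0.0j]])  # small Hermitian example
blocks = complex_to_blocks(m)
print(torch.linalg.eigvalsh(m))       # tensor([1., 4.])
print(torch.linalg.eigvalsh(blocks))  # tensor([1., 1., 4., 4.]), each eigenvalue appears twice
```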
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
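For reference, the DelaySum, Mvdr, SrpPhat and Music classes in the code cell of the preceding row all rely on the `steering` routine, which assigns bin k of a one-sided STFT the phase term exp(-j * 2*pi*k * tau / frame_size) for each per-channel delay tau in samples. The sketch below is a hypothetical, simplified single-frame version of that construction, not the dataset's code: it returns a complex tensor rather than the real/imaginary pair layout used in the quoted module, and the name `steering_vector` and the example delays are mine.

```python
# Minimal steering-vector sketch: one complex phasor per (frequency bin, microphone).
import math
import torch

def steering_vector(taus, n_fft):
    # taus: (n_mics,) delays in samples; returns a complex (n_fft, n_mics) tensor.
    frame_size = (n_fft - 1) * 2
    omegas = 2 * math.pi * torch.arange(n_fft).float() / frame_size
    phase = -omegas.unsqueeze(1) * taus.unsqueeze(0)
    return torch.polar(torch.ones_like(phase), phase)

taus = torch.tensor([0.0, 1.5, -2.0])  # made-up delays for three microphones
As = steering_vector(taus, n_fft=257)  # 257 one-sided bins of a 512-point STFT
print(As.shape)  # torch.Size([257, 3])
```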
zanecodes/asdf
|
[
"c1f6cf915409da5372c47ac725dc922b4bd52f7d",
"c1f6cf915409da5372c47ac725dc922b4bd52f7d"
] |
[
"asdf/tests/test_schema.py",
"asdf/commands/tests/test_edit.py"
] |
[
"import io\nfrom datetime import datetime\n\nfrom jsonschema import ValidationError\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport pytest\n\nimport asdf\nfrom asdf import constants\nfrom asdf import get_config, config_context\nfrom asdf import extension\nfrom asdf import resolver\nfrom asdf import schema\nfrom asdf import types\nfrom asdf import util\nfrom asdf import yamlutil\nfrom asdf import tagged\nfrom asdf.tests import helpers, CustomExtension\nfrom asdf.exceptions import AsdfWarning, AsdfConversionWarning, AsdfDeprecationWarning\n\n\n\nclass TagReferenceType(types.CustomType):\n \"\"\"\n This class is used by several tests below for validating foreign type\n references in schemas and ASDF files.\n \"\"\"\n name = 'tag_reference'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n @classmethod\n def from_tree(cls, tree, ctx):\n node = {}\n node['name'] = tree['name']\n node['things'] = tree['things']\n return node\n\n\ndef test_tagging_scalars():\n pytest.importorskip('astropy', '3.0.0')\n from astropy import units as u\n\n yaml = \"\"\"\nunit: !unit/unit-1.0.0\n m\nnot_unit:\n m\n \"\"\"\n buff = helpers.yaml_to_asdf(yaml)\n with asdf.open(buff) as ff:\n assert isinstance(ff.tree['unit'], u.UnitBase)\n assert not isinstance(ff.tree['not_unit'], u.UnitBase)\n assert isinstance(ff.tree['not_unit'], str)\n\n assert ff.tree == {\n 'unit': u.m,\n 'not_unit': 'm'\n }\n\n\ndef test_read_json_schema():\n \"\"\"Pytest to make sure reading JSON schemas succeeds.\n\n This was known to fail on Python 3.5 See issue #314 at\n https://github.com/asdf-format/asdf/issues/314 for more details.\n \"\"\"\n json_schema = helpers.get_test_data_path('example_schema.json')\n schema_tree = schema.load_schema(json_schema, resolve_references=True)\n schema.check_schema(schema_tree)\n\n\ndef test_load_schema(tmpdir):\n schema_def = \"\"\"\n%YAML 1.1\n---\n$schema: \"http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\"\nid: \"http://stsci.edu/schemas/asdf/nugatory/nugatory-1.0.0\"\ntag: \"tag:stsci.edu:asdf/nugatory/nugatory-1.0.0\"\n\ntype: object\nproperties:\n foobar:\n $ref: \"../core/ndarray-1.0.0\"\n\nrequired: [foobar]\n...\n \"\"\"\n schema_path = tmpdir.join('nugatory.yaml')\n schema_path.write(schema_def.encode())\n\n schema_tree = schema.load_schema(str(schema_path), resolve_references=True)\n schema.check_schema(schema_tree)\n\n\ndef test_load_schema_with_full_tag(tmpdir):\n schema_def = \"\"\"\n%YAML 1.1\n---\n$schema: \"http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\"\nid: \"http://stsci.edu/schemas/asdf/nugatory/nugatory-1.0.0\"\ntag: \"tag:stsci.edu:asdf/nugatory/nugatory-1.0.0\"\n\ntype: object\nproperties:\n foobar:\n $ref: \"tag:stsci.edu:asdf/core/ndarray-1.0.0\"\n\nrequired: [foobar]\n...\n \"\"\"\n schema_path = tmpdir.join('nugatory.yaml')\n schema_path.write(schema_def.encode())\n\n schema_tree = schema.load_schema(str(schema_path), resolve_references=True)\n schema.check_schema(schema_tree)\n\n\ndef test_load_schema_with_tag_address(tmpdir):\n schema_def = \"\"\"\n%YAML 1.1\n%TAG !asdf! 
tag:stsci.edu:asdf/\n---\n$schema: \"http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\"\nid: \"http://stsci.edu/schemas/asdf/nugatory/nugatory-1.0.0\"\ntag: \"tag:stsci.edu:asdf/nugatory/nugatory-1.0.0\"\n\ntype: object\nproperties:\n foobar:\n $ref: \"http://stsci.edu/schemas/asdf/core/ndarray-1.0.0\"\n\nrequired: [foobar]\n...\n \"\"\"\n schema_path = tmpdir.join('nugatory.yaml')\n schema_path.write(schema_def.encode())\n\n schema_tree = schema.load_schema(str(schema_path), resolve_references=True)\n schema.check_schema(schema_tree)\n\n\ndef test_load_schema_with_file_url(tmpdir):\n schema_def = \"\"\"\n%YAML 1.1\n%TAG !asdf! tag:stsci.edu:asdf/\n---\n$schema: \"http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\"\nid: \"http://stsci.edu/schemas/asdf/nugatory/nugatory-1.0.0\"\ntag: \"tag:stsci.edu:asdf/nugatory/nugatory-1.0.0\"\n\ntype: object\nproperties:\n foobar:\n $ref: \"{}\"\n\nrequired: [foobar]\n...\n \"\"\".format(extension.get_default_resolver()('tag:stsci.edu:asdf/core/ndarray-1.0.0'))\n schema_path = tmpdir.join('nugatory.yaml')\n schema_path.write(schema_def.encode())\n\n schema_tree = schema.load_schema(str(schema_path), resolve_references=True)\n schema.check_schema(schema_tree)\n\n\ndef test_load_schema_with_asdf_uri_scheme():\n subschema_content=\"\"\"%YAML 1.1\n---\n$schema: http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\nid: asdf://somewhere.org/schemas/bar\n\nbar:\n type: string\n...\n\"\"\"\n content = \"\"\"%YAML 1.1\n---\n$schema: http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\nid: asdf://somewhere.org/schemas/foo\n\ndefinitions:\n local_bar:\n type: string\n\ntype: object\nproperties:\n bar:\n $ref: asdf://somewhere.org/schemas/bar#/bar\n local_bar:\n $ref: '#/definitions/local_bar'\n...\n\"\"\"\n with asdf.config_context() as config:\n config.add_resource_mapping({\"asdf://somewhere.org/schemas/foo\": content})\n config.add_resource_mapping({\"asdf://somewhere.org/schemas/bar\": subschema_content})\n\n schema_tree = schema.load_schema(\"asdf://somewhere.org/schemas/foo\")\n instance = {\"bar\": \"baz\", \"local_bar\": \"foz\"}\n schema.validate(instance, schema=schema_tree)\n with pytest.raises(ValidationError):\n schema.validate({\"bar\": 12}, schema=schema_tree)\n\n\ndef test_load_schema_with_stsci_id():\n \"\"\"\n This tests the following edge case:\n - schema references a subschema provided by the new extension API\n - subschema URI shares a prefix with one of the old-style extension resolvers\n - resolve_references is enabled\n\n If we're not careful, the old-style resolver will mangle the URI and\n we won't be able to retrieve the schema content.\n \"\"\"\n subschema_content=\"\"\"%YAML 1.1\n---\n$schema: http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\nid: http://stsci.edu/schemas/bar\n\nbar:\n type: string\n...\n\"\"\"\n content = \"\"\"%YAML 1.1\n---\n$schema: http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\nid: http://stsci.edu/schemas/foo\n\ndefinitions:\n local_bar:\n type: string\n\ntype: object\nproperties:\n bar:\n $ref: http://stsci.edu/schemas/bar#/bar\n local_bar:\n $ref: '#/definitions/local_bar'\n...\n\"\"\"\n with asdf.config_context() as config:\n config.add_resource_mapping({\"http://stsci.edu/schemas/foo\": content})\n config.add_resource_mapping({\"http://stsci.edu/schemas/bar\": subschema_content})\n\n schema_tree = schema.load_schema(\"http://stsci.edu/schemas/foo\", resolve_references=True)\n instance = {\"bar\": \"baz\", \"local_bar\": \"foz\"}\n schema.validate(instance, schema=schema_tree)\n with pytest.raises(ValidationError):\n 
schema.validate({\"bar\": 12}, schema=schema_tree)\n\n\ndef test_schema_caching():\n # Make sure that if we request the same URL, we get a different object\n # (despite the caching internal to load_schema). Changes to a schema\n # dict should not impact other uses of that schema.\n\n s1 = schema.load_schema(\n 'http://stsci.edu/schemas/asdf/core/asdf-1.0.0')\n s2 = schema.load_schema(\n 'http://stsci.edu/schemas/asdf/core/asdf-1.0.0')\n assert s1 is not s2\n\n\ndef test_asdf_file_resolver_hashing():\n # Confirm that resolvers from distinct AsdfFile instances\n # hash to the same value (this allows schema caching to function).\n a1 = asdf.AsdfFile()\n a2 = asdf.AsdfFile()\n\n assert hash(a1.resolver) == hash(a2.resolver)\n assert a1.resolver == a2.resolver\n\n\ndef test_load_schema_from_resource_mapping():\n content = \"\"\"\nid: http://somewhere.org/schemas/razmataz-1.0.0\ntype: object\nproperties:\n foo:\n type: string\n bar:\n type: boolean\n\"\"\".encode(\"utf-8\")\n\n get_config().add_resource_mapping({\"http://somewhere.org/schemas/razmataz-1.0.0\": content})\n\n s = schema.load_schema(\"http://somewhere.org/schemas/razmataz-1.0.0\")\n\n assert s[\"id\"] == \"http://somewhere.org/schemas/razmataz-1.0.0\"\n\n\ndef test_flow_style():\n class CustomFlowStyleType(dict, types.CustomType):\n name = 'custom_flow'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n class CustomFlowStyleExtension(CustomExtension):\n @property\n def types(self):\n return [CustomFlowStyleType]\n\n tree = {\n 'custom_flow': CustomFlowStyleType({'a': 42, 'b': 43})\n }\n\n buff = io.BytesIO()\n ff = asdf.AsdfFile(tree, extensions=CustomFlowStyleExtension())\n ff.write_to(buff)\n\n assert b' a: 42\\n b: 43' in buff.getvalue()\n\n\ndef test_style():\n class CustomStyleType(str, types.CustomType):\n name = 'custom_style'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n class CustomStyleExtension(CustomExtension):\n @property\n def types(self):\n return [CustomStyleType]\n\n tree = {\n 'custom_style': CustomStyleType(\"short\")\n }\n\n buff = io.BytesIO()\n ff = asdf.AsdfFile(tree, extensions=CustomStyleExtension())\n ff.write_to(buff)\n\n assert b'|-\\n short\\n' in buff.getvalue()\n\n\ndef test_property_order():\n tree = {'foo': np.ndarray([1, 2, 3])}\n\n buff = io.BytesIO()\n ff = asdf.AsdfFile(tree)\n ff.write_to(buff)\n\n ndarray_schema = schema.load_schema(\n 'http://stsci.edu/schemas/asdf/core/ndarray-1.0.0')\n property_order = ndarray_schema['anyOf'][1]['propertyOrder']\n\n last_index = 0\n for prop in property_order:\n index = buff.getvalue().find(prop.encode('utf-8') + b':')\n if index != -1:\n assert index > last_index\n last_index = index\n\n\ndef test_invalid_nested():\n class CustomType(str, types.CustomType):\n name = 'custom'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n class CustomTypeExtension(CustomExtension):\n @property\n def types(self):\n return [CustomType]\n\n yaml = \"\"\"\ncustom: !<tag:nowhere.org:custom/custom-1.0.0>\n foo\n \"\"\"\n buff = helpers.yaml_to_asdf(yaml)\n # This should cause a warning but not an error because without explicitly\n # providing an extension, our custom type will not be recognized and will\n # simply be converted to a raw type.\n with pytest.warns(AsdfConversionWarning, match=\"tag:nowhere.org:custom/custom-1.0.0\"):\n with asdf.open(buff):\n pass\n\n buff.seek(0)\n with pytest.raises(ValidationError):\n with asdf.open(buff, extensions=[CustomTypeExtension()]):\n pass\n\n # 
Make sure tags get validated inside of other tags that know\n # nothing about them.\n yaml = \"\"\"\narray: !core/ndarray-1.0.0\n data: [0, 1, 2]\n custom: !<tag:nowhere.org:custom/custom-1.0.0>\n foo\n \"\"\"\n buff = helpers.yaml_to_asdf(yaml)\n with pytest.raises(ValidationError):\n with asdf.open(buff, extensions=[CustomTypeExtension()]):\n pass\n\n\ndef test_invalid_schema():\n s = {'type': 'integer'}\n schema.check_schema(s)\n\n s = {'type': 'foobar'}\n with pytest.raises(ValidationError):\n schema.check_schema(s)\n\n\ndef test_defaults():\n s = {\n 'type': 'object',\n 'properties': {\n 'a': {\n 'type': 'integer',\n 'default': 42\n }\n }\n }\n\n t = {}\n\n cls = schema._create_validator(schema.FILL_DEFAULTS)\n validator = cls(s)\n validator.validate(t, _schema=s)\n\n assert t['a'] == 42\n\n cls = schema._create_validator(schema.REMOVE_DEFAULTS)\n validator = cls(s)\n validator.validate(t, _schema=s)\n\n assert t == {}\n\n\ndef test_default_check_in_schema():\n s = {\n 'type': 'object',\n 'properties': {\n 'a': {\n 'type': 'integer',\n 'default': 'foo'\n }\n }\n }\n\n with pytest.raises(ValidationError):\n schema.check_schema(s)\n\n schema.check_schema(s, validate_default=False)\n\n\ndef test_check_complex_default():\n default_software = tagged.TaggedDict(\n {\"name\": \"asdf\", \"version\": \"2.7.0\"},\n \"tag:stsci.edu/asdf/core/software-1.0.0\"\n )\n\n s = {\n 'type': 'object',\n 'properties': {\n 'a': {\n 'type': 'object',\n 'tag': 'tag:stsci.edu/asdf/core/software-1.0.0',\n 'default': default_software\n }\n }\n }\n\n schema.check_schema(s)\n\n s['properties']['a']['tag'] = 'tag:stsci.edu/asdf/core/ndarray-1.0.0'\n with pytest.raises(ValidationError):\n schema.check_schema(s)\n\n\ndef test_fill_and_remove_defaults():\n class DefaultType(dict, types.CustomType):\n name = 'default'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n class DefaultTypeExtension(CustomExtension):\n @property\n def types(self):\n return [DefaultType]\n\n yaml = \"\"\"\ncustom: !<tag:nowhere.org:custom/default-1.0.0>\n b: {}\n d: {}\n g: {}\n j:\n l: 362\n \"\"\"\n buff = helpers.yaml_to_asdf(yaml)\n with asdf.open(buff, extensions=[DefaultTypeExtension()]) as ff:\n assert 'a' in ff.tree['custom']\n assert ff.tree['custom']['a'] == 42\n assert ff.tree['custom']['b']['c'] == 82\n # allOf combiner should fill defaults from all subschemas:\n assert ff.tree['custom']['d']['e'] == 122\n assert ff.tree['custom']['d']['f'] == 162\n # anyOf combiners should be ignored:\n assert 'h' not in ff.tree['custom']['g']\n assert 'i' not in ff.tree['custom']['g']\n # oneOf combiners should be ignored:\n assert 'k' not in ff.tree['custom']['j']\n assert ff.tree['custom']['j']['l'] == 362\n\n buff.seek(0)\n with pytest.warns(AsdfDeprecationWarning, match='do_not_fill_defaults'):\n with asdf.open(buff, extensions=[DefaultTypeExtension()],\n do_not_fill_defaults=True) as ff:\n assert 'a' not in ff.tree['custom']\n assert 'c' not in ff.tree['custom']['b']\n assert 'e' not in ff.tree['custom']['d']\n assert 'f' not in ff.tree['custom']['d']\n assert 'h' not in ff.tree['custom']['g']\n assert 'i' not in ff.tree['custom']['g']\n assert 'k' not in ff.tree['custom']['j']\n assert ff.tree['custom']['j']['l'] == 362\n ff.fill_defaults()\n assert 'a' in ff.tree['custom']\n assert ff.tree['custom']['a'] == 42\n assert 'c' in ff.tree['custom']['b']\n assert ff.tree['custom']['b']['c'] == 82\n assert ff.tree['custom']['b']['c'] == 82\n assert ff.tree['custom']['d']['e'] == 122\n assert 
ff.tree['custom']['d']['f'] == 162\n assert 'h' not in ff.tree['custom']['g']\n assert 'i' not in ff.tree['custom']['g']\n assert 'k' not in ff.tree['custom']['j']\n assert ff.tree['custom']['j']['l'] == 362\n ff.remove_defaults()\n assert 'a' not in ff.tree['custom']\n assert 'c' not in ff.tree['custom']['b']\n assert 'e' not in ff.tree['custom']['d']\n assert 'f' not in ff.tree['custom']['d']\n assert 'h' not in ff.tree['custom']['g']\n assert 'i' not in ff.tree['custom']['g']\n assert 'k' not in ff.tree['custom']['j']\n assert ff.tree['custom']['j']['l'] == 362\n\n buff.seek(0)\n with config_context() as config:\n config.legacy_fill_schema_defaults = False\n with asdf.open(buff, extensions=[DefaultTypeExtension()]) as ff:\n assert 'a' not in ff.tree['custom']\n assert 'c' not in ff.tree['custom']['b']\n assert 'e' not in ff.tree['custom']['d']\n assert 'f' not in ff.tree['custom']['d']\n assert 'h' not in ff.tree['custom']['g']\n assert 'i' not in ff.tree['custom']['g']\n assert 'k' not in ff.tree['custom']['j']\n assert ff.tree['custom']['j']['l'] == 362\n\n\ndef test_one_of():\n \"\"\"\n Covers https://github.com/asdf-format/asdf/issues/809\n \"\"\"\n class OneOfType(dict, types.CustomType):\n name = 'one_of'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n class OneOfTypeExtension(CustomExtension):\n @property\n def types(self):\n return [OneOfType]\n\n yaml = \"\"\"\none_of: !<tag:nowhere.org:custom/one_of-1.0.0>\n value: foo\n \"\"\"\n buff = helpers.yaml_to_asdf(yaml)\n with asdf.open(buff, extensions=[OneOfTypeExtension()]) as ff:\n assert ff['one_of']['value'] == 'foo'\n\n\ndef test_tag_reference_validation():\n class DefaultTypeExtension(CustomExtension):\n @property\n def types(self):\n return [TagReferenceType]\n\n yaml = \"\"\"\ncustom: !<tag:nowhere.org:custom/tag_reference-1.0.0>\n name:\n \"Something\"\n things: !core/ndarray-1.0.0\n data: [1, 2, 3]\n \"\"\"\n\n buff = helpers.yaml_to_asdf(yaml)\n with asdf.open(buff, extensions=[DefaultTypeExtension()]) as ff:\n custom = ff.tree['custom']\n assert custom['name'] == \"Something\"\n assert_array_equal(custom['things'], [1, 2, 3])\n\n\ndef test_foreign_tag_reference_validation():\n class ForeignTagReferenceType(types.CustomType):\n name = 'foreign_tag_reference'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n @classmethod\n def from_tree(cls, tree, ctx):\n node = {}\n node['a'] = tree['a']\n node['b'] = tree['b']\n return node\n\n class ForeignTypeExtension(CustomExtension):\n @property\n def types(self):\n return [TagReferenceType, ForeignTagReferenceType]\n\n yaml = \"\"\"\ncustom: !<tag:nowhere.org:custom/foreign_tag_reference-1.0.0>\n a: !<tag:nowhere.org:custom/tag_reference-1.0.0>\n name:\n \"Something\"\n things: !core/ndarray-1.0.0\n data: [1, 2, 3]\n b: !<tag:nowhere.org:custom/tag_reference-1.0.0>\n name:\n \"Anything\"\n things: !core/ndarray-1.0.0\n data: [4, 5, 6]\n \"\"\"\n\n buff = helpers.yaml_to_asdf(yaml)\n with asdf.open(buff, extensions=ForeignTypeExtension()) as ff:\n a = ff.tree['custom']['a']\n b = ff.tree['custom']['b']\n assert a['name'] == 'Something'\n assert_array_equal(a['things'], [1, 2, 3])\n assert b['name'] == 'Anything'\n assert_array_equal(b['things'], [4, 5, 6])\n\n\ndef test_self_reference_resolution():\n r = resolver.Resolver(CustomExtension().url_mapping, 'url')\n s = schema.load_schema(\n helpers.get_test_data_path('self_referencing-1.0.0.yaml'),\n resolver=r,\n resolve_references=True)\n assert '$ref' not in repr(s)\n 
assert s['anyOf'][1] == s['anyOf'][0]\n\n\ndef test_schema_resolved_via_entry_points():\n \"\"\"Test that entry points mappings to core schema works\"\"\"\n r = extension.get_default_resolver()\n tag = types.format_tag('stsci.edu', 'asdf', '1.0.0', 'fits/fits')\n url = extension.default_extensions.extension_list.tag_mapping(tag)\n\n s = schema.load_schema(url, resolver=r, resolve_references=True)\n assert tag in repr(s)\n\n\[email protected](\"num\", [constants.MAX_NUMBER+1, constants.MIN_NUMBER-1])\ndef test_max_min_literals(num):\n\n tree = {\n 'test_int': num,\n }\n\n with pytest.raises(ValidationError):\n asdf.AsdfFile(tree)\n\n tree = {\n 'test_list': [num],\n }\n\n with pytest.raises(ValidationError):\n asdf.AsdfFile(tree)\n\n tree = {\n num: 'test_key',\n }\n\n with pytest.raises(ValidationError):\n asdf.AsdfFile(tree)\n\n\[email protected](\"num\", [constants.MAX_NUMBER+1, constants.MIN_NUMBER-1])\[email protected](\"ttype\", [\"val\", \"list\", \"key\"])\ndef test_max_min_literals_write(num, ttype, tmpdir):\n outfile = tmpdir / \"test.asdf\"\n af = asdf.AsdfFile()\n\n # Validation doesn't occur here, so no warning/error will be raised.\n if ttype == \"val\":\n af.tree['test_int'] = num\n elif ttype == \"list\":\n af.tree['test_int'] = [num]\n else:\n af.tree[num] = 'test_key'\n\n # Validation will occur on write, though, so detect it.\n with pytest.raises(ValidationError):\n af.write_to(outfile)\n af.close()\n\n\[email protected](\"value\", [constants.MAX_NUMBER+1, constants.MIN_NUMBER-1])\ndef test_read_large_literal(value):\n yaml = f\"integer: {value}\"\n\n buff = helpers.yaml_to_asdf(yaml)\n\n with pytest.warns(AsdfWarning, match=\"Invalid integer literal value\"):\n with asdf.open(buff) as af:\n assert af['integer'] == value\n\n yaml = f\"{value}: foo\"\n\n buff = helpers.yaml_to_asdf(yaml)\n\n with pytest.warns(AsdfWarning, match=\"Invalid integer literal value\"):\n with asdf.open(buff) as af:\n assert af[value] == \"foo\"\n\n\[email protected](\n \"version,keys\",\n [\n (\"1.6.0\", [\"foo\", 42, True]),\n (\"1.5.0\", [\"foo\", 42, True, 3.14159, datetime.now(), b\"foo\", None]),\n ]\n)\ndef test_mapping_supported_key_types(keys, version):\n for key in keys:\n with helpers.assert_no_warnings():\n af = asdf.AsdfFile({key: \"value\"}, version=version)\n buff = io.BytesIO()\n af.write_to(buff)\n buff.seek(0)\n with asdf.open(buff) as af:\n assert af[key] == \"value\"\n\n\[email protected](\n \"version,keys\",\n [\n (\"1.6.0\", [3.14159, datetime.now(), b\"foo\", None, (\"foo\", \"bar\")]),\n ]\n)\ndef test_mapping_unsupported_key_types(keys, version):\n for key in keys:\n with pytest.raises(ValidationError, match=\"Mapping key .* is not permitted\"):\n af = asdf.AsdfFile({key: \"value\"}, version=version)\n buff = io.BytesIO()\n af.write_to(buff)\n\n\ndef test_nested_array():\n s = {\n 'type': 'object',\n 'properties': {\n 'stuff': {\n 'type': 'array',\n 'items': {\n 'type': 'array',\n 'items': [\n { 'type': 'integer' },\n { 'type': 'string' },\n { 'type': 'number' },\n ],\n 'minItems': 3,\n 'maxItems': 3\n }\n }\n }\n }\n\n good = dict(stuff=[[1, 'hello', 2], [4, 'world', 9.7]])\n schema.validate(good, schema=s)\n\n bads = [\n dict(stuff=[[1, 2, 3]]),\n dict(stuff=[12,'dldl']),\n dict(stuff=[[12, 'dldl']]),\n dict(stuff=[[1, 'hello', 2], [4, 5]]),\n dict(stuff=[[1, 'hello', 2], [4, 5, 6]])\n ]\n\n for b in bads:\n with pytest.raises(ValidationError):\n schema.validate(b, schema=s)\n\n\ndef test_nested_array_yaml(tmpdir):\n schema_def = \"\"\"\n%YAML 1.1\n---\ntype: 
object\nproperties:\n stuff:\n type: array\n items:\n type: array\n items:\n - type: integer\n - type: string\n - type: number\n minItems: 3\n maxItems: 3\n...\n \"\"\"\n schema_path = tmpdir.join('nested.yaml')\n schema_path.write(schema_def.encode())\n\n schema_tree = schema.load_schema(str(schema_path))\n schema.check_schema(schema_tree)\n\n good = dict(stuff=[[1, 'hello', 2], [4, 'world', 9.7]])\n schema.validate(good, schema=schema_tree)\n\n bads = [\n dict(stuff=[[1, 2, 3]]),\n dict(stuff=[12,'dldl']),\n dict(stuff=[[12, 'dldl']]),\n dict(stuff=[[1, 'hello', 2], [4, 5]]),\n dict(stuff=[[1, 'hello', 2], [4, 5, 6]])\n ]\n\n for b in bads:\n with pytest.raises(ValidationError):\n schema.validate(b, schema=schema_tree)\n\n\ndef test_type_missing_dependencies():\n pytest.importorskip('astropy', '3.0.0')\n\n class MissingType(types.CustomType):\n name = 'missing'\n organization = 'nowhere.org'\n version = (1, 1, 0)\n standard = 'custom'\n types = ['asdfghjkl12345.foo']\n requires = [\"ASDFGHJKL12345\"]\n\n class DefaultTypeExtension(CustomExtension):\n @property\n def types(self):\n return [MissingType]\n\n yaml = \"\"\"\ncustom: !<tag:nowhere.org:custom/missing-1.1.0>\n b: {foo: 42}\n \"\"\"\n buff = helpers.yaml_to_asdf(yaml)\n with pytest.warns(AsdfConversionWarning, match=\"Failed to convert tag:nowhere.org:custom/missing-1.1.0\"):\n with asdf.open(buff, extensions=[DefaultTypeExtension()]) as ff:\n assert ff.tree['custom']['b']['foo'] == 42\n\n\ndef test_assert_roundtrip_with_extension(tmpdir):\n called_custom_assert_equal = [False]\n\n class CustomType(dict, types.CustomType):\n name = 'custom_flow'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n @classmethod\n def assert_equal(cls, old, new):\n called_custom_assert_equal[0] = True\n\n class CustomTypeExtension(CustomExtension):\n @property\n def types(self):\n return [CustomType]\n\n tree = {\n 'custom': CustomType({'a': 42, 'b': 43})\n }\n\n def check(ff):\n assert isinstance(ff.tree['custom'], CustomType)\n\n with helpers.assert_no_warnings():\n helpers.assert_roundtrip_tree(\n tree, tmpdir, extensions=[CustomTypeExtension()])\n\n assert called_custom_assert_equal[0] is True\n\n\ndef test_custom_validation_bad(tmpdir):\n custom_schema_path = helpers.get_test_data_path('custom_schema.yaml')\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree does not conform to the custom schema\n tree = {'stuff': 42, 'other_stuff': 'hello'}\n\n # Creating file without custom schema should pass\n with asdf.AsdfFile(tree) as ff:\n ff.write_to(asdf_file)\n\n # Creating file using custom schema should fail\n with pytest.raises(ValidationError):\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path):\n pass\n\n # Opening file without custom schema should pass\n with asdf.open(asdf_file):\n pass\n\n # Opening file with custom schema should fail\n with pytest.raises(ValidationError):\n with asdf.open(asdf_file, custom_schema=custom_schema_path):\n pass\n\n\ndef test_custom_validation_good(tmpdir):\n custom_schema_path = helpers.get_test_data_path('custom_schema.yaml')\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree conforms to the custom schema\n tree = {\n 'foo': {'x': 42, 'y': 10},\n 'bar': {'a': 'hello', 'b': 'banjo'}\n }\n\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path) as ff:\n ff.write_to(asdf_file)\n\n with asdf.open(asdf_file, custom_schema=custom_schema_path):\n pass\n\n\ndef test_custom_validation_pathlib(tmpdir):\n \"\"\"\n Make sure custom schema paths can be pathlib.Path 
objects\n\n See https://github.com/asdf-format/asdf/issues/653 for discussion.\n \"\"\"\n from pathlib import Path\n\n custom_schema_path = Path(helpers.get_test_data_path('custom_schema.yaml'))\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree conforms to the custom schema\n tree = {\n 'foo': {'x': 42, 'y': 10},\n 'bar': {'a': 'hello', 'b': 'banjo'}\n }\n\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path) as ff:\n ff.write_to(asdf_file)\n\n with asdf.open(asdf_file, custom_schema=custom_schema_path):\n pass\n\n\ndef test_custom_validation_with_definitions_good(tmpdir):\n custom_schema_path = helpers.get_test_data_path('custom_schema_definitions.yaml')\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree conforms to the custom schema\n tree = {\n 'thing': { 'biz': 'hello', 'baz': 'world' }\n }\n\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path) as ff:\n ff.write_to(asdf_file)\n\n with asdf.open(asdf_file, custom_schema=custom_schema_path):\n pass\n\n\ndef test_custom_validation_with_definitions_bad(tmpdir):\n custom_schema_path = helpers.get_test_data_path('custom_schema_definitions.yaml')\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree does NOT conform to the custom schema\n tree = {\n 'forb': { 'biz': 'hello', 'baz': 'world' }\n }\n\n # Creating file without custom schema should pass\n with asdf.AsdfFile(tree) as ff:\n ff.write_to(asdf_file)\n\n # Creating file with custom schema should fail\n with pytest.raises(ValidationError):\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path):\n pass\n\n # Opening file without custom schema should pass\n with asdf.open(asdf_file):\n pass\n\n # Opening file with custom schema should fail\n with pytest.raises(ValidationError):\n with asdf.open(asdf_file, custom_schema=custom_schema_path):\n pass\n\n\ndef test_custom_validation_with_external_ref_good(tmpdir):\n custom_schema_path = helpers.get_test_data_path('custom_schema_external_ref.yaml')\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree conforms to the custom schema\n tree = {\n 'foo': asdf.tags.core.Software(name=\"Microsoft Windows\", version=\"95\")\n }\n\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path) as ff:\n ff.write_to(asdf_file)\n\n with asdf.open(asdf_file, custom_schema=custom_schema_path):\n pass\n\n\ndef test_custom_validation_with_external_ref_bad(tmpdir):\n custom_schema_path = helpers.get_test_data_path('custom_schema_external_ref.yaml')\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree does not conform to the custom schema\n tree = {\n 'foo': False\n }\n\n # Creating file without custom schema should pass\n with asdf.AsdfFile(tree) as ff:\n ff.write_to(asdf_file)\n\n # Creating file with custom schema should fail\n with pytest.raises(ValidationError):\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path):\n pass\n\n # Opening file without custom schema should pass\n with asdf.open(asdf_file):\n pass\n\n # Opening file with custom schema should fail\n with pytest.raises(ValidationError):\n with asdf.open(asdf_file, custom_schema=custom_schema_path):\n pass\n\n\ndef test_load_custom_schema_deprecated():\n custom_schema_path = helpers.get_test_data_path('custom_schema.yaml')\n\n with pytest.deprecated_call():\n schema.load_custom_schema(custom_schema_path)\n\n\ndef test_load_schema_resolve_local_refs_deprecated():\n custom_schema_path = helpers.get_test_data_path('custom_schema_definitions.yaml')\n\n with pytest.deprecated_call():\n schema.load_schema(custom_schema_path, 
resolve_local_refs=True)\n\n\ndef test_nonexistent_tag(tmpdir):\n \"\"\"\n This tests the case where a node is tagged with a type that apparently\n comes from an extension that is known, but the type itself can't be found.\n\n This could occur when a more recent version of an installed package\n provides the new type, but an older version of the package is installed.\n ASDF should still be able to open the file in this case, but it won't be\n able to restore the type.\n\n The bug that prompted this test results from attempting to load a schema\n file that doesn't exist, which is why this test belongs in this file.\n \"\"\"\n\n # This shouldn't ever happen, but it's a useful test case\n yaml = \"\"\"\na: !core/doesnt_exist-1.0.0\n hello\n \"\"\"\n\n buff = helpers.yaml_to_asdf(yaml)\n with pytest.warns(AsdfWarning, match=\"Unable to locate schema file\"):\n with asdf.open(buff) as af:\n assert str(af['a']) == 'hello'\n\n # This is a more realistic case since we're using an external extension\n yaml = \"\"\"\na: !<tag:nowhere.org:custom/doesnt_exist-1.0.0>\n hello\n \"\"\"\n\n buff = helpers.yaml_to_asdf(yaml)\n with pytest.warns(AsdfWarning, match=\"Unable to locate schema file\"):\n with asdf.open(buff, extensions=CustomExtension()) as af:\n assert str(af['a']) == 'hello'\n\n\[email protected](\"numpy_value,valid_types\", [\n (np.str_(\"foo\"), {\"string\"}),\n (np.bytes_(\"foo\"), set()),\n (np.float16(3.14), {\"number\"}),\n (np.float32(3.14159), {\"number\"}),\n (np.float64(3.14159), {\"number\"}),\n # Evidently float128 is not available on Windows:\n (getattr(np, \"float128\", np.float64)(3.14159), {\"number\"}),\n (np.int8(42), {\"number\", \"integer\"}),\n (np.int16(42), {\"number\", \"integer\"}),\n (np.int32(42), {\"number\", \"integer\"}),\n (np.longlong(42), {\"number\", \"integer\"}),\n (np.uint8(42), {\"number\", \"integer\"}),\n (np.uint16(42), {\"number\", \"integer\"}),\n (np.uint32(42), {\"number\", \"integer\"}),\n (np.uint64(42), {\"number\", \"integer\"}),\n (np.ulonglong(42), {\"number\", \"integer\"}),\n])\ndef test_numpy_scalar_type_validation(numpy_value, valid_types):\n def _assert_validation(jsonschema_type, expected_valid):\n validator = schema.get_validator()\n try:\n validator.validate(numpy_value, _schema={\"type\": jsonschema_type})\n except ValidationError:\n valid = False\n else:\n valid = True\n\n if valid is not expected_valid:\n if expected_valid:\n description = \"valid\"\n else:\n description = \"invalid\"\n assert False, \"Expected numpy.{} to be {} against jsonschema type '{}'\".format(\n type(numpy_value).__name__, description, jsonschema_type\n )\n\n for jsonschema_type in valid_types:\n _assert_validation(jsonschema_type, True)\n\n invalid_types = {\"string\", \"number\", \"integer\", \"boolean\", \"null\", \"object\"} - valid_types\n for jsonschema_type in invalid_types:\n _assert_validation(jsonschema_type, False)\n\n\ndef test_validator_visit_repeat_nodes():\n ctx = asdf.AsdfFile()\n node = asdf.tags.core.Software(name=\"Minesweeper\")\n tree = yamlutil.custom_tree_to_tagged_tree(\n {\"node\": node, \"other_node\": node, \"nested\": {\"node\": node}},\n ctx\n )\n\n visited_nodes = []\n def _test_validator(validator, value, instance, schema):\n visited_nodes.append(instance)\n\n validator = schema.get_validator(ctx=ctx, validators=util.HashableDict(type=_test_validator))\n validator.validate(tree)\n assert len(visited_nodes) == 1\n\n visited_nodes.clear()\n validator = schema.get_validator(\n validators=util.HashableDict(type=_test_validator),\n 
_visit_repeat_nodes=True\n )\n validator.validate(tree)\n assert len(visited_nodes) == 3\n\n\ndef test_tag_validator():\n content=\"\"\"%YAML 1.1\n---\n$schema: http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\nid: asdf://somewhere.org/schemas/foo\ntag: asdf://somewhere.org/tags/foo\n...\n\"\"\"\n with asdf.config_context() as config:\n config.add_resource_mapping({\"asdf://somewhere.org/schemas/foo\": content})\n\n schema_tree = schema.load_schema(\"asdf://somewhere.org/schemas/foo\")\n instance = tagged.TaggedDict(tag=\"asdf://somewhere.org/tags/foo\")\n schema.validate(instance, schema=schema_tree)\n with pytest.raises(ValidationError):\n schema.validate(tagged.TaggedDict(tag=\"asdf://somewhere.org/tags/bar\"), schema=schema_tree)\n\n content=\"\"\"%YAML 1.1\n---\n$schema: http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\nid: asdf://somewhere.org/schemas/bar\ntag: asdf://somewhere.org/tags/bar-*\n...\n\"\"\"\n with asdf.config_context() as config:\n config.add_resource_mapping({\"asdf://somewhere.org/schemas/bar\": content})\n\n schema_tree = schema.load_schema(\"asdf://somewhere.org/schemas/bar\")\n instance = tagged.TaggedDict(tag=\"asdf://somewhere.org/tags/bar-2.5\")\n schema.validate(instance, schema=schema_tree)\n with pytest.raises(ValidationError):\n schema.validate(tagged.TaggedDict(tag=\"asdf://somewhere.org/tags/foo-1.0\"), schema=schema_tree)\n",
"from contextlib import contextmanager\nimport os\nimport re\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nimport pytest\n\nimport asdf\nfrom asdf.commands import main\n\n\[email protected](params=asdf.versioning.supported_versions)\ndef version(request):\n return request.param\n\n\[email protected]\ndef create_editor(tmp_path):\n \"\"\"\n Fixture providing a function that generates an editor script.\n \"\"\"\n def _create_editor(pattern, replacement):\n if isinstance(pattern, str):\n pattern = pattern.encode(\"utf-8\")\n if isinstance(replacement, str):\n replacement = replacement.encode(\"utf-8\")\n\n editor_path = tmp_path / \"editor.py\"\n\n content = f\"\"\"import re\nimport sys\n\nwith open(sys.argv[1], \"rb\") as file:\n content = file.read()\n\ncontent = re.sub({pattern!r}, {replacement!r}, content, flags=(re.DOTALL | re.MULTILINE))\n\nwith open(sys.argv[1], \"wb\") as file:\n file.write(content)\n\"\"\"\n\n with editor_path.open(\"w\") as file:\n file.write(content)\n\n return f\"python3 {editor_path}\"\n\n return _create_editor\n\n\n@contextmanager\ndef file_not_modified(path):\n \"\"\"\n Assert that a file was not modified during the context.\n \"\"\"\n original_mtime = os.stat(path).st_mtime_ns\n\n yield\n\n assert os.stat(path).st_mtime_ns == original_mtime\n\n\[email protected]\ndef mock_input(monkeypatch):\n \"\"\"\n Fixture providing a function that mocks the edit module's\n built-in input function.\n \"\"\"\n @contextmanager\n def _mock_input(pattern, response):\n called = False\n def _input(prompt=None):\n nonlocal called\n called = True\n assert prompt is not None and re.match(pattern, prompt)\n return response\n\n with monkeypatch.context() as m:\n m.setattr(\"builtins.input\", _input)\n yield\n\n assert called, \"input was not called as expected\"\n\n return _mock_input\n\n\[email protected](autouse=True)\ndef default_mock_input(monkeypatch):\n \"\"\"\n Fixture that raises an error when the program\n requests unexpected input.\n \"\"\"\n def _input(prompt=None):\n raise AssertionError(f\"Received unexpected request for input: {prompt}\")\n\n monkeypatch.setattr(\"builtins.input\", _input)\n\n\ndef test_no_blocks(tmp_path, create_editor, version):\n file_path = str(tmp_path/\"test.asdf\")\n\n with asdf.AsdfFile(version=version) as af:\n af[\"foo\"] = \"bar\"\n af.write_to(file_path)\n\n os.environ[\"EDITOR\"] = create_editor(r\"foo: bar\", \"foo: baz\")\n\n assert main.main_from_args([\"edit\", file_path]) == 0\n\n with asdf.open(file_path) as af:\n assert af[\"foo\"] == \"baz\"\n\n\ndef test_no_blocks_increase_size(tmp_path, create_editor, version):\n file_path = str(tmp_path/\"test.asdf\")\n\n with asdf.AsdfFile(version=version) as af:\n af[\"foo\"] = \"bar\"\n af.write_to(file_path)\n\n new_value = \"a\" * 32768\n os.environ[\"EDITOR\"] = create_editor(r\"foo: bar\", f\"foo: {new_value}\")\n\n # With no blocks, we can expand the existing file, so this case\n # shouldn't require confirmation from the user.\n assert main.main_from_args([\"edit\", file_path]) == 0\n\n with asdf.open(file_path) as af:\n assert af[\"foo\"] == new_value\n\n\ndef test_no_blocks_decrease_size(tmp_path, create_editor, version):\n file_path = str(tmp_path/\"test.asdf\")\n\n original_value = \"a\" * 32768\n\n with asdf.AsdfFile(version=version) as af:\n af[\"foo\"] = original_value\n af.write_to(file_path)\n\n os.environ[\"EDITOR\"] = create_editor(f\"foo: {original_value}\", \"foo: bar\")\n\n assert main.main_from_args([\"edit\", file_path]) == 0\n\n with 
asdf.open(file_path) as af:\n assert af[\"foo\"] == \"bar\"\n\n\ndef test_with_blocks(tmp_path, create_editor, version):\n file_path = str(tmp_path/\"test.asdf\")\n\n array1 = np.random.rand(100)\n array2 = np.random.rand(100)\n with asdf.AsdfFile(version=version) as af:\n af[\"array1\"] = array1\n af[\"array2\"] = array2\n af[\"foo\"] = \"bar\"\n af.write_to(file_path)\n\n os.environ[\"EDITOR\"] = create_editor(r\"foo: bar\", \"foo: baz\")\n\n assert main.main_from_args([\"edit\", file_path]) == 0\n\n with asdf.open(file_path) as af:\n assert af[\"foo\"] == \"baz\"\n assert_array_equal(af[\"array1\"], array1)\n assert_array_equal(af[\"array2\"], array2)\n\n\ndef test_with_blocks_increase_size(tmp_path, create_editor, version, mock_input):\n file_path = str(tmp_path/\"test.asdf\")\n\n array1 = np.random.rand(100)\n array2 = np.random.rand(100)\n with asdf.AsdfFile(version=version) as af:\n af[\"array1\"] = array1\n af[\"array2\"] = array2\n af[\"foo\"] = \"bar\"\n af.write_to(file_path)\n\n new_value = \"a\" * 32768\n os.environ[\"EDITOR\"] = create_editor(r\"foo: bar\", f\"foo: {new_value}\")\n\n # Abort without updating the file\n with mock_input(r\"\\(c\\)ontinue or \\(a\\)bort\\?\", \"a\"):\n with file_not_modified(file_path):\n assert main.main_from_args([\"edit\", file_path]) == 1\n\n # Agree to allow the file to be rewritten\n with mock_input(r\"\\(c\\)ontinue or \\(a\\)bort\\?\", \"c\"):\n assert main.main_from_args([\"edit\", file_path]) == 0\n\n with asdf.open(file_path) as af:\n assert af[\"foo\"] == new_value\n assert_array_equal(af[\"array1\"], array1)\n assert_array_equal(af[\"array2\"], array2)\n\n\n\ndef test_with_blocks_decrease_size(tmp_path, create_editor, version):\n file_path = str(tmp_path/\"test.asdf\")\n\n original_value = \"a\" * 32768\n\n array1 = np.random.rand(100)\n array2 = np.random.rand(100)\n with asdf.AsdfFile(version=version) as af:\n af[\"array1\"] = array1\n af[\"array2\"] = array2\n af[\"foo\"] = original_value\n af.write_to(file_path)\n\n os.environ[\"EDITOR\"] = create_editor(f\"foo: {original_value}\", \"foo: bar\")\n\n assert main.main_from_args([\"edit\", file_path]) == 0\n\n with asdf.open(file_path) as af:\n assert af[\"foo\"] == \"bar\"\n assert_array_equal(af[\"array1\"], array1)\n assert_array_equal(af[\"array2\"], array2)\n\n\ndef test_no_changes(tmp_path, create_editor, version):\n file_path = str(tmp_path/\"test.asdf\")\n\n with asdf.AsdfFile(version=version) as af:\n af[\"foo\"] = \"bar\"\n af.write_to(file_path)\n\n os.environ[\"EDITOR\"] = create_editor(r\"non-existent-string\", \"non-existent-string\")\n\n with file_not_modified(file_path):\n assert main.main_from_args([\"edit\", file_path]) == 0\n\n\ndef test_update_asdf_standard_version(tmp_path, create_editor, version, mock_input):\n file_path = str(tmp_path/\"test.asdf\")\n\n with asdf.AsdfFile(version=version) as af:\n af[\"foo\"] = \"bar\"\n af.write_to(file_path)\n\n os.environ[\"EDITOR\"] = create_editor(r\"^#ASDF_STANDARD .*?$\", \"#ASDF_STANDARD 999.999.999\")\n\n with file_not_modified(file_path):\n with mock_input(r\"\\(c\\)ontinue editing or \\(a\\)bort\\?\", \"a\"):\n assert main.main_from_args([\"edit\", file_path]) == 1\n\n\ndef test_update_yaml_version(tmp_path, create_editor, version, mock_input):\n file_path = str(tmp_path/\"test.asdf\")\n\n with asdf.AsdfFile(version=version) as af:\n af[\"foo\"] = \"bar\"\n af.write_to(file_path)\n\n os.environ[\"EDITOR\"] = create_editor(r\"^%YAML 1.1$\", \"%YAML 1.2\")\n\n with file_not_modified(file_path):\n with 
mock_input(r\"\\(c\\)ontinue editing or \\(a\\)bort\\?\", \"a\"):\n assert main.main_from_args([\"edit\", file_path]) == 1\n\n\ndef test_bad_yaml(tmp_path, create_editor, version, mock_input):\n file_path = str(tmp_path/\"test.asdf\")\n\n with asdf.AsdfFile(version=version) as af:\n af[\"foo\"] = \"bar\"\n af.write_to(file_path)\n\n os.environ[\"EDITOR\"] = create_editor(r\"foo: bar\", \"foo: [\")\n\n with file_not_modified(file_path):\n with mock_input(r\"\\(c\\)ontinue editing or \\(a\\)bort\\?\", \"a\"):\n assert main.main_from_args([\"edit\", file_path]) == 1\n\n\ndef test_validation_failure(tmp_path, create_editor, version, mock_input):\n file_path = str(tmp_path/\"test.asdf\")\n\n with asdf.AsdfFile(version=version) as af:\n af[\"array\"] = np.arange(100)\n af.write_to(file_path)\n\n os.environ[\"EDITOR\"] = create_editor(r\"byteorder: .*?$\", \"byteorder: med\")\n\n with file_not_modified(file_path):\n with mock_input(r\"\\(c\\)ontinue editing, \\(f\\)orce update, or \\(a\\)bort\\?\", \"a\"):\n assert main.main_from_args([\"edit\", file_path]) == 1\n\n with mock_input(r\"\\(c\\)ontinue editing, \\(f\\)orce update, or \\(a\\)bort\\?\", \"f\"):\n assert main.main_from_args([\"edit\", file_path]) == 0\n\n with open(file_path, \"rb\") as f:\n content = f.read()\n assert b\"byteorder: med\" in content\n\n\ndef test_asdf_open_failure(tmp_path, create_editor, version, mock_input):\n file_path = str(tmp_path/\"test.asdf\")\n\n with asdf.AsdfFile(version=version) as af:\n af[\"foo\"] = \"bar\"\n af.write_to(file_path)\n\n os.environ[\"EDITOR\"] = create_editor(r\"^#ASDF .*?$\", \"#HJKL 1.0.0\")\n\n with file_not_modified(file_path):\n with mock_input(r\"\\(c\\)ontinue editing or \\(a\\)bort\\?\", \"a\"):\n assert main.main_from_args([\"edit\", file_path]) == 1\n\n\ndef test_non_asdf_file(tmp_path):\n file_path = str(tmp_path/\"test.asdf\")\n\n with open(file_path, \"w\") as f:\n f.write(\"Dear diary...\")\n\n with file_not_modified(file_path):\n assert main.main_from_args([\"edit\", file_path]) == 1\n"
] |
[
[
"numpy.uint32",
"numpy.uint8",
"numpy.float16",
"numpy.int32",
"numpy.int8",
"numpy.ndarray",
"numpy.ulonglong",
"numpy.int16",
"numpy.testing.assert_array_equal",
"numpy.longlong",
"numpy.uint16",
"numpy.float64",
"numpy.bytes_",
"numpy.float32",
"numpy.str_",
"numpy.uint64"
],
[
"numpy.testing.assert_array_equal",
"numpy.arange",
"numpy.random.rand"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
argearriojas/ModEx
|
[
"b440fb69870960e2c088d1f9afdfdce0beea1dda",
"b440fb69870960e2c088d1f9afdfdce0beea1dda"
] |
[
"Scripts/.ipynb_checkpoints/Main-checkpoint.py",
"Scripts/.ipynb_checkpoints/pubtator-checkpoint.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport pandas as pd\nimport sys\nfrom Bio import Entrez\nfrom Rules_Class import Rules\nimport functions as fn\nfrom configCHIP import output_directory\nfrom configCHIP import input_directory\nfrom configCHIP import result_directory\n#import configCHIP\n#part=sys.argv[1]\npart='part0'\n\nPositive=[]\n[Positive.append(line.strip().upper()) for line in open(input_directory+\"Positive.txt\")]\nNegative=[]\n[Negative.append(line.strip().upper()) for line in open(input_directory+\"Negative.txt\")]\n\ngenes_ents=input_directory + \"ALL_Human_Genes_Info.csv\" #NCBI\ngenes=pd.read_csv(genes_ents,sep=',',header=(0))\ngenes.fillna('', inplace=True)\n\nlookup_ids=pd.read_csv(input_directory+\"ncbi_id_lookup.csv\",sep='\\t',header=(0))\n\nchip_ents=input_directory + \"ChIPfilter-1K-950_ents.txt\"\nrels=pd.read_csv(output_directory+part+\".csv\",sep='\\t',header=(0),dtype=object)\nents=pd.read_csv(chip_ents,sep='\\t',header=(0))\n\n\n\nCHIP_result=pd.DataFrame(columns=['srcuid','trguid','src_entrez','trg_entrez','srcname','trgname','find_pmid','all_pmids','mode','score','evi_pmid','evi_sent','report'])\n\n\nfor row in rels.iterrows():\n query_genes, query_id=fn.find_names(row,ents,genes)\n query_id=[int(query_id[0]),int(query_id[1])]\n print(query_genes)\n query_genes, query_id,single_gene,single_id =fn.make_query(genes,lookup_ids,query_genes,query_id)\n status=''\n try:\n myterm=fn.term_maker(single_gene,genes)\n #### ESearch: Searching the Entrez databases\n Entrez.email=\"[email protected]\"\n handle=Entrez.esearch(db=\"pubmed\", term=myterm, retmax=100000000)\n record=Entrez.read(handle)\n except:\n status+=\"Enterz Fetch Error|||\"\n print(status)\n CHIP_result=CHIP_result.append({'srcuid':row[1]['srcuid'],'trguid':row[1]['trguid'],'src_entrez':single_id[0],'trg_entrez':single_id[1],'srcname':single_gene[0],'trgname':single_gene[1],'find_pmid':None,'all_pmids':None,'mode':None,'score':None,'evi_pmid':None,'evi_sent':None,'report':status},ignore_index=True)\n continue\n PubIDs = record[\"IdList\"]\n if(len(PubIDs)>0):\n sum_ranks=[]\n evi_pmids=[]\n evi_sentence=[]\n all_pmids=';'.join(PubIDs)\n for PubID in PubIDs:\n abstract=''\n ranks=[]\n annot_df=pd.DataFrame(columns=['type','id','text','offset','length'])\n try:\n annot_df, abstract=fn.pubtator_annot(annot_df,PubID)\n except:\n abstract=fn.ret_abstract(PubID)\n if(abstract=='?'):\n status+=\"PMID=[\"+PubID+\"] does not exist any more|||\"\n continue # remove it from the output results in TRRUST\n else:\n status+=\"PMID=[\"+PubID+\"] PubTator Response is not readable, Try to annotate manually|||\"\n #print(status)\n# try:\n# beCAS_lookup_full=fn.beCAS_lookup(PubID,query_id)\n# beCAS_lookup=beCAS_lookup_full[['type','id','text','offset','length']]\n# annot_df=pd.concat([annot_df,beCAS_lookup], ignore_index=True)\n# except:\n# status+=\"beCAS Server error|||\"\n\n lookup_results=fn.lookup_annot(abstract,query_genes,query_id,lookup_ids)\n annot_df=annot_df.append(lookup_results)\n# surface_annot=fn.surface_similarity(abstract, genes, query_genes, query_id,lookup_ids,single_id)\n# annot_df=annot_df.append(surface_annot)\n annot_df=annot_df.drop_duplicates(subset=['id','offset'])\n\n annot_df=fn.multiple_taxonomy(annot_df, query_id)\n\n\n annot_df=annot_df.reset_index(drop=True)\n candidate_sentences, covered=fn.candidate_sentence(annot_df,abstract,query_id)\n if(len(candidate_sentences.index)==0):\n status+=\"PMID=[\"+PubID+\"] No co-existed 
sentences found in the abstract|||\"\n #print(status)\n continue\n for sentence in candidate_sentences.itertuples():\n obj=Rules(Positive,Negative,annot_df,covered,abstract,query_genes,query_id,sentence)\n depgraphs=fn.dep_parser('9000',sentence,annot_df,query_id,single_id,Positive,Negative,2)\n if(depgraphs):\n try:\n obj. multiplication_score(depgraphs, single_id)\n except:\n status+=\"PMID=[\"+PubID+\"] dependency graph score error|||\"\n else:\n status+=\"PMID=[\"+PubID+\"] dependency graph co-occurance of single ids error|||\"\n\n #obj.search_ranking()\n ranks.append(obj.rank)\n if(obj.rank!=0):\n evi_sentence.append('['+PubID+']'+sentence.sentence)\n evi_pmids.append(PubID)\n if(len(ranks)!=0):\n sum_ranks.append(sum(ranks))\n mode=''\n rank_T=sum(sum_ranks)\n if(rank_T>0):\n mode='positive'\n if(rank_T<0):\n mode='negative'\n\n evi_sentence=';'.join(evi_sentence)\n evi_pmids=';'.join(evi_pmids)\n CHIP_result=CHIP_result.append({'srcuid':row[1]['srcuid'],'trguid':row[1]['trguid'],'src_entrez':single_id[0],'trg_entrez':single_id[1],'srcname':single_gene[0],'trgname':single_gene[1],'find_pmid':str(len(all_pmids)),'all_pmids':all_pmids,'mode':mode,'score':str(rank_T),'evi_pmid':evi_pmids,'evi_sent':evi_sentence,'report':status},ignore_index=True)\n else:\n status+=\"Not found any PMIDs for this interaction\"\n print(status)\n CHIP_result=CHIP_result.append({'srcuid':row[1]['srcuid'],'trguid':row[1]['trguid'],'src_entrez':single_id[0],'trg_entrez':single_id[1],'srcname':single_gene[0],'trgname':single_gene[1],'find_pmid':str('0'),'all_pmids':None,'mode':None,'score':None,'evi_pmid':None,'evi_sent':None,'report':status},ignore_index=True)\n\n\nCHIP_result.to_csv(result_directory+part+\"-CHIP-1K-950-result.csv\",sep = '\\t')",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport pandas as pd\nfrom Bio import Entrez\nfrom Rules_Class import Rules\nimport functions as fn\nimport networkx as nx\n\n\ndirectory=\"/home/saman/Dropbox/UMB/Dr-Koroush/Code/Data/\"\nPositive=[]\n[Positive.append(line.strip().upper()) for line in open(directory+\"Positive.txt\")]\nNegative=[]\n[Negative.append(line.strip().upper()) for line in open(directory+\"Negative.txt\")]\n\ngenes_ents=directory + \"ALL_Human_Genes_Info.csv\" #NCBI\ngenes=pd.read_csv(genes_ents,sep=',',header=(0))\ngenes.fillna('', inplace=True)\n\nlookup_ids=pd.read_csv(directory+\"ncbi_id_lookup.csv\",sep='\\t',header=(0))\n\n\nquery_genes=['AATF','BAX'] #Symbol MUST [TF,Target]\nquery_id=[26574,581]#NCBI ID MUST [TF,Target]\nPubIDs =['22909821']\nquery_genes, query_id,single_gene,single_id =fn.make_query(genes,lookup_ids,query_genes,query_id)\nmyterm=fn.term_maker(single_gene,genes)\n\n\nEntrez.email=\"[email protected]\"\nhandle=Entrez.esearch(db=\"pubmed\", term=myterm, retmax=100000000)\nrecord=Entrez.read(handle)\nIDlists = record[\"IdList\"]\nif(len(PubIDs)>0):\n sum_ranks=[]\n for PubID in PubIDs:\n ranks=[]\n annot_df=pd.DataFrame(columns=['type','id','text','offset','length'])\n try:\n annot_df, abstract=fn.pubtator_annot(annot_df,PubID)\n except:\n abstract=fn.ret_abstract(PubID)\n if(abstract=='?'):\n print(\"PMID=[\"+PubID+\"] does not exist any more!\")\n continue # remove it from the output results in TRRUST\n else:\n print(\"PubTator Response is not readable...!\")\n print(\"Try to annotate manually...\")\n try:\n beCAS_lookup_full=fn.beCAS_lookup(PubID,query_id)\n beCAS_lookup=beCAS_lookup_full[['type','id','text','offset','length']]\n annot_df=pd.concat([annot_df,beCAS_lookup], ignore_index=True)\n except:\n print(\"beCAS Server error...!\")\n\n #add surface similarity\n lookup_results=fn.lookup_annot(abstract,query_genes,query_id,lookup_ids)\n annot_df=pd.concat([annot_df,lookup_results],ignore_index=True)\n surface_annot=fn.surface_similarity(abstract, genes, query_genes, query_id,lookup_ids,single_id)\n annot_df=pd.concat([annot_df,surface_annot],ignore_index=True)\n annot_df=annot_df.drop_duplicates(subset=['id','offset'])\n\n annot_df=fn.multiple_taxonomy(annot_df, query_id)\n\n #depgraphs=fn.dep_parser('9000',abstract,annot_df,query_id,single_id,Positive,Negative)\n #node_topo=nx.topological_sort(depgraphs)\n #for node in depgraphs:\n #if depgraphs.out_degree(node)==0: #it's a leaf\n # paths.append(nx.shortest_path(G, root, node))\n\n\n\n\n annot_df=annot_df.reset_index(drop=True)\n candidate_sentences, covered=fn.candidate_sentence(annot_df,abstract,query_id)\n\n if(len(candidate_sentences.index)==0):\n print('No co-existed sentences found in the abstract...!')\n continue\n target_sentences=[]\n for sentence in candidate_sentences.itertuples():\n obj=Rules(Positive,Negative,annot_df,covered,abstract,query_genes,query_id,sentence)\n depgraphs=fn.dep_parser('9000',sentence,annot_df,query_id,single_id,Positive,Negative,2)\n if(depgraphs):\n obj. 
multiplication_score(depgraphs, single_id)\n else:\n continue\n #obj.search_ranking()\n ranks.append(obj.rank)\n if(obj.rank!=0):\n target_sentences.append([sentence.sentence,obj.rank])\n\n rank_T1=sum(ranks)\n mode=''\n if(len(ranks)==0):\n continue\n if(rank_T1==0): sum_ranks.append(rank_T1)\n if(rank_T1>0): mode='positive'\n if(rank_T1<0): mode='negative'\n for sentence in target_sentences:\n print(str(single_id[0]) + \"\\t\" + str(single_id[1]) + \"\\t\" + single_gene[0]+ \"\\t\" + single_gene[1] + \"\\t\" + mode + \"\\t\" + str(sentence[1]) + \"\\t\" + sentence[0] + \"\\n\")\n\n sum_ranks.append(rank_T1)\n\n\n rank_T2=sum(sum_ranks)\n if(len(sum_ranks)==0):\n print(\"There is no ranking value...!\")\n if(rank_T2==0 and len(sum_ranks)!=0):\n print(\"The rank value is zero...!\")\n if(rank_T2>0):\n print(str(single_id[0]) + \"\\t\" + str(single_id[1]) + \"\\t\" + single_gene[0]+ \"\\t\" + single_gene[1] + \"\\t\" + \"positive\" + \"\\t\" + str(rank_T2) + \"\\n\")\n if(rank_T2<0):\n print(str(single_id[0]) + \"\\t\" + str(single_id[1]) + \"\\t\" + single_gene[0]+ \"\\t\" + single_gene[1] + \"\\t\" + \"negative\" + \"\\t\" + str(rank_T2) + \"\\n\")\nelse:\n print(\"No PMID found for the interacion...!\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame"
],
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Erotemic/misc
|
[
"6f8460a690d05e7e0117becc6cae9902cbe2cedd",
"6f8460a690d05e7e0117becc6cae9902cbe2cedd"
] |
[
"tests/python/bench_template.py",
"notes/hdd_rates.py"
] |
[
"\n\ndef benchmark_template():\n import ubelt as ub\n import pandas as pd\n import timerit\n\n def method1(x, y, z):\n ret = []\n for i in range((x + y) * z):\n ret.append(i)\n return ret\n\n def method2(x, y, z):\n ret = [i for i in range((x + y) * z)]\n return ret\n\n method_lut = locals() # can populate this some other way\n\n ti = timerit.Timerit(100, bestof=10, verbose=2)\n\n basis = {\n 'method': ['method1', 'method2'],\n 'x': list(range(7)),\n 'y': [0, 100],\n 'z': [2, 3]\n # 'param_name': [param values],\n }\n xlabel = 'x'\n kw_labels = ['x', 'y', 'z']\n group_labels = {\n 'style': ['y'],\n 'size': ['z'],\n }\n group_labels['hue'] = list(\n (ub.oset(basis) - {xlabel}) - set.union(*map(set, group_labels.values())))\n grid_iter = list(ub.named_product(basis))\n\n # For each variation of your experiment, create a row.\n rows = []\n for params in grid_iter:\n group_keys = {}\n for gname, labels in group_labels.items():\n group_keys[gname + '_key'] = ub.repr2(\n ub.dict_isect(params, labels), compact=1, si=1)\n key = ub.repr2(params, compact=1, si=1)\n kwargs = ub.dict_isect(params.copy(), kw_labels)\n method = method_lut[params['method']]\n # Timerit will run some user-specified number of loops.\n # and compute time stats with similar methodology to timeit\n for timer in ti.reset(key):\n # Put any setup logic you dont want to time here.\n # ...\n with timer:\n # Put the logic you want to time here\n method(**kwargs)\n row = {\n 'mean': ti.mean(),\n 'min': ti.min(),\n 'key': key,\n **group_keys,\n **params,\n }\n rows.append(row)\n\n # The rows define a long-form pandas data array.\n # Data in long-form makes it very easy to use seaborn.\n data = pd.DataFrame(rows)\n data = data.sort_values('min')\n print(data)\n\n plot = True\n if plot:\n # import seaborn as sns\n # kwplot autosns works well for IPython and script execution.\n # not sure about notebooks.\n import kwplot\n sns = kwplot.autosns()\n\n plotkw = {}\n for gname, labels in group_labels.items():\n if labels:\n plotkw[gname] = gname + '_key'\n\n # Your variables may change\n ax = kwplot.figure(fnum=1, doclf=True).gca()\n sns.lineplot(data=data, x=xlabel, y='min', marker='o', ax=ax, **plotkw)\n ax.set_title('Benchmark')\n ax.set_xlabel('A better x-variable description')\n ax.set_ylabel('A better y-variable description')\n",
"def hard_drive_failure_analysis():\n \"\"\"\n References:\n https://www.backblaze.com/blog/backblaze-hard-drive-stats-q2-2020/\n\n https://f001.backblazeb2.com/file/Backblaze_Blog/Q2_2020_Drive_Stats_Chart_Data.zip\n https://f001.backblazeb2.com/file/Backblaze_Blog/Q2_2019_Drive_Stats_Chart_Data.zip\n\n \"\"\"\n import ubelt as ub\n import random\n import time\n\n url_template = 'https://f001.backblazeb2.com/file/Backblaze_Blog/{}_{}_Drive_Stats_Chart_Data.zip'\n success_urls = []\n failed_urls = []\n got_fpaths = []\n for year in range(2017, 2021):\n for q in [1, 2, 3, 4]:\n try:\n url = url_template.format('Q' + str(q), year)\n print('url = {!r}'.format(url))\n # Play nice, don't crash their servers\n fpath = ub.grabdata(url)\n print('Got fpath = {!r}'.format(fpath))\n success_urls.append(url)\n got_fpaths.append(fpath)\n if 0:\n # only need to do this the first time\n time.sleep(1 + random.random())\n except Exception:\n print('Failed to grab url = {!r}'.format(url))\n failed_urls.append(url)\n pass\n\n got_fpaths = [\n '/home/joncrall/.cache/ubelt/Q3_2017_Drive_Stats_Chart_Data.zip',\n '/home/joncrall/.cache/ubelt/Q1_2018_Drive_Stats_Chart_Data.zip',\n '/home/joncrall/.cache/ubelt/Q2_2018_Drive_Stats_Chart_Data.zip',\n '/home/joncrall/.cache/ubelt/Q3_2018_Drive_Stats_Chart_Data.zip',\n '/home/joncrall/.cache/ubelt/Q1_2019_Drive_Stats_Chart_Data.zip',\n '/home/joncrall/.cache/ubelt/Q2_2019_Drive_Stats_Chart_Data.zip',\n '/home/joncrall/.cache/ubelt/Q2_2020_Drive_Stats_Chart_Data.zip'\n ]\n\n from torch_liberator.util.util_zip import zopen, split_archive\n split_archive(fpath)\n\n import zipfile\n\n import pandas as pd\n\n rates = []\n\n for fpath in got_fpaths:\n myzip = zipfile.ZipFile(fpath, 'r')\n name = ub.peek([name for name in myzip.namelist() if not name.startswith('_')])\n internal_fpath = fpath + '/' + name\n\n internal_file = zopen(internal_fpath, mode='rb')\n table = pd.read_excel(internal_file)\n\n found = None\n class BreakException(Exception):\n pass\n try:\n for rx, row in table.iterrows():\n for cx, col in enumerate(row):\n if isinstance(col, str):\n col = col.replace('\\n', '').replace(' ', '').lower()\n print('col = {!r}'.format(col))\n if col in {'afr', 'annualizedfailurerate', 'failurerate'}:\n found = (rx, cx)\n raise BreakException\n\n except BreakException:\n pass\n\n if found is None:\n raise Exception\n\n rx, cx = found\n print('table = {!r}'.format(table))\n\n final_rate = table.iloc[-1].iloc[cx]\n rates.append(final_rate)\n\n drive_fails = table.iloc[-1].iloc[-2]\n drive_days = table.iloc[-1].iloc[-3]\n drive_count = table.iloc[-1].iloc[-4]\n print('final_rate = {!r}'.format(final_rate))\n\n # Lets say just overall every year your HDD has a 1.45% chance of failing\n\n annualize_fail_rate = 0.0145\n\n \"\"\"\n\n rate = expected # events in 1 time period\n\n P(k events in t timesteps) = exp(- rate * t) * ((rate * time) ** k) / k!\n\n\n The probability we wait more than t for an event is\n\n P(T > t) = exp(-rate * t)\n\n The probability that the even will happen before time t is:\n\n P(T <= t) = 1 - exp(-rate * t)\n \"\"\"\n\n import scipy.stats\n import numpy as np\n # According to [1] There is a ~1.45% chance of a drive failing each year\n # .. 
[1] https://www.backblaze.com/blog/backblaze-hard-drive-stats-q2-2020/\n\n # We can model a Poisson distribution to ask some questions\n λ = 1.45 / 100 # probability of failure within a year\n y = 1 # number of years\n k = 1 # number of events (failures)\n\n def probabilities_for_y_years(y):\n ##\n ##\n # The PMF is the probability that exactly k failures occur in y years\n print('\\nIn y={} years we can expect'.format(y))\n\n rv = scipy.stats.poisson(mu=λ * y)\n\n k = 1\n p_one_fail = rv.pmf(k)\n print('p_one_fail = {:.4f}%'.format(p_one_fail * 100))\n k = 2\n p_two_fail = rv.pmf(k)\n print('p_two_fail = {:.4f}%'.format(p_two_fail * 100))\n\n # The CDF(k) is the probability the k or fewer failures occur in y years.\n # So, the probability k or more events occur is 1 - CDF(k - 1)\n # k or fewer, so 1 - CDF is the probability more than k events occur\n k = 1\n p_atleast_one_fail = 1 - rv.cdf(k - 1)\n print('p_atleast_one_fail = {:.4f}%'.format(p_atleast_one_fail * 100))\n\n k = 2\n p_atleast_two_fail = 1 - rv.cdf(k - 1)\n print('p_atleast_two_fail = {:.4f}%'.format(p_atleast_two_fail * 100))\n\n probabilities_for_y_years(y=1)\n probabilities_for_y_years(y=5)\n probabilities_for_y_years(y=10)\n probabilities_for_y_years(y=15)\n\n\n ##\n ##\n # The PMF is the probability that exactly k failures occur in y years\n k = 1\n p_one_fail = rv.pmf(k)\n print('p_one_fail = {:.4f}%'.format(p_one_fail * 100))\n k = 2\n p_two_fail = rv.pmf(k)\n print('p_two_fail = {:.4f}%'.format(p_two_fail * 100))\n\n # The CDF(k) is the probability the k or fewer failures occur in y years.\n # So, the probability k or more events occur is 1 - CDF(k - 1)\n # k or fewer, so 1 - CDF is the probability more than k events occur\n k = 1\n p_atleast_one_fail = 1 - rv.cdf(k - 1)\n print('p_atleast_one_fail = {:.4f}%'.format(p_atleast_one_fail * 100))\n\n k = 2\n p_atleast_two_fail = 1 - rv.cdf(k - 1)\n print('p_atleast_two_fail = {:.4f}%'.format(p_atleast_two_fail * 100))\n\n\n\n\n # Probability k disks fail after y years\n k = 1\n p_one_fail = ((λ * y) ** k) * np.exp(-λ * y) / (scipy.special.factorial(k))\n print('p_one_fail = {:.4f}%'.format(p_one_fail * 100))\n\n k = 2\n p_two_fail = ((λ * y) ** k) * np.exp(-λ * y) / (scipy.special.factorial(k))\n print('p_two_fail = {:.4f}%'.format(p_two_fail * 100))\n"
] |
[
[
"pandas.DataFrame"
],
[
"numpy.exp",
"pandas.read_excel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mepearson/Dash
|
[
"b8755882818ac6d064a3c91c1b0eaec930b10a10"
] |
[
"dynamic_sql_scatter.py"
] |
[
"# dash libs\r\nimport collections\r\nimport dash\r\nimport pandas as pd\r\nfrom sqlalchemy import create_engine\r\n\r\n# dash interactive states\r\nfrom dash.dependencies import Input, Output, State\r\nfrom dash.exceptions import PreventUpdate\r\n\r\n# dash components\r\nimport dash_html_components as html\r\nimport dash_core_components as dcc\r\nimport dash_table\r\n\r\n# Plotly figure libraries\r\nimport plotly.express as px\r\n\r\n# set connection string\r\nuser = 'user'\r\npassword = 'password'\r\nDATABASE_URI = 'postgres+psycopg2://{}:{}@localhost:5432/dataviz'.format(user,password)\r\ncon = create_engine(DATABASE_URI)\r\n\r\n#styling\r\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\r\n\r\ndimensions = [\"x\", \"y\", \"color\", \"facet_col\", \"facet_row\"]\r\ndgraph = dimensions + ['hover-dropdown']\r\nuser_cols = {'':{},\r\n 'cycles':{'crop', 'location',\r\n 'planting_date', 'nitrogen_rate', 'weed_fraction', 'yield',\r\n 'year','unique_id'},\r\n 'economic':{}}\r\n\r\n#Config elements\r\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\napp.config.suppress_callback_exceptions = True\r\n\r\ntables = ['cycles']\r\n# Layout\r\n\r\napp.layout = html.Div([\r\n dcc.Store(id='s-cols'),\r\n dcc.Store(id='s-data'),\r\n html.Div([\r\n html.Div([\r\n html.P(['Datasource: ']),\r\n dcc.Dropdown(id='dd-table',\r\n options=[dict(label=x, value=x) for x in tables]\r\n ),\r\n html.Button('Show Data', id='btn-table'),\r\n html.P(['X Axis: ']),\r\n dcc.Dropdown(id='dd-x',options=[]),\r\n html.P(['Y Axis: ']),\r\n dcc.Dropdown(id='dd-y'),\r\n html.P(['Color: ']),\r\n dcc.Dropdown(id='dd-color'),\r\n html.P(['Facet Column: ']),\r\n dcc.Dropdown(id='dd-facet_col'),\r\n html.P(['Facet Row: ']),\r\n dcc.Dropdown(id='dd-facet_row'),\r\n html.P(['On Hover show: ']),\r\n html.Div([dcc.Dropdown(id='dd-hover',multi=True)]),\r\n # html.Div(id='collist'),\r\n # dcc.Dropdown(id='dd-cols'),\r\n # html.Div(id='table'),\r\n html.Div(id='test'),\r\n html.Div([\r\n html.Button('Build Graph', id='btn-graph'),\r\n ],style={'float':'right'})\r\n ],className=\"four columns\"),\r\n html.Div([\r\n dcc.Graph(id='g-scatter')\r\n ],className=\"eight columns\")\r\n ], className=\"row\"),\r\n html.Div([\r\n html.Div(id='dt-table')\r\n ],className=\"row\")\r\n])\r\n\r\n# Callbacks\r\n# Query SQL for selected table to generate columns list\r\[email protected]([Output('s-cols', 'data'),Output('s-data', 'data')],\r\n [Input(\"dd-table\", \"value\")],\r\n [State('s-cols', 'data'),State('s-data', 'data')]\r\n )\r\ndef update_cols(table, cols, data):\r\n if table is None or table == '':\r\n raise PreventUpdate\r\n col_list = list(user_cols[table])\r\n col_list.sort(key=str.lower)\r\n select_cols = \", \".join(list(col_list))\r\n query = 'SELECT {} FROM {}'.format(select_cols,table)\r\n dataf = pd.read_sql(query,con)\r\n return [col_list, dataf.to_dict('records')]\r\n\r\[email protected](Output('dt-table', 'children'),\r\n [Input('btn-table', 'n_clicks')],\r\n [State('s-data', 'data')]\r\n )\r\ndef show_data(n_clicks,sdata):\r\n if n_clicks is None:\r\n raise PreventUpdate\r\n tabledata = pd.DataFrame(sdata)\r\n dt = [dash_table.DataTable(\r\n id='table',\r\n columns=[{\"name\": i, \"id\": i} for i in tabledata.columns],\r\n data=tabledata.to_dict('records'),\r\n )]\r\n return dt\r\n\r\n#Update options for all graph component elements\r\nfor d in ('dd-x','dd-y','dd-color','dd-facet_col','dd-facet_row','dd-hover'):\r\n @app.callback(\r\n Output(d, \"options\"),\r\n [Input('s-cols', 
'modified_timestamp')],\r\n [State('s-cols', 'data')])\r\n def update_dropdown(ts, col_list):\r\n if ts is None:\r\n raise PreventUpdate\r\n data_options = [{\"label\": i, \"value\":i} for i in col_list]\r\n return data_options\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True,port=8060)\r\n"
] |
[
[
"pandas.read_sql",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
DuJiajun1994/RepeatBuyerPrediction
|
[
"3e77aa3c9468060b335bd9be056cd9c3e1975393"
] |
[
"tools/date_process.py"
] |
[
"import pandas as pd\r\nimport numpy as np\r\n\r\nuser_log = pd.read_csv('user_log_format1.csv')\r\nuser_log = user_log.drop('item_id', 1)\r\nuser_log = user_log.drop('cat_id', 1)\r\nuser_log = user_log.drop('seller_id', 1)\r\nuser_log = user_log.drop('brand_id', 1)\r\n#7 months + 11.11 mul 4 action_type\r\nresult = [0] * (8*4+1)\r\nFin_result = [0] * 33\r\nFin_result = np.array(Fin_result)\r\nuser_id = user_log.iloc[0, 0]\r\n\r\nfor ix, row in user_log.iterrows():\r\n\r\n if ix == 0:\r\n index = (int(row[\"time_stamp\"] / 100) - 5) * 4 + row[\"action_type\"]\r\n result[index] += 1\r\n result[32] = user_id\r\n\r\n elif row[\"user_id\"] == user_id:\r\n if row[\"time_stamp\"] == 1111:\r\n index = int(28 + row[\"action_type\"])\r\n else:\r\n index = (int(row[\"time_stamp\"] / 100) - 5) * 4 + row[\"action_type\"]\r\n result[index] += 1\r\n\r\n else:\r\n result = np.array(result)\r\n Fin_result = np.row_stack((Fin_result, result))\r\n result = [0] * (8 * 4+1)\r\n user_id = row[\"user_id\"]\r\n if row[\"time_stamp\"] == 1111:\r\n index = int(28 + row[\"action_type\"])\r\n else:\r\n index = (int(row[\"time_stamp\"] / 100) - 5) * 4 + row[\"action_type\"]\r\n result[index] += 1\r\n result[32] = user_id\r\n\r\n if ix % 10000 == 0:\r\n print('processing %d w' % int(ix/10000))\r\n\r\nFin_result = np.row_stack((Fin_result, result))\r\nFin_result = np.delete(Fin_result, 0, axis=0)\r\nFin_result = Fin_result[np.lexsort(Fin_result.T)]\r\nFin_result = np.delete(Fin_result, [32], axis=1)\r\nnp.savetxt('user_date_ver2.txt', Fin_result, fmt=\"%d\")\r\n\r\n"
] |
[
[
"pandas.read_csv",
"numpy.lexsort",
"numpy.delete",
"numpy.row_stack",
"numpy.savetxt",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mhilmiasyrofi/CodeXGLUE
|
[
"6b267a0047319378954334820a32ea916848d1f6"
] |
[
"Code-Code/ActionableWarning-detection/code/run.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).\nGPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned\nusing a masked language modeling (MLM) loss.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport pickle\nimport random\nimport re\nimport shutil\nimport sklearn\nfrom sklearn.metrics import accuracy_score\n\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nimport json\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept:\n from tensorboardX import SummaryWriter\n\nfrom tqdm import tqdm, trange\nimport multiprocessing\nfrom model import Model\ncpu_cont = multiprocessing.cpu_count()\nfrom transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,\n BertConfig, BertForMaskedLM, BertTokenizer,\n GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,\n OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,\n RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer,\n DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)\n\nlogger = logging.getLogger(__name__)\n\nMODEL_CLASSES = {\n 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),\n 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),\n 'bert': (BertConfig, BertForMaskedLM, BertTokenizer),\n 'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),\n 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)\n}\n\n\n\nclass InputFeatures(object):\n \"\"\"A single training/test features for a example.\"\"\"\n def __init__(self,\n input_tokens,\n input_ids,\n idx,\n label,\n\n ):\n self.input_tokens = input_tokens\n self.input_ids = input_ids\n self.idx=str(idx)\n self.label=label\n\n \ndef convert_examples_to_features(js,tokenizer,args):\n #source\n code=' '.join(js['func'].split())\n code_tokens=tokenizer.tokenize(code)[:args.block_size-2]\n source_tokens =[tokenizer.cls_token]+code_tokens+[tokenizer.sep_token]\n source_ids = tokenizer.convert_tokens_to_ids(source_tokens)\n padding_length = args.block_size - len(source_ids)\n source_ids+=[tokenizer.pad_token_id]*padding_length\n return InputFeatures(source_tokens,source_ids,js['idx'],js['target'])\n\nclass TextDataset(Dataset):\n def __init__(self, tokenizer, args, file_path=None):\n self.examples = []\n with open(file_path) as f:\n for line in f:\n js=json.loads(line.strip())\n self.examples.append(convert_examples_to_features(js,tokenizer,args))\n if 'train' in file_path:\n for idx, example in enumerate(self.examples[:3]):\n 
logger.info(\"*** Example ***\")\n logger.info(\"idx: {}\".format(idx))\n logger.info(\"label: {}\".format(example.label))\n logger.info(\"input_tokens: {}\".format([x.replace('\\u0120','_') for x in example.input_tokens]))\n logger.info(\"input_ids: {}\".format(' '.join(map(str, example.input_ids))))\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i): \n return torch.tensor(self.examples[i].input_ids),torch.tensor(self.examples[i].label)\n \n\ndef set_seed(seed=42):\n random.seed(seed)\n os.environ['PYHTONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n\n\ndef train(args, train_dataset, model, tokenizer):\n \"\"\" Train the model \"\"\" \n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n \n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, \n batch_size=args.train_batch_size,num_workers=4,pin_memory=True)\n args.max_steps=args.epoch*len( train_dataloader)\n args.save_steps=len( train_dataloader)\n args.warmup_steps=len( train_dataloader)\n args.logging_steps=len( train_dataloader)\n args.num_train_epochs=args.epoch\n model.to(args.device)\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps*0.1,\n num_training_steps=args.max_steps)\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\n output_device=args.local_rank,\n find_unused_parameters=True)\n\n checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')\n scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')\n optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')\n if os.path.exists(scheduler_last):\n scheduler.load_state_dict(torch.load(scheduler_last))\n if os.path.exists(optimizer_last):\n optimizer.load_state_dict(torch.load(optimizer_last))\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size * args.gradient_accumulation_steps * (\n torch.distributed.get_world_size() if args.local_rank != -1 else 1))\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", args.max_steps)\n \n global_step = args.start_step\n tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0\n best_mrr=0.0\n best_acc=0.0\n # model.resize_token_embeddings(len(tokenizer))\n model.zero_grad()\n \n for idx in range(args.start_epoch, int(args.num_train_epochs)): \n bar = tqdm(train_dataloader,total=len(train_dataloader))\n tr_num=0\n train_loss=0\n for step, batch in enumerate(bar):\n inputs = batch[0].to(args.device) \n labels=batch[1].to(args.device) \n model.train()\n loss,logits = model(inputs,labels)\n\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n tr_loss += loss.item()\n tr_num+=1\n train_loss+=loss.item()\n if avg_loss==0:\n avg_loss=tr_loss\n avg_loss=round(train_loss/tr_num,5)\n bar.set_description(\"epoch {} loss {}\".format(idx,avg_loss))\n\n \n if (step + 1) % args.gradient_accumulation_steps == 0:\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step() \n global_step += 1\n output_flag=True\n avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n logging_loss = tr_loss\n tr_nb=global_step\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n \n if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well\n results = evaluate(args, model, tokenizer,eval_when_training=True)\n for key, value in results.items():\n logger.info(\" %s = %s\", key, round(value,4)) \n # Save model checkpoint\n \n if results['eval_acc']>best_acc:\n best_acc=results['eval_acc']\n logger.info(\" \"+\"*\"*20) \n logger.info(\" Best acc:%s\",round(best_acc,4))\n logger.info(\" \"+\"*\"*20) \n \n checkpoint_prefix = 'checkpoint-best-acc'\n output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) \n if not os.path.exists(output_dir):\n os.makedirs(output_dir) \n model_to_save = model.module if hasattr(model,'module') else model\n output_dir = os.path.join(output_dir, '{}'.format('model.bin')) \n torch.save(model_to_save.state_dict(), output_dir)\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n \n\n\n\ndef evaluate(args, model, tokenizer,eval_when_training=False):\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_output_dir = args.output_dir\n\n eval_dataset = TextDataset(tokenizer, args,args.eval_data_file)\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n 
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)\n\n # multi-gpu evaluate\n if args.n_gpu > 1 and eval_when_training is False:\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n model.eval()\n logits=[] \n labels=[]\n for batch in eval_dataloader:\n inputs = batch[0].to(args.device) \n label=batch[1].to(args.device) \n with torch.no_grad():\n lm_loss,logit = model(inputs,label)\n eval_loss += lm_loss.mean().item()\n logits.append(logit.cpu().numpy())\n labels.append(label.cpu().numpy())\n nb_eval_steps += 1\n logits=np.concatenate(logits,0)\n labels=np.concatenate(labels,0)\n preds=logits[:,0]>0.5\n eval_acc=np.mean(labels==preds)\n eval_prec = sklearn.metrics.precision_score(labels, preds)\n eval_recall = sklearn.metrics.recall_score(labels, preds)\n eval_f1 = sklearn.metrics.f1_score(labels, preds)\n eval_loss = eval_loss / nb_eval_steps\n perplexity = torch.tensor(eval_loss)\n \n result = {\n \"eval_loss\": float(perplexity),\n \"eval_acc\":round(eval_acc,4),\n \"eval_prec\":round(eval_prec,4),\n \"eval_recall\":round(eval_recall,4),\n \"eval_f1\":round(eval_f1,4),\n }\n return result\n\ndef test(args, model, tokenizer):\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_dataset = TextDataset(tokenizer, args,args.test_data_file)\n\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running Test *****\")\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n model.eval()\n logits=[] \n labels=[]\n for batch in tqdm(eval_dataloader,total=len(eval_dataloader)):\n inputs = batch[0].to(args.device) \n label=batch[1].to(args.device) \n with torch.no_grad():\n logit = model(inputs)\n logits.append(logit.cpu().numpy())\n labels.append(label.cpu().numpy())\n\n logits=np.concatenate(logits,0)\n labels=np.concatenate(labels,0)\n preds=logits[:,0]>0.5\n with open(os.path.join(args.output_dir,\"predictions.txt\"),'w') as f:\n for example,pred in zip(eval_dataset.examples,preds):\n if pred:\n f.write(example.idx+'\\t1\\n')\n else:\n f.write(example.idx+'\\t0\\n') \n \n \n \ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--train_data_file\", default=None, type=str, required=True,\n help=\"The input training data file (a text file).\")\n parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--eval_data_file\", default=None, type=str,\n help=\"An optional input evaluation data file to evaluate the perplexity on (a text file).\")\n parser.add_argument(\"--test_data_file\", default=None, type=str,\n help=\"An optional input evaluation data file to evaluate the perplexity on (a text file).\")\n \n 
parser.add_argument(\"--model_type\", default=\"bert\", type=str,\n help=\"The model architecture to be fine-tuned.\")\n parser.add_argument(\"--model_name_or_path\", default=None, type=str,\n help=\"The model checkpoint for weights initialization.\")\n\n parser.add_argument(\"--mlm\", action='store_true',\n help=\"Train with masked-language modeling loss instead of language modeling.\")\n parser.add_argument(\"--mlm_probability\", type=float, default=0.15,\n help=\"Ratio of tokens to mask for masked language modeling loss\")\n\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Optional pretrained config name or path if not the same as model_name_or_path\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Optional pretrained tokenizer name or path if not the same as model_name_or_path\")\n parser.add_argument(\"--cache_dir\", default=\"\", type=str,\n help=\"Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)\")\n parser.add_argument(\"--block_size\", default=-1, type=int,\n help=\"Optional input sequence length after tokenization.\"\n \"The training dataset will be truncated in block of this size for training.\"\n \"Default to the model max input length for single sentence inputs (take into account special tokens).\")\n parser.add_argument(\"--do_train\", action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_test\", action='store_true',\n help=\"Whether to run eval on the dev set.\") \n parser.add_argument(\"--evaluate_during_training\", action='store_true',\n help=\"Run evaluation during training at each logging step.\")\n parser.add_argument(\"--do_lower_case\", action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n\n parser.add_argument(\"--train_batch_size\", default=4, type=int,\n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--eval_batch_size\", default=4, type=int,\n help=\"Batch size per GPU/CPU for evaluation.\")\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float,\n help=\"Weight deay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--num_train_epochs\", default=1.0, type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--max_steps\", default=-1, type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\")\n parser.add_argument(\"--warmup_steps\", default=0, type=int,\n help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument('--logging_steps', type=int, default=50,\n help=\"Log every X updates steps.\")\n parser.add_argument('--save_steps', type=int, default=50,\n help=\"Save checkpoint every X updates steps.\")\n parser.add_argument('--save_total_limit', type=int, default=None,\n help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')\n parser.add_argument(\"--eval_all_checkpoints\", action='store_true',\n help=\"Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number\")\n parser.add_argument(\"--no_cuda\", action='store_true',\n help=\"Avoid using CUDA when available\")\n parser.add_argument('--overwrite_output_dir', action='store_true',\n help=\"Overwrite the content of the output directory\")\n parser.add_argument('--overwrite_cache', action='store_true',\n help=\"Overwrite the cached training and evaluation sets\")\n parser.add_argument('--seed', type=int, default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--epoch', type=int, default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--fp16', action='store_true',\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\")\n parser.add_argument('--fp16_opt_level', type=str, default='O1',\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\")\n parser.add_argument(\"--local_rank\", type=int, default=-1,\n help=\"For distributed training: local_rank\")\n parser.add_argument('--server_ip', type=str, default='', help=\"For distant debugging.\")\n parser.add_argument('--server_port', type=str, default='', help=\"For distant debugging.\")\n\n\n \n\n args = parser.parse_args()\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend='nccl')\n args.n_gpu = 1\n args.device = device\n args.per_gpu_train_batch_size=args.train_batch_size//args.n_gpu\n args.per_gpu_eval_batch_size=args.eval_batch_size//args.n_gpu\n # Setup logging\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)\n\n\n\n # Set seed\n set_seed(args.seed)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Barrier to make sure only 
the first process in distributed training download model & vocab\n\n args.start_epoch = 0\n args.start_step = 0\n checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')\n if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):\n args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')\n args.config_name = os.path.join(checkpoint_last, 'config.json')\n idx_file = os.path.join(checkpoint_last, 'idx_file.txt')\n with open(idx_file, encoding='utf-8') as idxf:\n args.start_epoch = int(idxf.readlines()[0].strip()) + 1\n\n step_file = os.path.join(checkpoint_last, 'step_file.txt')\n if os.path.exists(step_file):\n with open(step_file, encoding='utf-8') as stepf:\n args.start_step = int(stepf.readlines()[0].strip())\n\n logger.info(\"reload model from {}, resume from {} epoch\".format(checkpoint_last, args.start_epoch))\n\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,\n cache_dir=args.cache_dir if args.cache_dir else None)\n config.num_labels=1\n tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,\n do_lower_case=args.do_lower_case,\n cache_dir=args.cache_dir if args.cache_dir else None)\n if args.block_size <= 0:\n args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model\n args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)\n if args.model_name_or_path:\n model = model_class.from_pretrained(args.model_name_or_path,\n from_tf=bool('.ckpt' in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir if args.cache_dir else None) \n else:\n model = model_class(config)\n\n model=Model(model,config,tokenizer,args)\n if args.local_rank == 0:\n torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Training\n if args.do_train:\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n train_dataset = TextDataset(tokenizer, args,args.train_data_file)\n if args.local_rank == 0:\n torch.distributed.barrier()\n\n train(args, train_dataset, model, tokenizer)\n\n\n\n # Evaluation\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n checkpoint_prefix = 'checkpoint-best-acc/model.bin'\n output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) \n model.load_state_dict(torch.load(output_dir)) \n model.to(args.device)\n result=evaluate(args, model, tokenizer)\n logger.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(round(result[key],4)))\n \n if args.do_test and args.local_rank in [-1, 0]:\n checkpoint_prefix = 'checkpoint-best-acc/model.bin'\n output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) \n model.load_state_dict(torch.load(output_dir)) \n model.to(args.device)\n test(args, model, tokenizer)\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n\n\n"
] |
[
[
"torch.load",
"torch.utils.data.DataLoader",
"numpy.concatenate",
"numpy.mean",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"sklearn.metrics.f1_score",
"numpy.exp",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.distributed.barrier",
"torch.tensor",
"sklearn.metrics.precision_score",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"sklearn.metrics.recall_score",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.nn.DataParallel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
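In the run.py row above, evaluate() and test() turn the model logits into binary predictions with a 0.5 threshold on the first column and score them with scikit-learn. A minimal standalone sketch of just that scoring step, using invented arrays in place of real model output:

import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

# Toy outputs: one positive-class probability per example (values invented for illustration).
logits = np.array([[0.9], [0.2], [0.7], [0.4], [0.6]])
labels = np.array([1, 0, 1, 1, 0])

# Same decision rule as the script: positive when the first column exceeds 0.5.
preds = logits[:, 0] > 0.5

print("acc ", accuracy_score(labels, preds))   # equivalent to the script's np.mean(labels == preds)
print("prec", precision_score(labels, preds))
print("rec ", recall_score(labels, preds))
print("f1  ", f1_score(labels, preds))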
sjang42/keraspp
|
[
"1496a20b50b1f7ae19f0da32c40c210285406e93"
] |
[
"my_ex2_2.py"
] |
[
"from keras import layers, models\nfrom keras import datasets\nfrom sklearn import preprocessing\nimport matplotlib.pyplot as plt\n\n\ndef boston_housing_data():\n (X_train, y_train), (X_test, y_test) = datasets.boston_housing.load_data()\n scaler = preprocessing.MinMaxScaler()\n\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n return (X_train, y_train), (X_test, y_test)\n\n\ndef plot_loss(history, title=None):\n if not isinstance(history, dict):\n history = history.history\n\n plt.plot(history['loss'])\n plt.plot(history['val_loss'])\n\n if title is not None:\n plt.title(title)\n\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Training', 'Validation'], loc=0)\n\n # plt.show()\n\n\nclass ANN_models_regression(models.Model):\n def __init__(self, input_size, hidden_size, output_size):\n hidden = layers.Dense(hidden_size)\n relu = layers.Activation('relu')\n output = layers.Dense(output_size)\n\n x = layers.Input(shape=(input_size,))\n h = relu(hidden(x))\n y = output(h)\n\n super().__init__(x, y)\n self.compile(loss='mse', optimizer='sgd')\n\n\ninput_size = 13\nhidden_size = 5\noutput_size = 1\n\nmodel = ANN_models_regression(input_size, hidden_size, output_size)\n\n(X_train, y_train), (X_test, y_test) = boston_housing_data()\nhistory = model.fit(X_train, y_train, epochs=100, batch_size=100, validation_split=0.2, verbose=2)\n\nperformance_test = model.evaluate(X_test, y_test, batch_size=100)\nprint('\\nTest Loss -> {}'.format(performance_test))\n\ny_predict = model.predict(X_test, batch_size=100)\n\nplot_loss(history)\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
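The my_ex2_2.py row above scales the Boston housing features by fitting MinMaxScaler on the training split and reusing the fitted scaler for the test split. A tiny sketch of that fit_transform/transform pattern with made-up arrays:

import numpy as np
from sklearn.preprocessing import MinMaxScaler

# Invented feature matrices standing in for the train/test splits.
X_train = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
X_test = np.array([[1.5, 25.0]])

scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)  # learn min/max from the training split only
X_test_scaled = scaler.transform(X_test)        # reuse those statistics for the test split

print(X_train_scaled)
print(X_test_scaled)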
jwzheng94/gps_time
|
[
"c9c5ec118b6bdbd1d3fdccb034765b5946eb0c98"
] |
[
"gps_time/utilities.py"
] |
[
"# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/10_utilities.ipynb (unless otherwise specified).\n\n__all__ = ['logger', 'arange_gpstime', 'validate_gps_week']\n\n# Cell\n\"\"\"Copyright 2020 The Aerospace Corporation\"\"\"\n\n# Cell\n\nimport numpy as np\n\nfrom typing import List\nfrom logging import getLogger\n\nfrom .core import GPSTime\n\nlogger = getLogger(__name__)\n\n# Cell\ndef arange_gpstime(\n start_gpstime: GPSTime, duration_s: float, step_ms: float\n) -> List[GPSTime]:\n \"\"\"Create a list of GPSTimes in sequence.\n\n The purpose of this function is to create a list that represents a\n sequence of GPSTimes of the specified duration with the specified step\n size.\n\n This function is an analogue of the `numpy.arange()` function, but\n operates on GPSTimes.\n\n Parameters\n ----------\n start_gpstime : GPSTime\n The GPSTime to start the sequence\n duration_s : float\n The duration of the sequence, in seconds\n step_ms : float\n The step size, in milliseconds\n\n Returns\n -------\n List[GPSTime]\n The sequence of GPSTimes\n\n Notes\n -----\n Like `numpy.arange`, this does not include the final element. That is, if\n the start is at 0 with a duration of 5 and step of 1, the sequence would\n return [0, 1, 2, 3, 4]\n\n See Also\n --------\n `numpy.arange()`\n `arange_datetime()`\n\n Todo\n ----\n .. todo:: Determine if this still works if a np.ndarray is returned\n instead of a list\n\n \"\"\"\n return list(start_gpstime + np.arange(0, duration_s, step_ms / 1000))\n\n# Cell\ndef validate_gps_week(full_week: int, gps_week: int) -> None:\n \"\"\"Validate that the week numbers are consistent.\n\n This function validates that the full GPS week number (i.e. the number of\n weeks since 6 Jan 1980) and the mod-1024 week numbers are consistent. If\n they are not, it raises an error.\n\n Parameters\n ----------\n full_week : int\n The number of weeks since 6 Jan 1980\n gps_week : int\n The mod-1024 GPS week\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If the `full_week` and `gps_week` disagree\n\n \"\"\"\n if full_week % 1024 != gps_week:\n raise ValueError(\n \"\".join([\"Full GPS Week {} must be mod 1024 of \", \"GPS Week {}\"]).format(\n full_week, gps_week\n )\n )"
] |
[
[
"numpy.arange"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
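The gps_time/utilities.py row above builds its GPSTime sequence from numpy.arange and validates week numbers modulo 1024. A short standalone sketch of just those two numeric pieces (GPSTime itself is not used here, and the week value is invented):

import numpy as np

# The offset sequence arange_gpstime adds to its start time: 5 s duration, 1000 ms step.
duration_s, step_ms = 5.0, 1000.0
offsets = np.arange(0, duration_s, step_ms / 1000)
print(offsets)  # [0. 1. 2. 3. 4.] : the end point is excluded, as the docstring notes

# The consistency rule validate_gps_week enforces between full and mod-1024 week numbers.
full_week = 2100
gps_week = full_week % 1024
assert full_week % 1024 == gps_week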
ig248/timeserio
|
[
"afc2a953a83e763418d417059493ef13a17d349c",
"afc2a953a83e763418d417059493ef13a17d349c",
"afc2a953a83e763418d417059493ef13a17d349c"
] |
[
"timeserio/preprocessing/pandas.py",
"timeserio/keras/utils.py",
"tests/test_pipeline/test_utils.py"
] |
[
"import numpy as np\nimport pandas as pd\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nfrom .utils import _as_list_of_str\n\n\ndef array_to_dataframe(\n array: np.ndarray, column: str, df=None\n) -> pd.DataFrame:\n \"\"\"Add 1D or 2D array as column in df.\n\n If no df provided, return a new one.\n \"\"\"\n if len(array.shape) < 0 or len(array.shape) > 2:\n raise ValueError('Expecting 1D or 2D array')\n if len(array.shape) == 1:\n array = array.reshape(-1, 1)\n n_columns = array.shape[1]\n cidx = pd.MultiIndex.from_arrays(\n [\n [column] * n_columns,\n range(n_columns),\n ]\n )\n df_arr = pd.DataFrame(array, columns=cidx)\n if df is None:\n df = df_arr\n else:\n df = _join_multilevel_dataframes([df, df_arr])\n return df\n\n\ndef _join_multilevel_dataframes(df_list):\n \"\"\"Concat multiple dataframes.\n\n Support a combination of 1- and 2-deep indices.\n \"\"\"\n minx_df = []\n for df in df_list:\n if isinstance(df.columns, pd.MultiIndex):\n minx_df.append(df)\n else:\n df.columns = pd.MultiIndex.from_product([df.columns, ['']])\n minx_df.append(df)\n # Join all dataframes together\n multi_concat = pd.concat(minx_df, axis=1)\n return multi_concat\n\n\nclass PandasColumnSelector(BaseEstimator, TransformerMixin):\n \"\"\"Select a sub-set of columns from a pandas DataFrame.\"\"\"\n\n def __init__(self, columns=None):\n self.columns = columns\n\n def fit(self, df, y=None, **fit_params):\n return self\n\n def transform(self, df):\n columns = _as_list_of_str(self.columns)\n subframe = df[columns]\n return subframe.copy()\n\n @property\n def required_columns(self):\n return set(_as_list_of_str(self.columns))\n\n def transformed_columns(self, input_columns):\n input_columns = set(_as_list_of_str(input_columns))\n if not self.required_columns <= input_columns:\n raise ValueError(f'Required columns are {self.required_columns}')\n return self.required_columns\n\n\ndef _get_column_as_tensor(s: pd.Series):\n \"\"\"Get every normal or TensorArray column as a 2D array.\"\"\"\n try:\n return s.tensor.values\n except AttributeError: # normal column\n return s.values.reshape(-1, 1)\n\n\nclass PandasValueSelector(BaseEstimator, TransformerMixin):\n \"\"\"Select scalar - or vector-valued feature cols, and return np.array.\n\n Optionally, cast the resulting arry to dtype.\n \"\"\"\n\n def __init__(self, columns=None, dtype=None):\n self.columns = columns\n self.dtype = dtype\n\n def fit(self, df, y=None, **fit_params):\n return self\n\n def transform(self, df):\n columns = _as_list_of_str(self.columns)\n any_tensors = any(hasattr(df[col], \"tensor\") for col in columns)\n if not any_tensors:\n subarray = df[columns].values\n else: # support a mix of compatible tensors and regular columns\n blocks = [_get_column_as_tensor(df[col]) for col in columns]\n subarray = np.hstack(blocks)\n if self.dtype:\n subarray = subarray.astype(self.dtype)\n return subarray\n\n @property\n def required_columns(self):\n return set(_as_list_of_str(self.columns))\n\n def transformed_columns(self, input_columns):\n input_columns = set(_as_list_of_str(input_columns))\n if not self.required_columns <= input_columns:\n raise ValueError(f'Required columns are {self.required_columns}')\n return {None}\n\n\nclass PandasIndexValueSelector(BaseEstimator, TransformerMixin):\n \"\"\"Select index levels as feature cols, and return np.array.\n\n Optionally, cast the resulting arry to dtype.\n \"\"\"\n\n def __init__(self, levels=None, dtype=None):\n self.levels = levels\n self.dtype = dtype\n\n def fit(self, df, y=None, 
**fit_params):\n return self\n\n def transform(self, df):\n levels = self.levels or []\n if isinstance(levels, str):\n levels = [levels]\n try:\n iter(levels)\n except TypeError:\n levels = [levels]\n blocks = [\n df.index.get_level_values(level).values.reshape(-1, 1)\n for level in levels\n ]\n subarray = np.hstack(blocks) if blocks else np.empty((len(df), 0))\n if self.dtype:\n subarray = subarray.astype(self.dtype)\n return subarray\n\n\nclass PandasSequenceSplitter(BaseEstimator, TransformerMixin):\n \"\"\"Split sequence columns in two.\"\"\"\n\n def __init__(self, columns=None, index=0):\n self.columns = columns\n self.index = index\n\n def fit(self, df, y=None, **fit_params):\n return self\n\n def transform(self, df):\n columns = _as_list_of_str(self.columns)\n index = self.index\n for col in columns:\n values = df[col].values\n df = array_to_dataframe(values[:, :index], f'{col}_pre', df=df)\n df = array_to_dataframe(values[:, index:], f'{col}_post', df=df)\n return df\n\n @property\n def required_columns(self):\n columns = set(_as_list_of_str(self.columns))\n return columns\n\n def transformed_columns(self, input_columns):\n columns = _as_list_of_str(input_columns)\n input_columns = set(columns)\n if not self.required_columns <= input_columns:\n raise ValueError(f'Required columns are {self.required_columns}')\n to_change = _as_list_of_str(self.columns)\n columns = [f'{col}_post' for col in to_change]\n columns2 = [f'{col}_pre' for col in to_change]\n return set(np.concatenate([columns, columns2]))\n",
"from contextlib import contextmanager\nimport inspect\nimport os\nimport random as py_random\nfrom typing import Iterable\n\nimport numpy as np\n\nfrom timeserio.externals import tensorflow as tf, keras\n\n\ndef iterlayers(model: keras.layers.Layer) -> Iterable[keras.layers.Layer]:\n \"\"\"\n Return iterable over all layers (and sub-layers) of a model.\n\n This works because a keras Model is a sub-class of Layer.\n Can be used for freezing/un-freezing layers, etc.\n \"\"\"\n if hasattr(model, 'layers'):\n for layer in model.layers:\n yield from iterlayers(layer)\n else:\n yield model\n\n\ndef has_arg(fn, name, accept_all=False):\n \"\"\"Check if a callable accepts a given keyword argument.\n\n See https://github.com/tensorflow/tensorflow/pull/37004\n\n Arguments:\n fn: Callable to inspect.\n name: Check if `fn` can be called with `name` as a keyword argument.\n accept_all: What to return if there is no parameter called `name` but the\n function accepts a `**kwargs` argument.\n Returns:\n bool, whether `fn` accepts a `name` keyword argument.\n \"\"\"\n arg_spec = inspect.getfullargspec(fn)\n if accept_all and arg_spec.varkw is not None:\n return True\n return name in arg_spec.args or name in arg_spec.kwonlyargs\n\n\n@contextmanager\ndef seed_random(seed=42):\n \"\"\"Seed all random number generators to ensure repeatable tests.\n\n Sets python, `numpy`, and `tensorflow` random seeds\n to a repeatable states. This is useful in tests, but should not be\n used in production.\n\n https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development\n \"\"\"\n os.environ['PYTHONHASHSEED'] = f'{seed}'\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n py_random.seed(seed)\n np.random.seed(seed)\n tf.compat.v1.reset_default_graph()\n\n graph = tf.Graph()\n config = tf.compat.v1.ConfigProto(\n intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1,\n )\n session = tf.compat.v1.Session(graph=graph, config=config)\n tf.compat.v1.keras.backend.set_session(session)\n with tf.device(\"/cpu:0\"), graph.as_default(), session.as_default():\n tf.compat.v1.set_random_seed(seed)\n graph.seed = seed\n yield\n tf.compat.v1.keras.backend.clear_session()\n",
"import pytest\n\nimport pandas as pd\n\nfrom timeserio.pipeline.pipeline import _parse_df_y\n\n\[email protected]\ndef df():\n df = pd.DataFrame({\n \"x\": [1, 2, 3],\n \"y\": [1, 4, 9],\n })\n return df\n\n\ndef test_fit_decorator(df):\n \"\"\"Test we can pass the target column by name.\"\"\"\n def fit(df, y):\n df, y = _parse_df_y(df, y)\n return df[\"x\"] + y\n\n fit1 = fit(df, df[\"y\"])\n fit2 = fit(df, \"y\")\n\n pd.testing.assert_series_equal(fit1, fit2)\n"
] |
[
[
"numpy.hstack",
"pandas.concat",
"pandas.DataFrame",
"numpy.concatenate",
"pandas.MultiIndex.from_product"
],
[
"numpy.random.seed"
],
[
"pandas.testing.assert_series_equal",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
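The timeserio/preprocessing/pandas.py row above stores a 2D array under a single top-level column name by building a two-level MultiIndex, as array_to_dataframe does. A minimal sketch of that idea with a toy array, independent of the library itself:

import numpy as np
import pandas as pd

# A toy (n_rows, n_columns) array to file under one top-level column.
array = np.arange(6).reshape(3, 2)
n_columns = array.shape[1]
cidx = pd.MultiIndex.from_arrays([["feature"] * n_columns, range(n_columns)])
df = pd.DataFrame(array, columns=cidx)

print(df["feature"])  # both sub-columns come back together under the one name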
atfkaka/tensorflow
|
[
"5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a",
"5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a",
"5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a",
"5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a",
"5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a"
] |
[
"tensorflow/python/training/basic_session_run_hooks_test.py",
"tensorflow/python/util/deprecation_test.py",
"tensorflow/contrib/testing/python/framework/fake_summary_writer.py",
"tensorflow/python/ops/logging_ops.py",
"tensorflow/models/image/cifar10/cifar10.py"
] |
[
"# pylint: disable=g-bad-file-header\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for basic_session_run_hooks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport shutil\nimport tempfile\nimport threading\nimport time\n\nimport tensorflow as tf\n\nfrom tensorflow.contrib import testing\nfrom tensorflow.python.framework import meta_graph\nfrom tensorflow.python.training import basic_session_run_hooks\nfrom tensorflow.python.training import monitored_session\n\n\nclass SecondOrStepTimerTest(tf.test.TestCase):\n\n def test_raise_in_both_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks._SecondOrStepTimer(every_secs=2.0, every_steps=10)\n\n def test_raise_in_none_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks._SecondOrStepTimer()\n\n def test_every_secs(self):\n timer = basic_session_run_hooks._SecondOrStepTimer(every_secs=1.0)\n self.assertTrue(timer.should_trigger_for_step(1))\n\n timer.update_last_triggered_step(1)\n self.assertFalse(timer.should_trigger_for_step(1))\n self.assertFalse(timer.should_trigger_for_step(2))\n\n time.sleep(1.0)\n self.assertFalse(timer.should_trigger_for_step(1))\n self.assertTrue(timer.should_trigger_for_step(2))\n\n def test_every_steps(self):\n timer = basic_session_run_hooks._SecondOrStepTimer(every_steps=3)\n self.assertTrue(timer.should_trigger_for_step(1))\n\n timer.update_last_triggered_step(1)\n self.assertFalse(timer.should_trigger_for_step(1))\n self.assertFalse(timer.should_trigger_for_step(2))\n self.assertFalse(timer.should_trigger_for_step(3))\n self.assertTrue(timer.should_trigger_for_step(4))\n\n def test_update_last_triggered_step(self):\n timer = basic_session_run_hooks._SecondOrStepTimer(every_steps=1)\n\n elapsed_secs, elapsed_steps = timer.update_last_triggered_step(1)\n self.assertEqual(None, elapsed_secs)\n self.assertEqual(None, elapsed_steps)\n\n elapsed_secs, elapsed_steps = timer.update_last_triggered_step(5)\n self.assertLess(0, elapsed_secs)\n self.assertEqual(4, elapsed_steps)\n\n elapsed_secs, elapsed_steps = timer.update_last_triggered_step(7)\n self.assertLess(0, elapsed_secs)\n self.assertEqual(2, elapsed_steps)\n\n\nclass StopAtStepTest(tf.test.TestCase):\n\n def test_raise_in_both_last_step_and_num_steps(self):\n with self.assertRaises(ValueError):\n tf.train.StopAtStepHook(num_steps=10, last_step=20)\n\n def test_stop_based_on_last_step(self):\n h = tf.train.StopAtStepHook(last_step=10)\n with tf.Graph().as_default():\n global_step = tf.contrib.framework.get_or_create_global_step()\n no_op = tf.no_op()\n h.begin()\n with tf.Session() as sess:\n mon_sess = monitored_session._HookedSession(sess, [h])\n sess.run(tf.assign(global_step, 5))\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n 
sess.run(tf.assign(global_step, 9))\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(tf.assign(global_step, 10))\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n sess.run(tf.assign(global_step, 11))\n mon_sess._should_stop = False\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n\n def test_stop_based_on_num_step(self):\n h = tf.train.StopAtStepHook(num_steps=10)\n\n with tf.Graph().as_default():\n global_step = tf.contrib.framework.get_or_create_global_step()\n no_op = tf.no_op()\n h.begin()\n with tf.Session() as sess:\n mon_sess = monitored_session._HookedSession(sess, [h])\n sess.run(tf.assign(global_step, 5))\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(tf.assign(global_step, 13))\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(tf.assign(global_step, 14))\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n sess.run(tf.assign(global_step, 15))\n mon_sess._should_stop = False\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n\n\nclass LoggingTensorHookTest(tf.test.TestCase):\n\n def setUp(self):\n # Mock out logging calls so we can verify whether correct tensors are being\n # monitored.\n self._actual_log = tf.logging.info\n self.logged_message = None\n\n def mock_log(*args, **kwargs):\n self.logged_message = args\n self._actual_log(*args, **kwargs)\n\n tf.logging.info = mock_log\n\n def tearDown(self):\n tf.logging.info = self._actual_log\n\n def test_illegal_args(self):\n with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):\n tf.train.LoggingTensorHook(tensors=['t'], every_n_iter=0)\n with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):\n tf.train.LoggingTensorHook(tensors=['t'], every_n_iter=-10)\n with self.assertRaisesRegexp(ValueError, 'xactly one of'):\n tf.train.LoggingTensorHook(tensors=['t'], every_n_iter=5, every_n_secs=5)\n with self.assertRaisesRegexp(ValueError, 'xactly one of'):\n tf.train.LoggingTensorHook(tensors=['t'])\n\n def test_print_every_n_steps(self):\n with tf.Graph().as_default(), tf.Session() as sess:\n t = tf.constant(42.0, name='foo')\n train_op = tf.constant(3)\n hook = tf.train.LoggingTensorHook(tensors=[t.name], every_n_iter=10)\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n sess.run(tf.global_variables_initializer())\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), t.name)\n for j in range(3):\n _ = j\n self.logged_message = ''\n for i in range(9):\n _ = i\n mon_sess.run(train_op)\n # assertNotRegexpMatches is not supported by python 3.1 and later\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), t.name)\n\n def test_print_every_n_secs(self):\n with tf.Graph().as_default(), tf.Session() as sess:\n t = tf.constant(42.0, name='foo')\n train_op = tf.constant(3)\n\n hook = tf.train.LoggingTensorHook(tensors=[t.name], every_n_secs=1.0)\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n sess.run(tf.global_variables_initializer())\n\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), t.name)\n\n # assertNotRegexpMatches is not supported by python 3.1 and later\n self.logged_message = ''\n mon_sess.run(train_op)\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n time.sleep(1.0)\n\n self.logged_message = ''\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), 
t.name)\n\n\nclass CheckpointSaverHookTest(tf.test.TestCase):\n\n def setUp(self):\n self.model_dir = tempfile.mkdtemp()\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.scaffold = monitored_session.Scaffold()\n self.global_step = tf.contrib.framework.get_or_create_global_step()\n self.train_op = tf.assign_add(self.global_step, 1)\n\n def tearDown(self):\n shutil.rmtree(self.model_dir, ignore_errors=True)\n\n def test_raise_when_saver_and_scaffold_both_missing(self):\n with self.assertRaises(ValueError):\n tf.train.CheckpointSaverHook(self.model_dir)\n\n def test_raise_when_saver_and_scaffold_both_present(self):\n with self.assertRaises(ValueError):\n tf.train.CheckpointSaverHook(\n self.model_dir, saver=self.scaffold.saver, scaffold=self.scaffold)\n\n def test_raise_in_both_secs_and_steps(self):\n with self.assertRaises(ValueError):\n tf.train.CheckpointSaverHook(self.model_dir, save_secs=10, save_steps=20)\n\n def test_raise_in_none_secs_and_steps(self):\n with self.assertRaises(ValueError):\n tf.train.CheckpointSaverHook(self.model_dir)\n\n def test_save_secs_saves_in_first_step(self):\n with self.graph.as_default():\n hook = tf.train.CheckpointSaverHook(\n self.model_dir, save_secs=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with tf.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n self.assertEqual(1, tf.contrib.framework.load_variable(\n self.model_dir, self.global_step.name))\n\n def test_save_secs_saves_periodically(self):\n with self.graph.as_default():\n hook = tf.train.CheckpointSaverHook(\n self.model_dir, save_secs=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with tf.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n mon_sess.run(self.train_op)\n # Not saved\n self.assertEqual(1, tf.contrib.framework.load_variable(\n self.model_dir, self.global_step.name))\n time.sleep(2.5)\n mon_sess.run(self.train_op)\n # saved\n self.assertEqual(3, tf.contrib.framework.load_variable(\n self.model_dir, self.global_step.name))\n mon_sess.run(self.train_op)\n mon_sess.run(self.train_op)\n # Not saved\n self.assertEqual(3, tf.contrib.framework.load_variable(\n self.model_dir, self.global_step.name))\n time.sleep(2.5)\n mon_sess.run(self.train_op)\n # saved\n self.assertEqual(6, tf.contrib.framework.load_variable(\n self.model_dir, self.global_step.name))\n\n def test_save_steps_saves_in_first_step(self):\n with self.graph.as_default():\n hook = tf.train.CheckpointSaverHook(\n self.model_dir, save_steps=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with tf.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n self.assertEqual(1, tf.contrib.framework.load_variable(\n self.model_dir, self.global_step.name))\n\n def test_save_steps_saves_periodically(self):\n with self.graph.as_default():\n hook = tf.train.CheckpointSaverHook(\n self.model_dir, save_steps=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with tf.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n mon_sess.run(self.train_op)\n # Not saved\n self.assertEqual(1, tf.contrib.framework.load_variable(\n self.model_dir, self.global_step.name))\n 
mon_sess.run(self.train_op)\n # saved\n self.assertEqual(3, tf.contrib.framework.load_variable(\n self.model_dir, self.global_step.name))\n mon_sess.run(self.train_op)\n # Not saved\n self.assertEqual(3, tf.contrib.framework.load_variable(\n self.model_dir, self.global_step.name))\n mon_sess.run(self.train_op)\n # saved\n self.assertEqual(5, tf.contrib.framework.load_variable(\n self.model_dir, self.global_step.name))\n\n def test_save_saves_at_end(self):\n with self.graph.as_default():\n hook = tf.train.CheckpointSaverHook(\n self.model_dir, save_secs=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with tf.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n mon_sess.run(self.train_op)\n hook.end(sess)\n self.assertEqual(2, tf.contrib.framework.load_variable(\n self.model_dir, self.global_step.name))\n\n def test_summary_writer_defs(self):\n testing.FakeSummaryWriter.install()\n tf.train.SummaryWriterCache.clear()\n summary_writer = tf.train.SummaryWriterCache.get(self.model_dir)\n\n with self.graph.as_default():\n hook = tf.train.CheckpointSaverHook(\n self.model_dir, save_steps=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with tf.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.model_dir,\n expected_added_meta_graphs=[meta_graph.create_meta_graph_def(\n graph_def=self.graph.as_graph_def(add_shapes=True),\n saver_def=self.scaffold.saver.saver_def)])\n\n testing.FakeSummaryWriter.uninstall()\n\n\nclass StepCounterHookTest(tf.test.TestCase):\n\n def setUp(self):\n self.log_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.log_dir, ignore_errors=True)\n\n def test_step_counter_every_n_steps(self):\n with tf.Graph().as_default() as g, tf.Session() as sess:\n global_step = tf.contrib.framework.get_or_create_global_step()\n train_op = tf.assign_add(global_step, 1)\n summary_writer = testing.FakeSummaryWriter(self.log_dir, g)\n hook = tf.train.StepCounterHook(\n summary_writer=summary_writer, every_n_steps=10)\n hook.begin()\n sess.run(tf.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(30):\n time.sleep(0.01)\n mon_sess.run(train_op)\n hook.end(sess)\n summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n self.assertItemsEqual([11, 21], summary_writer.summaries.keys())\n for step in [11, 21]:\n summary_value = summary_writer.summaries[step][0].value[0]\n self.assertEqual('global_step/sec', summary_value.tag)\n self.assertGreater(summary_value.simple_value, 0)\n\n def test_step_counter_every_n_secs(self):\n with tf.Graph().as_default() as g, tf.Session() as sess:\n global_step = tf.contrib.framework.get_or_create_global_step()\n train_op = tf.assign_add(global_step, 1)\n summary_writer = testing.FakeSummaryWriter(self.log_dir, g)\n hook = tf.train.StepCounterHook(\n summary_writer=summary_writer, every_n_steps=None, every_n_secs=0.1)\n\n hook.begin()\n sess.run(tf.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(train_op)\n time.sleep(0.2)\n mon_sess.run(train_op)\n time.sleep(0.2)\n mon_sess.run(train_op)\n hook.end(sess)\n\n summary_writer.assert_summaries(\n test_case=self,\n 
expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n self.assertTrue(summary_writer.summaries, 'No summaries were created.')\n self.assertItemsEqual([2, 3], summary_writer.summaries.keys())\n for summary in summary_writer.summaries.values():\n summary_value = summary[0].value[0]\n self.assertEqual('global_step/sec', summary_value.tag)\n self.assertGreater(summary_value.simple_value, 0)\n\n\nclass SummarySaverHookTest(tf.test.TestCase):\n\n def setUp(self):\n tf.test.TestCase.setUp(self)\n\n self.log_dir = 'log/dir'\n self.summary_writer = testing.FakeSummaryWriter(self.log_dir)\n\n var = tf.Variable(0.0)\n tensor = tf.assign_add(var, 1.0)\n tensor2 = tensor * 2\n self.summary_op = tf.summary.scalar('my_summary', tensor)\n self.summary_op2 = tf.summary.scalar('my_summary2', tensor2)\n\n global_step = tf.contrib.framework.get_or_create_global_step()\n self.train_op = tf.assign_add(global_step, 1)\n\n def test_raise_when_scaffold_and_summary_op_both_missing(self):\n with self.assertRaises(ValueError):\n tf.train.SummarySaverHook()\n\n def test_raise_when_scaffold_and_summary_op_both_present(self):\n with self.assertRaises(ValueError):\n tf.train.SummarySaverHook(scaffold=tf.train.Scaffold(),\n summary_op=self.summary_op)\n\n def test_raise_in_both_secs_and_steps(self):\n with self.assertRaises(ValueError):\n tf.train.SummarySaverHook(\n save_secs=10,\n save_steps=20,\n summary_writer=self.summary_writer)\n\n def test_raise_in_none_secs_and_steps(self):\n with self.assertRaises(ValueError):\n tf.train.SummarySaverHook(\n save_secs=None,\n save_steps=None,\n summary_writer=self.summary_writer)\n\n def test_save_steps(self):\n hook = tf.train.SummarySaverHook(\n save_steps=8,\n summary_writer=self.summary_writer,\n summary_op=self.summary_op)\n\n with self.test_session() as sess:\n hook.begin()\n sess.run(tf.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(30):\n mon_sess.run(self.train_op)\n hook.end(sess)\n\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {'my_summary': 1.0},\n 9: {'my_summary': 2.0},\n 17: {'my_summary': 3.0},\n 25: {'my_summary': 4.0},\n })\n\n def test_multiple_summaries(self):\n hook = tf.train.SummarySaverHook(\n save_steps=8,\n summary_writer=self.summary_writer,\n summary_op=[self.summary_op, self.summary_op2])\n\n with self.test_session() as sess:\n hook.begin()\n sess.run(tf.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(10):\n mon_sess.run(self.train_op)\n hook.end(sess)\n\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {\n 'my_summary': 1.0,\n 'my_summary2': 2.0\n },\n 9: {\n 'my_summary': 2.0,\n 'my_summary2': 4.0\n },\n })\n\n def test_save_secs_saving_once_every_step(self):\n hook = tf.train.SummarySaverHook(\n save_secs=0.5,\n summary_writer=self.summary_writer,\n summary_op=self.summary_op)\n\n with self.test_session() as sess:\n hook.begin()\n sess.run(tf.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(4):\n mon_sess.run(self.train_op)\n time.sleep(0.5)\n hook.end(sess)\n\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {'my_summary': 1.0},\n 2: {'my_summary': 2.0},\n 3: {'my_summary': 3.0},\n 4: {'my_summary': 4.0},\n })\n\n def 
test_save_secs_saving_once_every_three_steps(self):\n hook = tf.train.SummarySaverHook(\n save_secs=0.9,\n summary_writer=self.summary_writer,\n summary_op=self.summary_op)\n\n with self.test_session() as sess:\n hook.begin()\n sess.run(tf.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(8):\n mon_sess.run(self.train_op)\n time.sleep(0.3)\n hook.end(sess)\n\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {'my_summary': 1.0},\n 4: {'my_summary': 2.0},\n 7: {'my_summary': 3.0},\n })\n\n\nclass GlobalStepWaiterHookTest(tf.test.TestCase):\n\n def test_not_wait_for_step_zero(self):\n with tf.Graph().as_default():\n tf.contrib.framework.get_or_create_global_step()\n hook = tf.train.GlobalStepWaiterHook(wait_until_step=0)\n hook.begin()\n with tf.Session() as sess:\n # Before run should return without waiting gstep increment.\n hook.before_run(\n tf.train.SessionRunContext(\n original_args=None, session=sess))\n\n def test_wait_for_step(self):\n with tf.Graph().as_default():\n gstep = tf.contrib.framework.get_or_create_global_step()\n hook = tf.train.GlobalStepWaiterHook(wait_until_step=1000)\n hook.begin()\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n waiter = threading.Thread(\n target=hook.before_run,\n args=(tf.train.SessionRunContext(\n original_args=None, session=sess),))\n waiter.daemon = True\n waiter.start()\n time.sleep(1.0)\n self.assertTrue(waiter.is_alive())\n sess.run(tf.assign(gstep, 500))\n time.sleep(1.0)\n self.assertTrue(waiter.is_alive())\n sess.run(tf.assign(gstep, 1100))\n time.sleep(1.2)\n self.assertFalse(waiter.is_alive())\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"tensor_util tests.\"\"\"\n\n# pylint: disable=unused-import\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import deprecation\n\n\nclass DeprecationTest(tf.test.TestCase):\n\n def _assert_subset(self, expected_subset, actual_set):\n self.assertTrue(\n actual_set.issuperset(expected_subset),\n msg=\"%s is not a superset of %s.\" % (actual_set, expected_subset))\n\n def test_deprecated_illegal_args(self):\n instructions = \"This is how you update...\"\n with self.assertRaisesRegexp(ValueError, \"date\"):\n deprecation.deprecated(None, instructions)\n with self.assertRaisesRegexp(ValueError, \"date\"):\n deprecation.deprecated(\"\", instructions)\n with self.assertRaisesRegexp(ValueError, \"YYYY-MM-DD\"):\n deprecation.deprecated(\"07-04-2016\", instructions)\n date = \"2016-07-04\"\n with self.assertRaisesRegexp(ValueError, \"instructions\"):\n deprecation.deprecated(date, None)\n with self.assertRaisesRegexp(ValueError, \"instructions\"):\n deprecation.deprecated(date, \"\")\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_static_fn_with_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated(date, instructions)\n def _fn(arg0, arg1):\n \"\"\"fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n\n Returns:\n Sum of args.\n \"\"\"\n return arg0 + arg1\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"fn doc. (deprecated)\"\n \"\\n\"\n \"\\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\"\n \"\\n\"\n \"\\n Args:\"\n \"\\n arg0: Arg 0.\"\n \"\\n arg1: Arg 1.\"\n \"\\n\"\n \"\\n Returns:\"\n \"\\n Sum of args.\"\n \"\\n \" % (date, instructions),\n _fn.__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_static_fn_with_one_line_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated(date, instructions)\n def _fn(arg0, arg1):\n \"\"\"fn doc.\"\"\"\n return arg0 + arg1\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"fn doc. (deprecated)\"\n \"\\n\"\n \"\\nTHIS FUNCTION IS DEPRECATED. 
It will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\" % (date, instructions),\n _fn.__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_static_fn_no_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated(date, instructions)\n def _fn(arg0, arg1):\n return arg0 + arg1\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"DEPRECATED FUNCTION\"\n \"\\n\"\n \"\\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s.\"\n \"\\nInstructions for updating:\"\n \"\\n%s\" % (date, instructions),\n _fn.__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_instance_fn_with_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n class _Object(object):\n\n def __init(self):\n pass\n\n @deprecation.deprecated(date, instructions)\n def _fn(self, arg0, arg1):\n \"\"\"fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n\n Returns:\n Sum of args.\n \"\"\"\n return arg0 + arg1\n\n # Assert function docs are properly updated.\n self.assertEqual(\n \"fn doc. (deprecated)\"\n \"\\n\"\n \"\\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\"\n \"\\n\"\n \"\\n Args:\"\n \"\\n arg0: Arg 0.\"\n \"\\n arg1: Arg 1.\"\n \"\\n\"\n \"\\n Returns:\"\n \"\\n Sum of args.\"\n \"\\n \" % (date, instructions),\n getattr(_Object, \"_fn\").__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _Object()._fn(1, 2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_instance_fn_with_one_line_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n class _Object(object):\n\n def __init(self):\n pass\n\n @deprecation.deprecated(date, instructions)\n def _fn(self, arg0, arg1):\n \"\"\"fn doc.\"\"\"\n return arg0 + arg1\n\n # Assert function docs are properly updated.\n self.assertEqual(\n \"fn doc. (deprecated)\"\n \"\\n\"\n \"\\nTHIS FUNCTION IS DEPRECATED. 
It will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\" % (date, instructions),\n getattr(_Object, \"_fn\").__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _Object()._fn(1, 2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_instance_fn_no_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n class _Object(object):\n\n def __init(self):\n pass\n\n @deprecation.deprecated(date, instructions)\n def _fn(self, arg0, arg1):\n return arg0 + arg1\n\n # Assert function docs are properly updated.\n self.assertEqual(\n \"DEPRECATED FUNCTION\"\n \"\\n\"\n \"\\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s.\"\n \"\\nInstructions for updating:\"\n \"\\n%s\" % (date, instructions),\n getattr(_Object, \"_fn\").__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _Object()._fn(1, 2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n def test_prop_wrong_order(self):\n with self.assertRaisesRegexp(\n ValueError,\n \"make sure @property appears before @deprecated in your source code\"):\n # pylint: disable=unused-variable\n\n class _Object(object):\n\n def __init(self):\n pass\n\n @deprecation.deprecated(\"2016-07-04\", \"Instructions.\")\n @property\n def _prop(self):\n return \"prop_wrong_order\"\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_prop_with_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n class _Object(object):\n\n def __init(self):\n pass\n\n @property\n @deprecation.deprecated(date, instructions)\n def _prop(self):\n \"\"\"prop doc.\n\n Returns:\n String.\n \"\"\"\n return \"prop_with_doc\"\n\n # Assert function docs are properly updated.\n self.assertEqual(\n \"prop doc. (deprecated)\"\n \"\\n\"\n \"\\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s.\"\n \"\\nInstructions for updating:\"\n \"\\n%s\"\n \"\\n\"\n \"\\n Returns:\"\n \"\\n String.\"\n \"\\n \" % (date, instructions),\n getattr(_Object, \"_prop\").__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(\"prop_with_doc\", _Object()._prop)\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_prop_no_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n class _Object(object):\n\n def __init(self):\n pass\n\n @property\n @deprecation.deprecated(date, instructions)\n def _prop(self):\n return \"prop_no_doc\"\n\n # Assert function docs are properly updated.\n self.assertEqual(\n \"DEPRECATED FUNCTION\"\n \"\\n\"\n \"\\nTHIS FUNCTION IS DEPRECATED. 
It will be removed after %s.\"\n \"\\nInstructions for updating:\"\n \"\\n%s\" % (date, instructions),\n getattr(_Object, \"_prop\").__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(\"prop_no_doc\", _Object()._prop)\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n\nclass DeprecatedArgsTest(tf.test.TestCase):\n\n def _assert_subset(self, expected_subset, actual_set):\n self.assertTrue(\n actual_set.issuperset(expected_subset),\n msg=\"%s is not a superset of %s.\" % (actual_set, expected_subset))\n\n def test_deprecated_illegal_args(self):\n instructions = \"This is how you update...\"\n date = \"2016-07-04\"\n with self.assertRaisesRegexp(ValueError, \"date\"):\n deprecation.deprecated_args(None, instructions, \"deprecated\")\n with self.assertRaisesRegexp(ValueError, \"date\"):\n deprecation.deprecated_args(\"\", instructions, \"deprecated\")\n with self.assertRaisesRegexp(ValueError, \"YYYY-MM-DD\"):\n deprecation.deprecated_args(\"07-04-2016\", instructions, \"deprecated\")\n with self.assertRaisesRegexp(ValueError, \"instructions\"):\n deprecation.deprecated_args(date, None, \"deprecated\")\n with self.assertRaisesRegexp(ValueError, \"instructions\"):\n deprecation.deprecated_args(date, \"\", \"deprecated\")\n with self.assertRaisesRegexp(ValueError, \"argument\"):\n deprecation.deprecated_args(date, instructions)\n\n def test_deprecated_missing_args(self):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n def _fn(arg0, arg1, deprecated=None):\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert calls without the deprecated argument log nothing.\n with self.assertRaisesRegexp(ValueError, \"not present.*\\\\['missing'\\\\]\"):\n deprecation.deprecated_args(date, instructions, \"missing\")(_fn)\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_static_fn_with_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"deprecated\")\n def _fn(arg0, arg1, deprecated=True):\n \"\"\"fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n deprecated: Deprecated!\n\n Returns:\n Sum of args.\n \"\"\"\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"fn doc. (deprecated arguments)\"\n \"\\n\"\n \"\\nSOME ARGUMENTS ARE DEPRECATED. 
They will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\"\n \"\\n\"\n \"\\n Args:\"\n \"\\n arg0: Arg 0.\"\n \"\\n arg1: Arg 1.\"\n \"\\n deprecated: Deprecated!\"\n \"\\n\"\n \"\\n Returns:\"\n \"\\n Sum of args.\"\n \"\\n \" % (date, instructions),\n _fn.__doc__)\n\n # Assert calls without the deprecated argument log nothing.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated argument log a warning.\n self.assertEqual(3, _fn(1, 2, True))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_static_fn_with_one_line_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"deprecated\")\n def _fn(arg0, arg1, deprecated=True):\n \"\"\"fn doc.\"\"\"\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"fn doc. (deprecated arguments)\"\n \"\\n\"\n \"\\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\" % (date, instructions),\n _fn.__doc__)\n\n # Assert calls without the deprecated argument log nothing.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated argument log a warning.\n self.assertEqual(3, _fn(1, 2, True))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_static_fn_no_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"deprecated\")\n def _fn(arg0, arg1, deprecated=True):\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"DEPRECATED FUNCTION ARGUMENTS\"\n \"\\n\"\n \"\\nSOME ARGUMENTS ARE DEPRECATED. 
They will be removed after %s.\"\n \"\\nInstructions for updating:\"\n \"\\n%s\" % (date, instructions),\n _fn.__doc__)\n\n # Assert calls without the deprecated argument log nothing.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated argument log a warning.\n self.assertEqual(3, _fn(1, 2, True))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_varargs(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"deprecated\")\n def _fn(arg0, arg1, *deprecated):\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert calls without the deprecated argument log nothing.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated argument log a warning.\n self.assertEqual(3, _fn(1, 2, True, False))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_kwargs(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"deprecated\")\n def _fn(arg0, arg1, **deprecated):\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert calls without the deprecated argument log nothing.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated argument log a warning.\n self.assertEqual(3, _fn(1, 2, a=True, b=False))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_positional_and_named(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"d1\", \"d2\")\n def _fn(arg0, d1=None, arg1=2, d2=None):\n return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1\n\n # Assert calls without the deprecated arguments log nothing.\n self.assertEqual(2, _fn(1, arg1=2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated arguments log warnings.\n self.assertEqual(2, _fn(1, None, 2, d2=False))\n self.assertEqual(2, mock_warning.call_count)\n (args1, _) = mock_warning.call_args_list[0]\n self.assertRegexpMatches(args1[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions, \"d1\"]), set(args1[1:]))\n (args2, _) = mock_warning.call_args_list[1]\n self.assertRegexpMatches(args1[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions, \"d2\"]), set(args2[1:]))\n\n\nclass DeprecatedArgValuesTest(tf.test.TestCase):\n\n def _assert_subset(self, expected_subset, actual_set):\n self.assertTrue(\n actual_set.issuperset(expected_subset),\n msg=\"%s is not a superset of %s.\" % (actual_set, 
expected_subset))\n\n def test_deprecated_illegal_args(self):\n instructions = \"This is how you update...\"\n with self.assertRaisesRegexp(ValueError, \"date\"):\n deprecation.deprecated_arg_values(\n None, instructions, deprecated=True)\n with self.assertRaisesRegexp(ValueError, \"date\"):\n deprecation.deprecated_arg_values(\n \"\", instructions, deprecated=True)\n with self.assertRaisesRegexp(ValueError, \"YYYY-MM-DD\"):\n deprecation.deprecated_arg_values(\n \"07-04-2016\", instructions, deprecated=True)\n date = \"2016-07-04\"\n with self.assertRaisesRegexp(ValueError, \"instructions\"):\n deprecation.deprecated_arg_values(\n date, None, deprecated=True)\n with self.assertRaisesRegexp(ValueError, \"instructions\"):\n deprecation.deprecated_arg_values(\n date, \"\", deprecated=True)\n with self.assertRaisesRegexp(ValueError, \"argument\", deprecated=True):\n deprecation.deprecated_arg_values(\n date, instructions)\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_static_fn_with_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_arg_values(date, instructions, deprecated=True)\n def _fn(arg0, arg1, deprecated=True):\n \"\"\"fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n deprecated: Deprecated!\n\n Returns:\n Sum of args.\n \"\"\"\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"fn doc. (deprecated arguments)\"\n \"\\n\"\n \"\\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\"\n \"\\n\"\n \"\\n Args:\"\n \"\\n arg0: Arg 0.\"\n \"\\n arg1: Arg 1.\"\n \"\\n deprecated: Deprecated!\"\n \"\\n\"\n \"\\n Returns:\"\n \"\\n Sum of args.\"\n \"\\n \" % (date, instructions),\n _fn.__doc__)\n\n # Assert calling new fn with non-deprecated value logs nothing.\n self.assertEqual(3, _fn(1, 2, deprecated=False))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calling new fn with deprecated value issues log warning.\n self.assertEqual(3, _fn(1, 2, deprecated=True))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n # Assert calling new fn with default deprecated value issues log warning.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(2, mock_warning.call_count)\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_static_fn_with_one_line_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_arg_values(date, instructions, deprecated=True)\n def _fn(arg0, arg1, deprecated=True):\n \"\"\"fn doc.\"\"\"\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"fn doc. (deprecated arguments)\"\n \"\\n\"\n \"\\nSOME ARGUMENTS ARE DEPRECATED. 
They will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\" % (date, instructions),\n _fn.__doc__)\n\n # Assert calling new fn with non-deprecated value logs nothing.\n self.assertEqual(3, _fn(1, 2, deprecated=False))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calling new fn with deprecated value issues log warning.\n self.assertEqual(3, _fn(1, 2, deprecated=True))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n # Assert calling new fn with default deprecated value issues log warning.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(2, mock_warning.call_count)\n\n @tf.test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_static_fn_no_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_arg_values(date, instructions, deprecated=True)\n def _fn(arg0, arg1, deprecated=True):\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"DEPRECATED FUNCTION ARGUMENTS\"\n \"\\n\"\n \"\\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s.\"\n \"\\nInstructions for updating:\"\n \"\\n%s\" % (date, instructions),\n _fn.__doc__)\n\n # Assert calling new fn with non-deprecated value logs nothing.\n self.assertEqual(3, _fn(1, 2, deprecated=False))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _fn(1, 2, deprecated=True))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegexpMatches(args[0], r\"deprecated and will be removed after\")\n self._assert_subset(set([date, instructions]), set(args[1:]))\n\n # Assert calling new fn with default deprecated value issues log warning.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(2, mock_warning.call_count)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Fake summary writer for unit tests.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.framework import summary_pb2\nfrom tensorflow.python.summary.writer import writer_cache\nfrom tensorflow.python.training import summary_io\n\n\n# TODO(ptucker): Replace with mock framework.\nclass FakeSummaryWriter(object):\n \"\"\"Fake summary writer.\"\"\"\n\n _replaced_summary_writer = None\n\n @classmethod\n def install(cls):\n if cls._replaced_summary_writer:\n raise ValueError('FakeSummaryWriter already installed.')\n cls._replaced_summary_writer = summary_io.SummaryWriter\n summary_io.SummaryWriter = FakeSummaryWriter\n writer_cache.SummaryWriter = FakeSummaryWriter\n\n @classmethod\n def uninstall(cls):\n if not cls._replaced_summary_writer:\n raise ValueError('FakeSummaryWriter not installed.')\n summary_io.SummaryWriter = cls._replaced_summary_writer\n writer_cache.SummaryWriter = cls._replaced_summary_writer\n cls._replaced_summary_writer = None\n\n def __init__(self, logdir, graph=None):\n self._logdir = logdir\n self._graph = graph\n self._summaries = {}\n self._added_graphs = []\n self._added_meta_graphs = []\n self._added_session_logs = []\n\n @property\n def summaries(self):\n return self._summaries\n\n def assert_summaries(\n self, test_case, expected_logdir=None, expected_graph=None,\n expected_summaries=None, expected_added_graphs=None,\n expected_added_meta_graphs=None, expected_session_logs=None):\n \"\"\"Assert expected items have been added to summary writer.\"\"\"\n if expected_logdir is not None:\n test_case.assertEqual(expected_logdir, self._logdir)\n if expected_graph is not None:\n test_case.assertTrue(expected_graph is self._graph)\n expected_summaries = expected_summaries or {}\n for step in expected_summaries:\n test_case.assertTrue(\n step in self._summaries,\n msg='Missing step %s from %s.' 
% (step, self._summaries.keys()))\n actual_simple_values = {}\n for step_summary in self._summaries[step]:\n for v in step_summary.value:\n # Ignore global_step/sec since it's written by Supervisor in a\n # separate thread, so it's non-deterministic how many get written.\n if 'global_step/sec' != v.tag:\n actual_simple_values[v.tag] = v.simple_value\n test_case.assertEqual(expected_summaries[step], actual_simple_values)\n if expected_added_graphs is not None:\n test_case.assertEqual(expected_added_graphs, self._added_graphs)\n if expected_added_meta_graphs is not None:\n test_case.assertEqual(expected_added_meta_graphs, self._added_meta_graphs)\n if expected_session_logs is not None:\n test_case.assertEqual(expected_session_logs, self._added_session_logs)\n\n def add_summary(self, summary, current_global_step):\n \"\"\"Add summary.\"\"\"\n if isinstance(summary, bytes):\n summary_proto = summary_pb2.Summary()\n summary_proto.ParseFromString(summary)\n summary = summary_proto\n if current_global_step in self._summaries:\n step_summaries = self._summaries[current_global_step]\n else:\n step_summaries = []\n self._summaries[current_global_step] = step_summaries\n step_summaries.append(summary)\n\n # NOTE: Ignore global_step since its value is non-deterministic.\n def add_graph(self, graph, global_step=None, graph_def=None):\n \"\"\"Add graph.\"\"\"\n if (global_step is not None) and (global_step < 0):\n raise ValueError('Invalid global_step %s.' % global_step)\n if graph_def is not None:\n raise ValueError('Unexpected graph_def %s.' % graph_def)\n self._added_graphs.append(graph)\n\n def add_meta_graph(self, meta_graph_def, global_step=None):\n \"\"\"Add metagraph.\"\"\"\n if (global_step is not None) and (global_step < 0):\n raise ValueError('Invalid global_step %s.' % global_step)\n self._added_meta_graphs.append(meta_graph_def)\n\n # NOTE: Ignore global_step since its value is non-deterministic.\n def add_session_log(self, session_log, global_step=None):\n # pylint: disable=unused-argument\n self._added_session_logs.append(session_log)\n\n def flush(self):\n pass\n\n def reopen(self):\n pass\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Logging and Summary Operations.\"\"\"\n# pylint: disable=protected-access\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import gen_logging_ops\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_logging_ops import *\n# pylint: enable=wildcard-import\n\n\n# The python wrapper for Assert is in control_flow_ops, as the Assert\n# call relies on certain conditionals for its dependencies. Use\n# control_flow_ops.Assert.\n\n\n# Assert and Print are special symbols in python, so we must\n# use an upper-case version of them.\ndef Print(input_, data, message=None, first_n=None, summarize=None,\n name=None):\n \"\"\"Prints a list of tensors.\n\n This is an identity op with the side effect of printing `data` when\n evaluating.\n\n Args:\n input_: A tensor passed through this op.\n data: A list of tensors to print out when op is evaluated.\n message: A string, prefix of the error message.\n first_n: Only log `first_n` number of times. Negative numbers log always;\n this is the default.\n summarize: Only print this many entries of each tensor. If None, then a\n maximum of 3 elements are printed per input tensor.\n name: A name for the operation (optional).\n\n Returns:\n Same tensor as `input_`.\n \"\"\"\n return gen_logging_ops._print(input_, data, message, first_n, summarize, name)\n\n\[email protected](\"Print\")\ndef _PrintGrad(op, *grad):\n return list(grad) + [None] * (len(op.inputs) - 1)\n\n\ndef _Collect(val, collections, default_collections):\n if collections is None:\n collections = default_collections\n for key in collections:\n ops.add_to_collection(key, val)\n\n\ndef histogram_summary(tag, values, collections=None, name=None):\n \"\"\"Outputs a `Summary` protocol buffer with a histogram.\n\n The generated\n [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\n has one summary value containing a histogram for `values`.\n\n This op reports an `InvalidArgument` error if any value is not finite.\n\n Args:\n tag: A `string` `Tensor`. 0-D. Tag to use for the summary value.\n values: A real numeric `Tensor`. Any shape. Values to use to\n build the histogram.\n collections: Optional list of graph collections keys. The new summary op is\n added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.\n name: A name for the operation (optional).\n\n Returns:\n A scalar `Tensor` of type `string`. 
The serialized `Summary` protocol\n buffer.\n \"\"\"\n with ops.name_scope(name, \"HistogramSummary\", [tag, values]) as scope:\n val = gen_logging_ops._histogram_summary(\n tag=tag, values=values, name=scope)\n _Collect(val, collections, [ops.GraphKeys.SUMMARIES])\n return val\n\n\ndef image_summary(tag, tensor, max_images=3, collections=None, name=None):\n \"\"\"Outputs a `Summary` protocol buffer with images.\n\n The summary has up to `max_images` summary values containing images. The\n images are built from `tensor` which must be 4-D with shape `[batch_size,\n height, width, channels]` and where `channels` can be:\n\n * 1: `tensor` is interpreted as Grayscale.\n * 3: `tensor` is interpreted as RGB.\n * 4: `tensor` is interpreted as RGBA.\n\n The images have the same number of channels as the input tensor. For float\n input, the values are normalized one image at a time to fit in the range\n `[0, 255]`. `uint8` values are unchanged. The op uses two different\n normalization algorithms:\n\n * If the input values are all positive, they are rescaled so the largest one\n is 255.\n\n * If any input value is negative, the values are shifted so input value 0.0\n is at 127. They are then rescaled so that either the smallest value is 0,\n or the largest one is 255.\n\n The `tag` argument is a scalar `Tensor` of type `string`. It is used to\n build the `tag` of the summary values:\n\n * If `max_images` is 1, the summary value tag is '*tag*/image'.\n * If `max_images` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.\n\n Args:\n tag: A scalar `Tensor` of type `string`. Used to build the `tag`\n of the summary values.\n tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,\n width, channels]` where `channels` is 1, 3, or 4.\n max_images: Max number of batch elements to generate images for.\n collections: Optional list of ops.GraphKeys. The collections to add the\n summary to. Defaults to [ops.GraphKeys.SUMMARIES]\n name: A name for the operation (optional).\n\n Returns:\n A scalar `Tensor` of type `string`. The serialized `Summary` protocol\n buffer.\n \"\"\"\n with ops.name_scope(name, \"ImageSummary\", [tag, tensor]) as scope:\n val = gen_logging_ops._image_summary(\n tag=tag, tensor=tensor, max_images=max_images, name=scope)\n _Collect(val, collections, [ops.GraphKeys.SUMMARIES])\n return val\n\n\ndef audio_summary(tag,\n tensor,\n sample_rate,\n max_outputs=3,\n collections=None,\n name=None):\n \"\"\"Outputs a `Summary` protocol buffer with audio.\n\n The summary has up to `max_outputs` summary values containing audio. The\n audio is built from `tensor` which must be 3-D with shape `[batch_size,\n frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are\n assumed to be in the range of `[-1.0, 1.0]` with a sample rate of\n `sample_rate`.\n\n The `tag` argument is a scalar `Tensor` of type `string`. It is used to\n build the `tag` of the summary values:\n\n * If `max_outputs` is 1, the summary value tag is '*tag*/audio'.\n * If `max_outputs` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.\n\n Args:\n tag: A scalar `Tensor` of type `string`. 
Used to build the `tag`\n of the summary values.\n tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`\n or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.\n sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the\n signal in hertz.\n max_outputs: Max number of batch elements to generate audio for.\n collections: Optional list of ops.GraphKeys. The collections to add the\n summary to. Defaults to [ops.GraphKeys.SUMMARIES]\n name: A name for the operation (optional).\n\n Returns:\n A scalar `Tensor` of type `string`. The serialized `Summary` protocol\n buffer.\n \"\"\"\n with ops.name_scope(name, \"AudioSummary\", [tag, tensor]) as scope:\n sample_rate = ops.convert_to_tensor(sample_rate, dtype=dtypes.float32,\n name=\"sample_rate\")\n val = gen_logging_ops._audio_summary_v2(tag=tag,\n tensor=tensor,\n max_outputs=max_outputs,\n sample_rate=sample_rate,\n name=scope)\n _Collect(val, collections, [ops.GraphKeys.SUMMARIES])\n return val\n\n\ndef merge_summary(inputs, collections=None, name=None):\n # pylint: disable=line-too-long\n \"\"\"Merges summaries.\n\n This op creates a\n [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\n protocol buffer that contains the union of all the values in the input\n summaries.\n\n When the Op is run, it reports an `InvalidArgument` error if multiple values\n in the summaries to merge use the same tag.\n\n Args:\n inputs: A list of `string` `Tensor` objects containing serialized `Summary`\n protocol buffers.\n collections: Optional list of graph collections keys. The new summary op is\n added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.\n name: A name for the operation (optional).\n\n Returns:\n A scalar `Tensor` of type `string`. The serialized `Summary` protocol\n buffer resulting from the merging.\n \"\"\"\n with ops.name_scope(name, \"MergeSummary\", inputs):\n val = gen_logging_ops._merge_summary(inputs=inputs, name=name)\n _Collect(val, collections, [])\n return val\n\n\ndef merge_all_summaries(key=ops.GraphKeys.SUMMARIES):\n \"\"\"Merges all summaries collected in the default graph.\n\n Args:\n key: `GraphKey` used to collect the summaries. Defaults to\n `GraphKeys.SUMMARIES`.\n\n Returns:\n If no summaries were collected, returns None. Otherwise returns a scalar\n `Tensor` of type `string` containing the serialized `Summary` protocol\n buffer resulting from the merging.\n \"\"\"\n summary_ops = ops.get_collection(key)\n if not summary_ops:\n return None\n else:\n return merge_summary(summary_ops)\n\n\ndef get_summary_op():\n \"\"\"Returns a single Summary op that would run all summaries.\n\n Either existing one from `SUMMARY_OP` collection or merges all existing\n summaries.\n\n Returns:\n If no summaries were collected, returns None. Otherwise returns a scalar\n `Tensor` of type `string` containing the serialized `Summary` protocol\n buffer resulting from the merging.\n \"\"\"\n summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP)\n if summary_op is not None:\n if summary_op:\n summary_op = summary_op[0]\n else:\n summary_op = None\n if summary_op is None:\n summary_op = merge_all_summaries()\n if summary_op is not None:\n ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)\n return summary_op\n\n\ndef scalar_summary(tags, values, collections=None, name=None):\n \"\"\"Outputs a `Summary` protocol buffer with scalar values.\n\n The input `tags` and `values` must have the same shape. 
The generated\n summary has a summary value for each tag-value pair in `tags` and `values`.\n\n Args:\n tags: A `string` `Tensor`. Tags for the summaries.\n values: A real numeric Tensor. Values for the summaries.\n collections: Optional list of graph collections keys. The new summary op is\n added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.\n name: A name for the operation (optional).\n\n Returns:\n A scalar `Tensor` of type `string`. The serialized `Summary` protocol\n buffer.\n \"\"\"\n with ops.name_scope(name, \"ScalarSummary\", [tags, values]) as scope:\n val = gen_logging_ops._scalar_summary(tags=tags, values=values, name=scope)\n _Collect(val, collections, [ops.GraphKeys.SUMMARIES])\n return val\n\n\nops.NotDifferentiable(\"HistogramAccumulatorSummary\")\nops.NotDifferentiable(\"HistogramSummary\")\nops.NotDifferentiable(\"ImageSummary\")\nops.NotDifferentiable(\"AudioSummary\")\nops.NotDifferentiable(\"AudioSummaryV2\")\nops.NotDifferentiable(\"MergeSummary\")\nops.NotDifferentiable(\"ScalarSummary\")\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Builds the CIFAR-10 network.\n\nSummary of available functions:\n\n # Compute input images and labels for training. If you would like to run\n # evaluations, use inputs() instead.\n inputs, labels = distorted_inputs()\n\n # Compute inference on the model inputs to make a prediction.\n predictions = inference(inputs)\n\n # Compute the total loss of the prediction with respect to the labels.\n loss = loss(predictions, labels)\n\n # Create a graph to run one step of training with respect to the loss.\n train_op = train(loss, global_step)\n\"\"\"\n# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport os\nimport re\nimport sys\nimport tarfile\n\nfrom six.moves import urllib\nimport tensorflow as tf\n\nfrom tensorflow.models.image.cifar10 import cifar10_input\n\nFLAGS = tf.app.flags.FLAGS\n\n# Basic model parameters.\ntf.app.flags.DEFINE_integer('batch_size', 128,\n \"\"\"Number of images to process in a batch.\"\"\")\ntf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',\n \"\"\"Path to the CIFAR-10 data directory.\"\"\")\ntf.app.flags.DEFINE_boolean('use_fp16', False,\n \"\"\"Train the model using fp16.\"\"\")\n\n# Global constants describing the CIFAR-10 data set.\nIMAGE_SIZE = cifar10_input.IMAGE_SIZE\nNUM_CLASSES = cifar10_input.NUM_CLASSES\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n\n# Constants describing the training process.\nMOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.\nNUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.\nLEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.\nINITIAL_LEARNING_RATE = 0.1 # Initial learning rate.\n\n# If a model is trained with multiple GPUs, prefix all Op names with tower_name\n# to differentiate the operations. Note that this prefix is removed from the\n# names of the summaries when visualizing a model.\nTOWER_NAME = 'tower'\n\nDATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'\n\n\ndef _activation_summary(x):\n \"\"\"Helper to create summaries for activations.\n\n Creates a summary that provides a histogram of activations.\n Creates a summary that measures the sparsity of activations.\n\n Args:\n x: Tensor\n Returns:\n nothing\n \"\"\"\n # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training\n # session. 
This helps the clarity of presentation on tensorboard.\n tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\n tf.histogram_summary(tensor_name + '/activations', x)\n tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))\n\n\ndef _variable_on_cpu(name, shape, initializer):\n \"\"\"Helper to create a Variable stored on CPU memory.\n\n Args:\n name: name of the variable\n shape: list of ints\n initializer: initializer for Variable\n\n Returns:\n Variable Tensor\n \"\"\"\n with tf.device('/cpu:0'):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var\n\n\ndef _variable_with_weight_decay(name, shape, stddev, wd):\n \"\"\"Helper to create an initialized Variable with weight decay.\n\n Note that the Variable is initialized with a truncated normal distribution.\n A weight decay is added only if one is specified.\n\n Args:\n name: name of the variable\n shape: list of ints\n stddev: standard deviation of a truncated Gaussian\n wd: add L2Loss weight decay multiplied by this float. If None, weight\n decay is not added for this Variable.\n\n Returns:\n Variable Tensor\n \"\"\"\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var\n\n\ndef distorted_inputs():\n \"\"\"Construct distorted input for CIFAR training using the Reader ops.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n\n Raises:\n ValueError: If no data_dir\n \"\"\"\n if not FLAGS.data_dir:\n raise ValueError('Please supply a data_dir')\n data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')\n images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,\n batch_size=FLAGS.batch_size)\n if FLAGS.use_fp16:\n images = tf.cast(images, tf.float16)\n labels = tf.cast(labels, tf.float16)\n return images, labels\n\n\ndef inputs(eval_data):\n \"\"\"Construct input for CIFAR evaluation using the Reader ops.\n\n Args:\n eval_data: bool, indicating if one should use the train or eval data set.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 
1D tensor of [batch_size] size.\n\n Raises:\n ValueError: If no data_dir\n \"\"\"\n if not FLAGS.data_dir:\n raise ValueError('Please supply a data_dir')\n data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')\n images, labels = cifar10_input.inputs(eval_data=eval_data,\n data_dir=data_dir,\n batch_size=FLAGS.batch_size)\n if FLAGS.use_fp16:\n images = tf.cast(images, tf.float16)\n labels = tf.cast(labels, tf.float16)\n return images, labels\n\n\ndef inference(images):\n \"\"\"Build the CIFAR-10 model.\n\n Args:\n images: Images returned from distorted_inputs() or inputs().\n\n Returns:\n Logits.\n \"\"\"\n # We instantiate all variables using tf.get_variable() instead of\n # tf.Variable() in order to share variables across multiple GPU training runs.\n # If we only ran this model on a single GPU, we could simplify this function\n # by replacing all instances of tf.get_variable() with tf.Variable().\n #\n # conv1\n with tf.variable_scope('conv1') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 3, 64],\n stddev=5e-2,\n wd=0.0)\n conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv1 = tf.nn.relu(pre_activation, name=scope.name)\n _activation_summary(conv1)\n\n # pool1\n pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\n padding='SAME', name='pool1')\n # norm1\n norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm1')\n\n # conv2\n with tf.variable_scope('conv2') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 64, 64],\n stddev=5e-2,\n wd=0.0)\n conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv2 = tf.nn.relu(pre_activation, name=scope.name)\n _activation_summary(conv2)\n\n # norm2\n norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm2')\n # pool2\n pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1], padding='SAME', name='pool2')\n\n # local3\n with tf.variable_scope('local3') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])\n dim = reshape.get_shape()[1].value\n weights = _variable_with_weight_decay('weights', shape=[dim, 384],\n stddev=0.04, wd=0.004)\n biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))\n local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n _activation_summary(local3)\n\n # local4\n with tf.variable_scope('local4') as scope:\n weights = _variable_with_weight_decay('weights', shape=[384, 192],\n stddev=0.04, wd=0.004)\n biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))\n local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)\n _activation_summary(local4)\n\n # linear layer(WX + b),\n # We don't apply softmax here because \n # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits \n # and performs the softmax internally for efficiency.\n with tf.variable_scope('softmax_linear') as scope:\n weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],\n stddev=1/192.0, wd=0.0)\n biases = _variable_on_cpu('biases', [NUM_CLASSES],\n tf.constant_initializer(0.0))\n softmax_linear = tf.add(tf.matmul(local4, weights), biases, 
name=scope.name)\n _activation_summary(softmax_linear)\n\n return softmax_linear\n\n\ndef loss(logits, labels):\n \"\"\"Add L2Loss to all the trainable variables.\n\n Add summary for \"Loss\" and \"Loss/avg\".\n Args:\n logits: Logits from inference().\n labels: Labels from distorted_inputs or inputs(). 1-D tensor\n of shape [batch_size]\n\n Returns:\n Loss tensor of type float.\n \"\"\"\n # Calculate the average cross entropy loss across the batch.\n labels = tf.cast(labels, tf.int64)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits, labels, name='cross_entropy_per_example')\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n tf.add_to_collection('losses', cross_entropy_mean)\n\n # The total loss is defined as the cross entropy loss plus all of the weight\n # decay terms (L2 loss).\n return tf.add_n(tf.get_collection('losses'), name='total_loss')\n\n\ndef _add_loss_summaries(total_loss):\n \"\"\"Add summaries for losses in CIFAR-10 model.\n\n Generates moving average for all losses and associated summaries for\n visualizing the performance of the network.\n\n Args:\n total_loss: Total loss from loss().\n Returns:\n loss_averages_op: op for generating moving averages of losses.\n \"\"\"\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n losses = tf.get_collection('losses')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.scalar_summary(l.op.name +' (raw)', l)\n tf.scalar_summary(l.op.name, loss_averages.average(l))\n\n return loss_averages_op\n\n\ndef train(total_loss, global_step):\n \"\"\"Train CIFAR-10 model.\n\n Create an optimizer and apply to all trainable variables. 
Add moving\n average for all trainable variables.\n\n Args:\n total_loss: Total loss from loss().\n global_step: Integer Variable counting the number of training steps\n processed.\n Returns:\n train_op: op for training.\n \"\"\"\n # Variables that affect learning rate.\n num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size\n decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,\n global_step,\n decay_steps,\n LEARNING_RATE_DECAY_FACTOR,\n staircase=True)\n tf.scalar_summary('learning_rate', lr)\n\n # Generate moving averages of all losses and associated summaries.\n loss_averages_op = _add_loss_summaries(total_loss)\n\n # Compute gradients.\n with tf.control_dependencies([loss_averages_op]):\n opt = tf.train.GradientDescentOptimizer(lr)\n grads = opt.compute_gradients(total_loss)\n\n # Apply gradients.\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n # Add histograms for trainable variables.\n for var in tf.trainable_variables():\n tf.histogram_summary(var.op.name, var)\n\n # Add histograms for gradients.\n for grad, var in grads:\n if grad is not None:\n tf.histogram_summary(var.op.name + '/gradients', grad)\n\n # Track the moving averages of all trainable variables.\n variable_averages = tf.train.ExponentialMovingAverage(\n MOVING_AVERAGE_DECAY, global_step)\n variables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n with tf.control_dependencies([apply_gradient_op, variables_averages_op]):\n train_op = tf.no_op(name='train')\n\n return train_op\n\n\ndef maybe_download_and_extract():\n \"\"\"Download and extract the tarball from Alex's website.\"\"\"\n dest_directory = FLAGS.data_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n \n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n"
] |
[
[
"tensorflow.train.LoggingTensorHook",
"tensorflow.train.SummaryWriterCache.get",
"tensorflow.python.training.monitored_session.Scaffold",
"tensorflow.contrib.testing.FakeSummaryWriter.uninstall",
"tensorflow.python.training.basic_session_run_hooks._SecondOrStepTimer",
"tensorflow.summary.scalar",
"tensorflow.contrib.testing.FakeSummaryWriter",
"tensorflow.Graph",
"tensorflow.assign_add",
"tensorflow.Variable",
"tensorflow.test.main",
"tensorflow.train.SessionRunContext",
"tensorflow.train.GlobalStepWaiterHook",
"tensorflow.initialize_all_variables",
"tensorflow.contrib.framework.load_variable",
"tensorflow.Session",
"tensorflow.contrib.testing.FakeSummaryWriter.install",
"tensorflow.test.TestCase.setUp",
"tensorflow.train.StopAtStepHook",
"tensorflow.global_variables_initializer",
"tensorflow.no_op",
"tensorflow.train.SummarySaverHook",
"tensorflow.train.StepCounterHook",
"tensorflow.train.Scaffold",
"tensorflow.constant",
"tensorflow.train.CheckpointSaverHook",
"tensorflow.assign",
"tensorflow.train.SummaryWriterCache.clear",
"tensorflow.contrib.framework.get_or_create_global_step",
"tensorflow.python.training.monitored_session._HookedSession"
],
[
"tensorflow.test.mock.patch.object",
"tensorflow.test.main",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.python.util.deprecation.deprecated_arg_values",
"tensorflow.python.util.deprecation.deprecated"
],
[
"tensorflow.core.framework.summary_pb2.Summary"
],
[
"tensorflow.python.framework.ops.NotDifferentiable",
"tensorflow.python.ops.gen_logging_ops._merge_summary",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.ops.gen_logging_ops._scalar_summary",
"tensorflow.python.ops.gen_logging_ops._audio_summary_v2",
"tensorflow.python.ops.gen_logging_ops._print",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.gen_logging_ops._histogram_summary",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.gen_logging_ops._image_summary"
],
[
"tensorflow.device",
"tensorflow.get_variable",
"tensorflow.control_dependencies",
"tensorflow.nn.max_pool",
"tensorflow.cast",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.nn.l2_loss",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.nn.conv2d",
"tensorflow.models.image.cifar10.cifar10_input.inputs",
"tensorflow.get_collection",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.truncated_normal_initializer",
"tensorflow.train.exponential_decay",
"tensorflow.trainable_variables",
"tensorflow.matmul",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.no_op",
"tensorflow.add_to_collection",
"tensorflow.nn.bias_add",
"tensorflow.nn.relu",
"tensorflow.models.image.cifar10.cifar10_input.distorted_inputs",
"tensorflow.reduce_mean",
"tensorflow.scalar_summary",
"tensorflow.reshape",
"tensorflow.nn.zero_fraction",
"tensorflow.constant_initializer",
"tensorflow.nn.lrn",
"tensorflow.histogram_summary",
"tensorflow.variable_scope"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"0.12"
]
}
] |
colombod/image-tools
|
[
"40f615307ed95b07de09e5ceb101a5e3d545222d"
] |
[
"app/components/model.py"
] |
[
"import os\nfrom PyQt5.QtWidgets import (QPushButton, QVBoxLayout, QHBoxLayout, QFrame, QLabel, QFileDialog, QMessageBox, QComboBox,\n QProgressBar, QSizePolicy)\nfrom app.components.stretch_wrapper import NoStretch\nimport pandas as pd\nfrom model.predict_from_file import predict_dataset\n\n\nclass Model(QFrame):\n\tdefault_model_text = \"<i>Please select a TensorFlow model directory.<\\i>\"\n\tdefault_file_text = \"<i>Please select a file.<\\i>\"\n\tpredict_text = \"Predict\"\n\tpredicting_text = \"Predicting...\"\n\n\tdef __init__(self, app):\n\t\tsuper().__init__()\n\t\t# initialize our variables\n\t\tself.app = app\n\t\tself.tf_directory = None\n\t\tself.file = None\n\t\tself.init_ui()\n\n\tdef init_ui(self):\n\t\t# make our UI\n\t\tself.setObjectName(\"content\")\n\t\tlayout = QHBoxLayout()\n\t\tlayout.setContentsMargins(0, 0, 0, 0)\n\n\t\t# our main content area\n\t\tcontent = QFrame()\n\t\tcontent_layout = QVBoxLayout()\n\n\t\t# some info\n\t\ttitle = QLabel(\"Model\")\n\t\ttitle.setObjectName(\"h1\")\n\t\tdescription = QLabel(\"Run your exported TensorFlow model from Lobe \\non a .csv or .xlsx of image URLs.\\nThis will produce a new .csv with the original URLs, \\nthe model's prediction, and the model's confidence.\")\n\t\tdescription.setObjectName(\"h2\")\n\n\t\t# model select button\n\t\tself.model_button = QPushButton(\"Select model directory\")\n\t\tself.model_button.clicked.connect(self.select_directory)\n\t\tmodel_container = NoStretch(self.model_button)\n\t\tmodel_container.setObjectName(\"separate\")\n\t\tself.model_label = QLabel(self.default_model_text)\n\n\t\t# file selection button\n\t\tself.file_button = QPushButton(\"Select file\")\n\t\tself.file_button.clicked.connect(self.select_file)\n\t\tbutton_container = NoStretch(self.file_button)\n\t\tbutton_container.setObjectName(\"separate\")\n\t\tself.path_label = QLabel(self.default_file_text)\n\n\t\t# url column header\n\t\tself.url_label = QLabel(\"Column with image URLs:\")\n\t\tself.url_label.setObjectName(\"separateSmall\")\n\t\tself.url_label.hide()\n\t\tself.url_dropdown = QComboBox()\n\t\tself.url_dropdown.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n\t\tself.url_dropdown.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)\n\t\tself.url_container = NoStretch(self.url_dropdown)\n\t\tself.url_container.hide()\n\n\t\t# predict button\n\t\tself.predict_button = QPushButton(self.predict_text)\n\t\tself.predict_button.setEnabled(False)\n\t\tself.predict_button.clicked.connect(self.predict)\n\t\tpredict_container = NoStretch(self.predict_button)\n\t\tpredict_container.setObjectName(\"separate\")\n\n\t\tself.progress_bar = QProgressBar()\n\t\tself.progress_bar.hide()\n\n\t\t# make our content layout\n\t\tcontent_layout.addWidget(title)\n\t\tcontent_layout.addWidget(description)\n\t\tcontent_layout.addWidget(model_container)\n\t\tcontent_layout.addWidget(self.model_label)\n\t\tcontent_layout.addWidget(button_container)\n\t\tcontent_layout.addWidget(self.path_label)\n\t\tcontent_layout.addWidget(self.url_label)\n\t\tcontent_layout.addWidget(self.url_container)\n\t\tcontent_layout.addWidget(predict_container)\n\t\tcontent_layout.addWidget(self.progress_bar)\n\t\tcontent_layout.addStretch(1)\n\t\tcontent.setLayout(content_layout)\n\n\t\tlayout.addWidget(content)\n\t\tlayout.addStretch(1)\n\t\tself.setLayout(layout)\n\n\tdef select_directory(self):\n\t\tself.tf_directory = QFileDialog.getExistingDirectory(self, \"Select TensorFlow Model Directory\")\n\t\tself.model_label.setText(f\"<i>{self.tf_directory}</i>\" if 
self.tf_directory else self.default_model_text)\n\t\tself.check_predict_button()\n\n\tdef select_file(self):\n\t\tself.file = QFileDialog.getOpenFileName(self, 'Select CSV File', filter=\"CSV (*.csv *.xlsx)\")[0]\n\t\tself.path_label.setText(f\"<i>{self.file}</i>\" if self.file else self.default_file_text)\n\t\tself.parse_headers()\n\t\tself.check_predict_button()\n\n\tdef check_predict_button(self):\n\t\t# enable the button when we have both a model and file\n\t\tif self.tf_directory and self.file:\n\t\t\tself.predict_button.setEnabled(True)\n\t\telse:\n\t\t\tself.predict_button.setEnabled(False)\n\n\tdef parse_headers(self):\n\t\tif self.file:\n\t\t\t# read the file for its headers and set our dropdown boxes appropriately\n\t\t\ttry:\n\t\t\t\tif os.path.splitext(self.file)[1] == \".csv\":\n\t\t\t\t\tcsv = pd.read_csv(self.file, header=0)\n\t\t\t\telse:\n\t\t\t\t\tcsv = pd.read_excel(self.file, header=0)\n\t\t\t\tself.url_dropdown.clear()\n\t\t\t\tfor header in list(csv.columns):\n\t\t\t\t\tself.url_dropdown.addItem(header)\n\t\t\t\tself.url_dropdown.adjustSize()\n\t\t\t\tself.url_label.show()\n\t\t\t\tself.url_container.show()\n\t\t\texcept Exception as e:\n\t\t\t\tQMessageBox.about(self, \"Alert\", f\"Error reading csv: {e}\")\n\t\t\t\tself.clear_headers()\n\t\telse:\n\t\t\tself.clear_headers()\n\n\tdef clear_headers(self):\n\t\tself.url_dropdown.clear()\n\t\tself.url_label.hide()\n\t\tself.url_container.hide()\n\n\tdef predict(self):\n\t\t# disable the buttons so we can't click again\n\t\tself.predict_button.setEnabled(False)\n\t\tself.predict_button.setText(self.predicting_text)\n\t\tself.model_button.setEnabled(False)\n\t\tself.file_button.setEnabled(False)\n\t\tself.progress_bar.setValue(0)\n\t\tself.progress_bar.show()\n\t\tself.app.processEvents()\n\t\turl_col = self.url_dropdown.currentText()\n\t\ttry:\n\t\t\tpredict_dataset(model_dir=self.tf_directory, filepath=self.file, url_col=url_col, progress_hook=self.progress_hook)\n\t\texcept Exception as e:\n\t\t\tQMessageBox.about(self, \"Alert\", f\"Error creating dataset: {e}\")\n\t\t\tself.done()\n\n\tdef progress_hook(self, current, total):\n\t\tself.progress_bar.setValue(float(current) / total * 100)\n\t\tif current == total:\n\t\t\tself.done()\n\t\t# make sure to update the UI\n\t\tself.app.processEvents()\n\n\tdef done(self):\n\t\tself.progress_bar.setValue(0)\n\t\tself.progress_bar.hide()\n\t\tself.predict_button.setEnabled(True)\n\t\tself.predict_button.setText(self.predict_text)\n\t\tself.model_button.setEnabled(True)\n\t\tself.file_button.setEnabled(True)\n\t\tself.app.processEvents()\n"
] |
[
[
"pandas.read_excel",
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
sbisdog/multi_task
|
[
"63f5236649dd344f1582eba0ff630a635b045be8",
"63f5236649dd344f1582eba0ff630a635b045be8"
] |
[
"public/test_scripts/test_on_coco.py",
"public/detection/models/backbone.py"
] |
[
"import time\nimport random\nimport argparse\nimport json\nimport os\nimport sys\nimport warnings\n\nBASE_DIR = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(BASE_DIR)\nwarnings.filterwarnings('ignore')\n\nfrom tqdm import tqdm\nfrom thop import profile\nfrom thop import clever_format\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom public.path import COCO2017_path\nfrom public.detection.dataset.cocodataset import Collater\nfrom public.detection.models.retinanet import RetinaNet\nfrom public.detection.models.fcos import FCOS\nfrom public.detection.models.centernet import CenterNet\nfrom public.detection.models.yolov3 import YOLOV3\nfrom public.detection.models.decode import RetinaDecoder, FCOSDecoder, CenterNetDecoder, YOLOV3Decoder\nfrom public.detection.dataset.cocodataset import CocoDetection, Normalize, Resize\nfrom pycocotools.cocoeval import COCOeval\n\n\ndef _retinanet(arch, use_pretrained_model, pretrained_model_path, num_classes):\n model = RetinaNet(arch, num_classes=num_classes)\n if use_pretrained_model:\n pretrained_models = torch.load(pretrained_model_path,\n map_location=torch.device('cpu'))\n\n # only load state_dict()\n model.load_state_dict(pretrained_models, strict=False)\n\n return model\n\n\ndef _fcos(arch, use_pretrained_model, pretrained_model_path, num_classes):\n model = FCOS(arch, num_classes=num_classes)\n if use_pretrained_model:\n pretrained_models = torch.load(pretrained_model_path,\n map_location=torch.device('cpu'))\n\n # only load state_dict()\n model.load_state_dict(pretrained_models, strict=False)\n\n return model\n\n\ndef _centernet(arch, use_pretrained_model, pretrained_model_path, num_classes):\n model = CenterNet(arch, num_classes=num_classes)\n if use_pretrained_model:\n pretrained_models = torch.load(pretrained_model_path,\n map_location=torch.device('cpu'))\n\n # only load state_dict()\n model.load_state_dict(pretrained_models, strict=False)\n\n return model\n\n\ndef _yolov3(arch, use_pretrained_model, pretrained_model_path, num_classes):\n model = YOLOV3(arch, num_classes=num_classes)\n if use_pretrained_model:\n pretrained_models = torch.load(pretrained_model_path,\n map_location=torch.device('cpu'))\n\n # only load state_dict()\n model.load_state_dict(pretrained_models, strict=False)\n\n return model\n\n\ndef validate(val_dataset, model, decoder, args):\n if args.use_gpu:\n model = model.module\n # switch to evaluate mode\n model.eval()\n with torch.no_grad():\n all_eval_result = evaluate_coco(val_dataset, model, decoder, args)\n\n return all_eval_result\n\n\ndef evaluate_coco(val_dataset, model, decoder, args):\n results, image_ids = [], []\n indexes = []\n for index in range(len(val_dataset)):\n indexes.append(index)\n eval_collater = Collater()\n val_loader = DataLoader(val_dataset,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.num_workers,\n collate_fn=eval_collater.next)\n\n start_time = time.time()\n\n for i, data in tqdm(enumerate(val_loader)):\n images, scales = torch.tensor(data['img']), torch.tensor(data['scale'])\n per_batch_indexes = indexes[i * args.batch_size:(i + 1) *\n args.batch_size]\n if args.use_gpu:\n images = images.cuda().float()\n else:\n images = images.float()\n\n if args.detector == \"retinanet\":\n cls_heads, reg_heads, batch_anchors = model(images)\n scores, classes, boxes = decoder(cls_heads, reg_heads,\n batch_anchors)\n elif 
args.detector == \"fcos\":\n cls_heads, reg_heads, center_heads, batch_positions = model(images)\n scores, classes, boxes = decoder(cls_heads, reg_heads,\n center_heads, batch_positions)\n elif args.detector == \"centernet\":\n heatmap_output, offset_output, wh_output = model(images)\n scores, classes, boxes = decoder(heatmap_output, offset_output,\n wh_output)\n elif args.detector == \"yolov3\":\n obj_heads, reg_heads, cls_heads, batch_anchors = model(images)\n scores, classes, boxes = decoder(obj_heads, reg_heads, cls_heads,\n batch_anchors)\n\n scores, classes, boxes = scores.cpu(), classes.cpu(), boxes.cpu()\n scales = scales.unsqueeze(-1).unsqueeze(-1)\n boxes /= scales\n\n for per_image_scores, per_image_classes, per_image_boxes, index in zip(\n scores, classes, boxes, per_batch_indexes):\n # for coco_eval,we need [x_min,y_min,w,h] format pred boxes\n per_image_boxes[:, 2:] -= per_image_boxes[:, :2]\n\n for object_score, object_class, object_box in zip(\n per_image_scores, per_image_classes, per_image_boxes):\n object_score = float(object_score)\n object_class = int(object_class)\n object_box = object_box.tolist()\n if object_class == -1:\n break\n\n image_result = {\n 'image_id':\n val_dataset.image_ids[index],\n 'category_id':\n val_dataset.find_category_id_from_coco_label(object_class),\n 'score':\n object_score,\n 'bbox':\n object_box,\n }\n results.append(image_result)\n\n image_ids.append(val_dataset.image_ids[index])\n\n print('{}/{}'.format(index, len(val_dataset)), end='\\r')\n\n testing_time = (time.time() - start_time)\n per_image_testing_time = testing_time / len(val_dataset)\n\n print(f\"per_image_testing_time:{per_image_testing_time:.3f}\")\n\n if not len(results):\n print(f\"No target detected in test set images\")\n return\n\n json.dump(results,\n open('{}_bbox_results.json'.format(val_dataset.set_name), 'w'),\n indent=4)\n\n # load results in COCO evaluation tool\n coco_true = val_dataset.coco\n coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(\n val_dataset.set_name))\n\n coco_eval = COCOeval(coco_true, coco_pred, 'bbox')\n coco_eval.params.imgIds = image_ids\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n all_eval_result = coco_eval.stats\n\n return all_eval_result\n\n\ndef test_model(args):\n print(args)\n if args.use_gpu:\n # use one Graphics card to test\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n if not torch.cuda.is_available():\n raise Exception(\"need gpu to test network!\")\n torch.cuda.empty_cache()\n\n if args.seed is not None:\n random.seed(args.seed)\n if args.use_gpu:\n torch.cuda.manual_seed_all(args.seed)\n cudnn.deterministic = True\n\n if args.use_gpu:\n cudnn.benchmark = True\n cudnn.enabled = True\n\n coco_val_dataset = CocoDetection(\n image_root_dir=os.path.join(COCO2017_path, 'images/val2017'),\n annotation_root_dir=os.path.join(COCO2017_path, 'annotations'),\n set=\"val2017\",\n transform=transforms.Compose([\n Normalize(),\n Resize(resize=args.input_image_size),\n ]))\n\n if args.detector == \"retinanet\":\n model = _retinanet(args.backbone, args.use_pretrained_model,\n args.pretrained_model_path, args.num_classes)\n decoder = RetinaDecoder(image_w=args.input_image_size,\n image_h=args.input_image_size,\n min_score_threshold=args.min_score_threshold)\n elif args.detector == \"fcos\":\n model = _fcos(args.backbone, args.use_pretrained_model,\n args.pretrained_model_path, args.num_classes)\n decoder = FCOSDecoder(image_w=args.input_image_size,\n image_h=args.input_image_size,\n 
min_score_threshold=args.min_score_threshold)\n elif args.detector == \"centernet\":\n model = _centernet(args.backbone, args.use_pretrained_model,\n args.pretrained_model_path, args.num_classes)\n decoder = CenterNetDecoder(\n image_w=args.input_image_size,\n image_h=args.input_image_size,\n min_score_threshold=args.min_score_threshold)\n elif args.detector == \"yolov3\":\n model = _yolov3(args.backbone, args.use_pretrained_model,\n args.pretrained_model_path, args.num_classes)\n decoder = YOLOV3Decoder(image_w=args.input_image_size,\n image_h=args.input_image_size,\n min_score_threshold=args.min_score_threshold)\n else:\n print(\"unsupport detection model!\")\n return\n\n flops_input = torch.randn(1, 3, args.input_image_size,\n args.input_image_size)\n flops, params = profile(model, inputs=(flops_input, ))\n flops, params = clever_format([flops, params], \"%.3f\")\n print(\n f\"backbone:{args.backbone},detector: '{args.detector}', flops: {flops}, params: {params}\"\n )\n\n if args.use_gpu:\n model = model.cuda()\n decoder = decoder.cuda()\n model = nn.DataParallel(model)\n\n print(f\"start eval.\")\n all_eval_result = validate(coco_val_dataset, model, decoder, args)\n print(f\"eval done.\")\n if all_eval_result is not None:\n print(\n f\"val: backbone: {args.backbone}, detector: {args.detector}, IoU=0.5:0.95,area=all,maxDets=100,mAP:{all_eval_result[0]:.3f}, IoU=0.5,area=all,maxDets=100,mAP:{all_eval_result[1]:.3f}, IoU=0.75,area=all,maxDets=100,mAP:{all_eval_result[2]:.3f}, IoU=0.5:0.95,area=small,maxDets=100,mAP:{all_eval_result[3]:.3f}, IoU=0.5:0.95,area=medium,maxDets=100,mAP:{all_eval_result[4]:.3f}, IoU=0.5:0.95,area=large,maxDets=100,mAP:{all_eval_result[5]:.3f}, IoU=0.5:0.95,area=all,maxDets=1,mAR:{all_eval_result[6]:.3f}, IoU=0.5:0.95,area=all,maxDets=10,mAR:{all_eval_result[7]:.3f}, IoU=0.5:0.95,area=all,maxDets=100,mAR:{all_eval_result[8]:.3f}, IoU=0.5:0.95,area=small,maxDets=100,mAR:{all_eval_result[9]:.3f}, IoU=0.5:0.95,area=medium,maxDets=100,mAR:{all_eval_result[10]:.3f}, IoU=0.5:0.95,area=large,maxDets=100,mAR:{all_eval_result[11]:.3f}\"\n )\n\n return\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='PyTorch COCO Detection Testing')\n parser.add_argument('--backbone', type=str, help='name of backbone')\n parser.add_argument('--detector', type=str, help='name of detector')\n parser.add_argument('--batch_size',\n type=int,\n default=1,\n help='inference batch size')\n parser.add_argument('--num_workers',\n type=int,\n default=1,\n help='num workers')\n parser.add_argument('--num_classes',\n type=int,\n default=80,\n help='model class num')\n parser.add_argument('--min_score_threshold',\n type=float,\n default=0.05,\n help='min score threshold')\n parser.add_argument(\"--use_pretrained_model\",\n action=\"store_true\",\n help=\"use pretrained model or not\")\n parser.add_argument('--pretrained_model_path',\n type=str,\n help='pretrained model path')\n parser.add_argument(\"--use_gpu\",\n action=\"store_true\",\n help=\"use gpu to test or not\")\n parser.add_argument('--seed', type=int, default=0, help='seed')\n parser.add_argument('--input_image_size',\n type=int,\n default=667,\n help='input image size')\n args = parser.parse_args()\n test_model(args)\n",
"import os\nimport sys\nimport warnings\n\nBASE_DIR = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.dirname(\n os.path.abspath(__file__)))))\nsys.path.append(BASE_DIR)\nwarnings.filterwarnings('ignore')\n\nimport torch\nimport torch.nn as nn\nfrom public.imagenet import models\n\n\nclass Darknet19Backbone(nn.Module):\n def __init__(self):\n super(Darknet19Backbone, self).__init__()\n self.model = models.__dict__['darknet19'](**{\"pretrained\": True})\n del self.model.avgpool\n del self.model.layer7\n\n def forward(self, x):\n x = self.model.layer1(x)\n x = self.model.maxpool1(x)\n x = self.model.layer2(x)\n C3 = self.model.layer3(x)\n C4 = self.model.layer4(C3)\n C5 = self.model.layer5(C4)\n C5 = self.model.layer6(C5)\n\n del x\n\n return [C3, C4, C5]\n\n\nclass Darknet53Backbone(nn.Module):\n def __init__(self):\n super(Darknet53Backbone, self).__init__()\n self.model = models.__dict__['darknet53'](**{\"pretrained\": True})\n del self.model.fc\n del self.model.avgpool\n\n def forward(self, x):\n x = self.model.conv1(x)\n x = self.model.conv2(x)\n x = self.model.block1(x)\n x = self.model.conv3(x)\n x = self.model.block2(x)\n x = self.model.conv4(x)\n C3 = self.model.block3(x)\n C4 = self.model.conv5(C3)\n C4 = self.model.block4(C4)\n C5 = self.model.conv6(C4)\n C5 = self.model.block5(C5)\n\n del x\n\n return [C3, C4, C5]\n\n\nclass EfficientNetBackbone(nn.Module):\n def __init__(self, efficientnet_type=\"efficientnet_b0\"):\n super(EfficientNetBackbone, self).__init__()\n self.model = models.__dict__[efficientnet_type](**{\"pretrained\": True})\n del self.model.dropout\n del self.model.fc\n del self.model.avgpool\n del self.model.conv_head\n\n def forward(self, x):\n x = self.model.stem(x)\n\n feature_maps = []\n last_x = None\n for index, block in enumerate(self.model.blocks):\n x = block(x)\n if block.stride == 2:\n feature_maps.append(last_x)\n elif index == len(self.model.blocks) - 1:\n feature_maps.append(x)\n last_x = x\n\n del last_x\n\n return feature_maps[2:]\n\n\nclass ResNetBackbone(nn.Module):\n def __init__(self, resnet_type=\"resnet50\"):\n super(ResNetBackbone, self).__init__()\n self.model = models.__dict__[resnet_type](**{\"pretrained\": False})\n del self.model.fc\n del self.model.avgpool\n\n def forward(self, x):\n x = self.model.conv1(x)\n x = self.model.bn1(x)\n x = self.model.relu(x)\n x = self.model.maxpool(x)\n\n C2 = self.model.layer1(x)\n C3 = self.model.layer2(C2)\n C4 = self.model.layer3(C3)\n C5 = self.model.layer4(C4)\n\n del x\n\n return [C2, C3, C4, C5]\n\n\nclass VovNetBackbone(nn.Module):\n def __init__(self, vovnet_type='VoVNet39_se'):\n super(VovNetBackbone, self).__init__()\n self.model = models.__dict__[vovnet_type](**{\"pretrained\": True})\n del self.model.fc\n del self.model.avgpool\n\n def forward(self, x):\n x = self.model.stem(x)\n\n features = []\n for stage in self.model.stages:\n x = stage(x)\n features.append(x)\n\n del x\n\n return features[1:]\n\n\nif __name__ == '__main__':\n # net = ResNetBackbone(resnet_type=\"resnet50\")\n # images = torch.randn(8, 3, 640, 640)\n # [C3, C4, C5] = net(images)\n # print(\"1111\", C3.shape, C4.shape, C5.shape)\n # net = EfficientNetBackbone(efficientnet_type=\"efficientnet_b0\")\n # images = torch.randn(8, 3, 640, 640)\n # [C3, C4, C5] = net(images)\n # print(\"1111\", C3.shape, C4.shape, C5.shape)\n net1 = Darknet53Backbone()\n images = torch.randn(8, 3, 416, 416)\n [C3, C4, C5] = net1(images)\n print(\"1111\", C3.shape, C4.shape, C5.shape)\n net2 = Darknet19Backbone()\n images = 
torch.randn(8, 3, 416, 416)\n [C3, C4, C5] = net2(images)\n print(\"1111\", C3.shape, C4.shape, C5.shape)"
] |
[
[
"torch.randn",
"torch.utils.data.DataLoader",
"torch.cuda.empty_cache",
"torch.tensor",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.device",
"torch.nn.DataParallel"
],
[
"torch.randn"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
interpss/DeepMachineLearning
|
[
"7799e188996105ab51f2aef45262b04d336ad89b"
] |
[
"lfGenNPred/py/single_net/predict_voltage1.py"
] |
[
"'''\r\n Copyright (C) 2005-17 www.interpss.org\r\n \r\n Licensed under the Apache License, Version 2.0 (the \"License\");\r\n you may not use this file except in compliance with the License.\r\n You may obtain a copy of the License at\r\n\r\n http://www.apache.org/licenses/LICENSE-2.0\r\n \r\n Unless required by applicable law or agreed to in writing, software\r\n distributed under the License is distributed on an \"AS IS\" BASIS,\r\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n See the License for the specific language governing permissions and\r\n limitations under the License.\r\n'''\r\n\r\n'''\r\n Use NN-model to predict the bus voltage for a set of scale-factors\r\n'''\r\n\r\nfrom datetime import datetime\r\n\r\nimport tensorflow as tf\r\n\r\nimport sys\r\nsys.path.insert(0, '..')\r\n\r\nimport lib.common_func as cf\r\n\r\ntrain_points = 100\r\n\r\n# \r\n# load the IEEE-14Bus case\r\n#\r\nfilename = 'testdata/ieee14.ieee'\r\nnoBus, noBranch = cf.ipss_app.loadCase(filename, 'BusVoltLoadChangeTrainCaseBuilder')\r\nprint(filename, ' loaded, no of Buses, Branches:', noBus, ', ', noBranch)\r\n\r\n# define model size\r\nsize = noBus * 2\r\n#print('size: ', size)\r\n\r\n# define model variables\r\nW1 = tf.Variable(tf.zeros([size,size]))\r\nb1 = tf.Variable(tf.zeros([size]))\r\n\r\ninit = tf.initialize_all_variables()\r\n\r\n# define model\r\n\r\ndef nn_model(data):\r\n output = tf.matmul(data, W1) + b1\r\n return output\r\n\r\n# define loss \r\nx = tf.placeholder(tf.float32, [None, size])\r\ny = tf.placeholder(tf.float32)\r\n\r\nerror = tf.square(nn_model(x) - y)\r\nloss = tf.reduce_sum(error)\r\n\r\n# define training optimization\r\noptimizer = tf.train.GradientDescentOptimizer(cf.learning_rate)\r\ntrain = optimizer.minimize(loss)\r\n\r\n# run the computation graph\r\nwith tf.Session() as sess :\r\n sess.run(init)\r\n \r\n # run the training part\r\n # =====================\r\n \r\n print('Begin training: ', datetime.now())\r\n \r\n # retrieve training set\r\n trainSet = cf.ipss_app.getTrainSet(train_points)\r\n train_x, train_y = cf.transfer2PyArrays(trainSet)\r\n \r\n # run the training part\r\n for i in range(cf.train_steps):\r\n if (i % 1000 == 0) : print('Training step: ', i) \r\n sess.run(train, {x:train_x, y:train_y})\r\n\r\n print('End training: ', datetime.now())\r\n \r\n '''\r\n print('W1: ', sess.run(W1))\r\n print('b1: ', sess.run(b1))\r\n '''\r\n \r\n # run the verification part\r\n # =========================\r\n \r\n # retrieve a test case\r\n for factor in [0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.55] :\r\n #for factor in [0.45, 1.0, 1.55] :\r\n testCase = cf.ipss_app.getTestCase(factor)\r\n test_x, test_y = cf.transfer2PyArrays(testCase) \r\n \r\n # compute model output (network voltage)\r\n model_y = sess.run(nn_model(x), {x:test_x})\r\n #printArray(model_y, 'model_y')\r\n \r\n netVoltage = cf.transfer2JavaDblAry(model_y[0], size)\r\n print('model out mismatch: ', cf.ipss_app.getMismatchInfo(netVoltage))\r\n"
] |
[
[
"tensorflow.matmul",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.placeholder",
"tensorflow.initialize_all_variables",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.Session"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
rsignell-usgs/post_gnome
|
[
"e24492751458570e00d07e7dd1958881f6dfa51b"
] |
[
"post_gnome/kmz_particles.py"
] |
[
"#!/usr/bin/env python\n\n\"\"\"\nCode for workign with particle fiels in mkz\n\nOnly handles reading for now\n\"\"\"\n# for py2/3 compatibility\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport zipfile\nimport base64\nfrom datetime import datetime\n\nimport numpy as np\n\nfrom post_gnome import nc_particles\n\nfile_attributes = nc_particles.file_attributes\nvar_attributes = nc_particles.var_attributes\n\n\nclass Writer(object):\n \"\"\"\n class to write kmz files sutable for Google Earth\n \"\"\"\n time_formatter = '%m/%d/%Y %H:%M'\n\n def __init__(self,\n filename,\n num_timesteps=None,\n ref_time=None,\n file_attributes=file_attributes,\n var_attributes=var_attributes\n ):\n\n \"\"\"\n create a kmz_particle file Writer\n\n Creates the kml / kmz file, opens it for writing,\n writes the global attributes, creates required variables etc.\n\n :param filename: name of kmz file to open - if it exists,\n it will be written over!\n\n :param num_timesteps=None: number of timesteps that will be output. Must be defined for netcdf3.\n Can be None for netcdf4\n :type num_timesteps: integer\n\n :param ref_time=None: reference time for time units (i.e. seconds since..).\n If None, the first time used will be used.\n :type ref_time: datetime object\n\n :param file_attributes: keys and values for teh file-level attributes.\n Defaults to the set defined in this module.\n :type file_attributes: dict\n\n :param var_attributes: dist of variable names, and the keys and values for variable\n attributes.\n Defaults to the set defined in this module.\n :type var_attributes: dict\n\n :param nc_version=3: version of netcdf to use -- must be 3 or 4. If 4, some extra\n features are enabled.\n :type nc_version: integer\n \"\"\"\n\n # strip off the .kml or .kmz\n filename = filename.rstrip(\".kml\").rstrip(\".kmz\")\n\n self.filename = filename + \".kmz\"\n self.kml_name = os.path.split(filename)[-1] + \".kml\"\n\n self.num_timesteps = num_timesteps\n self.ref_time = ref_time\n\n self.file_attributes = file_attributes\n self.var_attributes = var_attributes\n\n # create a list to hold what will be the contents of the kml\n self.kml = [header_template.format(caveat=caveat,\n kml_name=self.kml_name,\n # fixme: this is only doing now!\n valid_timestring=datetime.now().strftime(self.time_formatter),\n issued_timestring=datetime.now().strftime(self.time_formatter),\n )]\n # # fixme: put real data in here from nc_file?\n # self.kml = [header_template.format(caveat=caveat,\n # kml_name=self.kml_name,\n # valid_timestring=model_start_time.strftime(self.time_formatter),\n # issued_timestring=datetime.now().strftime(self.time_formatter),\n # )]\n\n # # Global attributes\n # # put some of this in the kml file?\n # for (name, value) in self.file_attributes.items():\n # setattr(nc, name, value)\n self.closed = False\n\n\n\n\n\n def write_timestep(self, timestamp, timestep, data, uncertain=False):\n \"\"\"\n write the data for a timestep\n\n :param timestamp: the time stamp of the timestep\n :type timestamp: datetime object\n\n :param timestep: the timestep between this and the next data point\n :type timestep: datetime object\n\n :param data: dict of data arrays -- all parameters for a single time step\n :type data: dict\n\n :param uncertain=False: Is this an uncertaintly run?\n :type uncertain: bool\n \"\"\"\n\n start_time = timestamp.isoformat()\n end_time = (timestamp + timestep).isoformat()\n\n\n positions = np.c_[data['longitude'], data['latitude']]\n\n try:\n in_water = 
2\n on_land = 3\n\n water_positions = positions[data['status_codes'] == in_water]\n beached_positions = positions[data['status_codes'] == on_land]\n except KeyError:\n water_positions = positions\n beached_positions = np.zeros((0, 2), dtype=np.float64)\n\n self.kml.append(build_one_timestep(water_positions,\n beached_positions,\n start_time,\n end_time,\n uncertain\n ))\n\n def close(self):\n \"\"\"\n close the kmz file\n\n This forces the write of the file\n \"\"\"\n if not self.closed:\n self.kml.append(footer)\n with zipfile.ZipFile(self.filename, 'w', compression=zipfile.ZIP_DEFLATED) as kmzfile:\n kmzfile.writestr('dot.png', base64.b64decode(DOT))\n kmzfile.writestr('x.png', base64.b64decode(X))\n # write the kml file\n kmzfile.writestr(self.kml_name, \"\".join(self.kml).encode('utf8'))\n self.closed = True\n return True\n else:\n return False\n\n def __del__(self):\n \"\"\" make sure to close the netcdf file \"\"\"\n # anything to close?\n self.close()\n\n\n\nclass Reader(object):\n \"\"\"\n Class to handle reading a nc_particle file\n\n (such as those written by GNOME or the Writer class above)\n \"\"\"\n def __init__(self, kml_file):\n \"\"\"\n initialize a file reader.\n\n :param kml_file: the kml/kmz file to read.\n :type kml_file: string\n\n \"\"\"\n raise NotImplementedError\n\n @property\n def variables(self):\n \"\"\"\n return the names of all the variables associated with the particles\n \"\"\"\n raise NotImplementedError\n\n def __str__(self):\n return (\"kml_particles Reader object:\\n\"\n \"variables: {}\\n\"\n \"number of timesteps: {}\\n\"\n ).format(self.variables, len(self.times))\n\n def get_all_timesteps(self, variables=['latitude', 'longitude']):\n \"\"\"\n returns the requested variables data from all timesteps as a\n dictionary keyed by the variable names\n\n :param variables: the variables desired as a list string names.\n Defaults to ['latitude','longitude']\n :type variables: list of strings\n\n :returns data: returns a dict of arrays -- the keys are the\n variable names, and the values are numpy arrays\n of the data. 
The arrays are the flattened ragged\n array of data.\n \"\"\"\n raise NotImplementedError\n\n def get_units(self, variable):\n \"\"\"\n return the units of the given variable\n\n :param variable: name of the variable for which the units are required\n :type variable: string\n \"\"\"\n raise NotImplementedError\n\n def get_attributes(self, variable):\n \"\"\"\n return all the attributes of the given variable\n\n :param variable: name of the variable for which the attributes are required\n :type variable: string\n \"\"\"\n raise NotImplementedError\n\n def get_timestep(self, timestep, variables=['latitude', 'longitude']):\n \"\"\"\n returns the requested variables data from a given timestep as a\n dictionary keyed by the variable names\n\n :param variables: The variables desired as a list string names.\n Defaults to ['latitude','longitude']\n :type variables: list of strings\n\n :returns data: returns a dict of arrays -- the keys are the\n variable names, and the values are numpy arrays\n of the data.\n \"\"\"\n raise NotImplementedError\n\n def get_individual_trajectory(self, particle_id, variables=['latitude', 'longitude']):\n \"\"\"\n returns the requested variables from trajectory of an individual particle\n\n note: this is inefficient -- it has to read the entire file to get it.\n \"\"\"\n raise NotImplementedError\n\n def close(self):\n \"\"\"\n close the kml file\n\n -- anything to be done?\n \"\"\"\n pass\n\n\n def __del__(self):\n \"\"\" make sure to close the file \"\"\"\n self.close()\n\n\ndef nc2kmz(nc_file, kmz_file=None):\n \"\"\"\n convert a nc_particles file to kmz\n\n :param nc_file: name of nertcdf file to read\n\n :param kmz_file=None: name of kmz file to write. If None, the nc_file's name wil be used, with .kmz as teh extansion.\n\n \"\"\"\n\n if kmz_file is None:\n root = nc_file\n root = root[:-3] if root.endswith(\".nc\") else root\n kmz_file = root + \".kmz\"\n\n reader = nc_particles.Reader(nc_file)\n\n # create a kmz writer:\n writer = Writer(kmz_file)\n variables = reader.variables\n # loop to read / write the data\n for step, time in enumerate(reader.times):\n try:\n timestep = reader.times[step + 1] - time\n except IndexError:\n timestep = reader.times[-1] - reader.times[-2]\n # get the data\n data = reader.get_timestep(step, variables)\n writer.write_timestep(time, timestep, data, uncertain=False)\n writer.close()\n\n return kmz_file\n\n\n# Templates for the kmz files\n\ncaveat = (\"This trajectory was produced by GNOME (General NOAA Operational Modeling\",\n \" Environment), and should be used for educational and planning purposes only\",\n \"--not for a real response. In the event of an oil or chemical spill in U.S.\",\n \"waters, contact the U.S. 
Coast Guard National Response Center at 1-800-424-8802.\"\n )\n\n# The kml templates:\nheader_template = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<kml xmlns=\"http://www.opengis.net/kml/2.2\">\n <Document>\n <name>{kml_name}</name>\n <open>1</open>\n <description><![CDATA[<b>Valid for:</b> {valid_timestring}<br>\n <b>Issued:</b>{issued_timestring} <br>\n {caveat}]]>\n </description>\n\n <Style id=\"RedDotIcon\">\n <IconStyle>\n <scale>0.2</scale>\n <color>ff0000ff</color>\n <Icon>\n <href>dot.png</href>\n </Icon>\n <hotSpot x=\"0.5\" y=\"0.5\" xunits=\"fraction\" yunits=\"fraction\"/>\n </IconStyle>\n <LabelStyle>\n <color>00000000</color>\n </LabelStyle>\n </Style>\n\n <Style id=\"BlackDotIcon\">\n <IconStyle>\n <scale>0.2</scale>\n <Icon>\n <href>dot.png</href>\n </Icon>\n <color>ff000000</color>\n <hotSpot x=\"0.5\" y=\"0.5\" xunits=\"fraction\" yunits=\"fraction\"/>\n </IconStyle>\n <LabelStyle>\n <color>00000000</color>\n </LabelStyle>\n </Style>\n\n <Style id=\"YellowDotIcon\">\n <IconStyle>\n <scale>0.2</scale>\n <Icon>\n <href>dot.png</href>\n </Icon>\n <color>ff00ffff</color>\n <hotSpot x=\"0.5\" y=\"0.5\" xunits=\"fraction\" yunits=\"fraction\"/>\n </IconStyle>\n <LabelStyle>\n <color>00000000</color>\n </LabelStyle>\n </Style>\n\n <Style id=\"RedXIcon\">\n <IconStyle>\n <scale>0.2</scale>\n <color>ff0000ff</color>\n <Icon>\n <href>x.png</href>\n </Icon>\n <hotSpot x=\"0.5\" y=\"0.5\" xunits=\"fraction\" yunits=\"fraction\"/>\n </IconStyle>\n <LabelStyle>\n <color>00000000</color>\n </LabelStyle>\n </Style>\n\n <Style id=\"BlackXIcon\">\n <IconStyle>\n <scale>0.2</scale>\n <Icon>\n <href>x.png</href>\n </Icon>\n <color>ff000000</color>\n <hotSpot x=\"0.5\" y=\"0.5\" xunits=\"fraction\" yunits=\"fraction\"/>\n </IconStyle>\n <LabelStyle>\n <color>00000000</color>\n </LabelStyle>\n </Style>\n\n <Style id=\"YellowXIcon\">\n <IconStyle>\n <scale>0.2</scale>\n <Icon>\n <href>x.png</href>\n </Icon>\n <color>ff00ffff</color>\n <hotSpot x=\"0.5\" y=\"0.5\" xunits=\"fraction\" yunits=\"fraction\"/>\n </IconStyle>\n <LabelStyle>\n <color>00000000</color>\n </LabelStyle>\n </Style>\n\"\"\"\n\n\npoint_template = \"\"\" <Point>\n <altitudeMode>relativeToGround</altitudeMode>\n <coordinates>{:.6f},{:.6f},1.000000</coordinates>\n </Point>\n\"\"\"\n\n\ntimestep_header_template = \"\"\"<Folder>\n <name>{date_string}:{certain}</name>\n\"\"\"\n\none_run_header = \"\"\" <Placemark>\n <name>{certain} {status} Splots </name>\n <styleUrl>{style}</styleUrl>\n <TimeSpan id=\"ID\">\n <begin>{start_time}</begin> <!-- kml:dateTime -->\n <end>{end_time}</end> <!-- kml:dateTime -->\n </TimeSpan>\n <MultiGeometry>\n\"\"\"\none_run_footer = \"\"\" </MultiGeometry>\n </Placemark>\n\"\"\"\ntimestep_footer = \"\"\"\n</Folder>\n\"\"\"\n\n\ndef build_one_timestep(floating_positions,\n beached_positions,\n start_time,\n end_time,\n uncertain,\n ):\n\n data = {'certain': \"Uncertainty\" if uncertain else \"Best Guess\",\n 'start_time': start_time,\n 'end_time': end_time,\n 'date_string': start_time,\n }\n kml = []\n kml.append(timestep_header_template.format(**data))\n\n for status, positions in [('Floating', floating_positions),\n ('Beached', beached_positions)]:\n color = \"Red\" if uncertain else \"Yellow\"\n data['style'] = \"#\" + color + \"DotIcon\" if status == \"Floating\" else \"#\" + color + \"XIcon\"\n\n data['status'] = status\n kml.append(one_run_header.format(**data))\n\n for point in positions:\n kml.append(point_template.format(*point[:2]))\n kml.append(one_run_footer)\n 
kml.append(timestep_footer)\n\n return \"\".join(kml)\n\nfooter = \"\"\"\n </Document>\n</kml>\n\"\"\"\n# These icons (these are base64 encoded 3-pixel sized dots in a 32x32 transparent PNG)\n# these were encoded by the \"build_icons\" script\nDOT = \"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAJOgAACToB8GSSSgAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAEASURBVFiF7ZY7DsIwEEQfET09Ej11lFtwK06Re3ANlCoFPQpnoGJoHClCXpOPg10wUhonnnlyvF5vJJFSRdL0P0AOANsZcwqgAkrg6MZuQANcgdckN0ljn52kWlInW537ZjfWd2z4SVIbCP5U6+ZEAThLek4I7/V0cxcBnGaGDyGCK/Htn09ZdkutAnsiBFBHCO9VWzkb+XtBAdyB/Ywy9ekBHPCUqHUQVRHDcV6V74UFUEYMD3paAEdjfIm8nsl7gQVwWyHL62kBNCsAeD2zLcMXcIkUjvPyt+nASZj8KE7ejLJox1lcSIZ7IvqVzCrDkKJeSucARFW2veAP8DO9AXV74Qmb/4vgAAAAAElFTkSuQmCC\"\nX = \"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAN1wAADdcBQiibeAAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAHKSURBVFiFrdXLq01hGMfx12HMzMCUU4zFQEYiROYkEkkpHbeTXI5LSDqHtomBEJGY+RMMGBlKKWVmaiDXzvExsN7admuv9azLU89k7ef5fb/ruhMSVuIy3uEOVhXH++w1mMEbnMFSpITl+Ob/+oOpHuFHiszh+oIVCbPGVx8Sh0vguaYT3lcIdJU4VAGHtwm3agTaShysgcMgYUNAoKnEgQAcVueFqR4l9mMhkHVJ8RbkPt6DxL4g/EreGQ3oIrE3CL86vFd2FidaSOzBfGDn+ihv3KU82UBidxB+o4xV9TBFJSKX/eY4Tt0TfSooUVWzVYzIO326A3yuLj/6YWkjcTuSHRVImG4AH0RzJ1K8PqSUFoKzn8KpQdNd+N3wFoT+OyLwnfjVEB6WqIPv6AAPSVTBt+NnR3itxDj4tiD8Hs52kSiDb8WPQOB9LCp2WkuMwrcE4Q8xMbJ7ro3EcMBmfA8EPCqBt5bIi5uC8McV8Nznm0gkLMPXwMKTADz3haDExoRjgcGnWByEN5EYJLyuGXrWAp57pib7Y8K1ioHnHeC5L1bkP0iYHPPjCyzpCK+SmMdkHliLl8XBVzjaIzz3Ov++H59xF+uR/gJmOo2+fdNArAAAAABJRU5ErkJggg==\"\n\n"
] |
[
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RamParameswaran/pyAFL
|
[
"d1aa027da43ae6f7e757d99b3b1203e27272785b"
] |
[
"pyAFL/teams/models.py"
] |
[
"from bs4 import BeautifulSoup\nimport pandas as pd\n\nfrom pyAFL.base.exceptions import LookupError\nfrom pyAFL.players.models import Player\nfrom pyAFL.requests import requests\n\n\nclass Team(object):\n \"\"\"\n A class to represent an AFL Team.\n\n Attributes\n ----------\n name : str\n team full name.\n players : list\n list of all time players who played for the team (pyAFL player objects).\n games : Pandas DataFrame\n dataframe containing results of all games played by the team.\n\n \"\"\"\n\n def __init__(self, name: str, url_identifier: str):\n \"\"\"\n Constructs the Team object.\n\n Parameters\n ----------\n name : str (required)\n name of the person in format \"[first] [last]\"\n url_identifier : str (required)\n string parameter used in AFLtables URLs to identify team. Note that the naming convention changes from team to team\n Examples: - for Adelaide: url_identifier = \"adelaide\" (see https://afltables.com/afl/stats/teams/adelaide.html)\n - for Greater Western Sydney: url_identifier = \"gws\" (see https://afltables.com/afl/stats/teams/gws.html)\n - for Western Bulldogs: url_identifier = \"bullldogs\" (see https://afltables.com/afl/stats/teams/bullldogs.html)\n\n \"\"\"\n\n self.name = name.title() # Convert to title case for URL string matching\n self.all_time_players_url = f\"https://afltables.com/afl/stats/teams/{url_identifier}.html\" # URL for all-time-players stats\n self.all_time_games_url = f\"https://afltables.com/afl/teams/{url_identifier}/allgames.html\" # URL to all-time-games stats\n\n def __repr__(self):\n return f\"<Team: {self.name}>\"\n\n def __str__(self):\n return self.name\n\n @property\n def players(self):\n \"\"\"\n NB - a network request will be made when this class property is called.\n \"\"\"\n\n return self._get_players()\n\n def _get_players(self):\n \"\"\"\n Returns a list of pyAFL.Player objects for all players contained in `self.all_time_players_url`\n\n Returns\n ----------\n players : list\n list of pyAFL.Player objects\n\n \"\"\"\n resp = requests.get(self.all_time_players_url)\n soup = BeautifulSoup(resp.text, \"html.parser\")\n\n player_table = soup.find(\"table\")\n player_table_body = player_table.find(\"tbody\")\n player_anchor_tags = player_table_body.findAll(\"a\")\n\n players = [\n Player(player.text, url=player.attrs.get(\"href\"))\n for player in player_anchor_tags\n ]\n\n return players\n\n def season_stats(self, year: int):\n \"\"\"\n Returns a Pandas dataframe detailing the season stats for the specified year.\n E.g. for Adelaide the table found at https://afltables.com/afl/stats/2020.html#1\n\n Parameters\n ----------\n year : int (required)\n year as a four-digit integer (e.g. 
2019)\n\n Returns\n ----------\n season_stats : Pandas dataframe\n dataframe summarising individual player (and team total) stats for the specified year.\n\n \"\"\"\n season_player_stats_url = f\"https://afltables.com/afl/stats/{year}.html\"\n resp = requests.get(season_player_stats_url)\n\n if resp.status_code == 404:\n raise Exception(f\"Could not find season stats for year: {year}\")\n\n soup = BeautifulSoup(resp.text, \"html.parser\")\n team_tables = soup.findAll(\"table\")\n\n for table in team_tables:\n if table.find(\"th\"):\n if self.name in table.find(\"th\").text:\n df = pd.read_html(str(table))\n\n if df is None:\n raise LookupError(\n f\"Could not find season stats table for team {self.name} in year {year} at URL https://afltables.com/afl/stats/{year}.html\"\n )\n\n season_stats = df[0]\n season_stats.columns = season_stats.columns.droplevel()\n\n return season_stats\n\n @property\n def games(self):\n return self._get_games()\n\n def _get_games(self):\n \"\"\"\n Returns a Pandas dataframe listing every match contained in `self.all_time_games_url`\n\n Returns\n ----------\n games : Pandas dataframe\n dataframe listing all games played by the team. Contains results and match metadata.\n\n \"\"\"\n resp = requests.get(self.all_time_games_url)\n soup = BeautifulSoup(resp.text, \"html.parser\")\n\n seasons = soup.findAll(\"table\")\n\n dfs = []\n for season_html in seasons:\n df = pd.read_html(str(season_html))[0]\n df.columns = df.columns.droplevel(1)\n df = df.iloc[0:-2, :]\n dfs.append(df)\n\n games = pd.concat(dfs)\n games.index = pd.to_datetime(games.Date)\n games = games.sort_index()\n\n games = games.rename(\n columns={\"A\": \"Against\", \"F\": \"For\", \"R\": \"Result\", \"M\": \"Margin\"}\n )\n\n return games\n"
] |
[
[
"pandas.concat",
"pandas.to_datetime"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
yuzhouhe2000/video-conference-enhancer
|
[
"46aa130c0b7f02db5055c8d15877c8287c2276c7"
] |
[
"Processor/EQ/bp_filter.py"
] |
[
"#TESTING SOSFILT FUNCTION\n\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.io import wavfile\nfrom scipy.io.wavfile import write\n\nsample_rate, sig = wavfile.read('Testing_Files/test_mono.wav')\nsos = signal.ellip(4,5,40,[.009,0.45],btype='bandpass', output='sos')\nfiltered = signal.sosfilt(sos, sig)\n# normalize the filtered signal to [-1, 1] before writing\nwrite_me = filtered / max(abs(filtered))\n# scipy.io.wavfile.write expects (filename, rate, data)\nwavfile.write('Testing_Files/SOS_bpf_test_mono.wav', 44100, write_me)\n\n"
] |
[
[
"scipy.io.wavfile.write",
"scipy.signal.sosfilt",
"scipy.io.wavfile.read",
"scipy.signal.ellip"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
lcrh/falken
|
[
"7545431c7bfa34a9b45c2243cae40dbb58adefaa"
] |
[
"service/learner/brains/policies_test.py"
] |
[
"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the policies module.\"\"\"\n\nfrom absl.testing import absltest\nfrom learner.brains import policies\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom tf_agents.policies import tf_policy\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.trajectories import time_step as ts\n\n\nclass ConstantExamplePolicy(tf_policy.TFPolicy):\n\n def __init__(self):\n obs_spec = tensor_spec.TensorSpec([1], tf.float32)\n time_step_spec = ts.time_step_spec(obs_spec)\n action_spec = [\n tensor_spec.BoundedTensorSpec((), tf.int32, 0, 2),\n tensor_spec.BoundedTensorSpec((), tf.float32, 0, 1.0)]\n super().__init__(time_step_spec, action_spec)\n\n def _distribution(self, time_step, policy_state):\n actions = [\n # A categorical distribution with equal probability for 3 categories.\n tfp.distributions.Categorical(logits=[1, 1, 1]),\n # A standard normal distribution with mean 0 and stddev 1.\n tfp.distributions.Normal(0, 1)]\n return policy_step.PolicyStep(\n action=actions,\n state=policy_state,\n info=())\n\n\nclass PoliciesTest(absltest.TestCase):\n\n def test_greedy_float(self):\n wrapped = ConstantExamplePolicy()\n greedy_float = policies.GreedyFloatPolicy(wrapped)\n\n timestep = ts.transition(\n 1.0, # observation\n 0.0, # reward\n 0.99) # discount\n\n actions = []\n for _ in range(100):\n # Actions come back as a list containing an int tensor and a float tensor.\n # We cast both to float for easier data munging.\n actions.append(np.array(\n greedy_float.action(timestep, ()).action, dtype=np.float32))\n actions = np.array(actions, dtype=np.float32)\n\n # actions is 100 x 2, where actions[:, 0] are outputs from the int action\n # and actions[:,1] are outputs from the float action.\n int_action_values = set(actions[:, 0])\n float_action_values = set(actions[:, 1])\n\n # Stochastic action should have all three categories\n self.assertEqual(int_action_values, {0.0, 1.0, 2.0})\n # Greedy float action should have only the distribution mode\n self.assertEqual(float_action_values, {0.0})\n\n\nif __name__ == '__main__':\n absltest.main()\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hsauro/PathwayModelingBook
|
[
"7faff28e2b79a6e2dc017be6f8e8270aaf31478b"
] |
[
"Chapter 10/crossValidation.py"
] |
[
"import copy\nimport numpy as np, pylab as plt\n\nnp.random.seed (127)\n\ndef gendata():\n    y = []\n    x = np.arange (-1, 2.5, 0.1)\n    for value in x:\n        y.append (1.1*value*value*value - 2.3*value*value + 1.1*value + 2 + np.random.normal (0, 2))\n    return [x, y] \n    \n[x, y] = gendata()\nxn = copy.copy (x); yn = copy.copy (y)\n\nav = []\npolylist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]\nfig, ax = plt.subplots(nrows=4, ncols=4, figsize=(11,9))\n\ncount = 1\n# Try the different models\nfor p in polylist:\n    \n    fu = []\n    for i in range (len (x)):\n        xfit = copy.copy (xn); yfit = copy.copy (yn)\n        # Remove a data point\n        xfit = np.delete (xfit, i)\n        yfit = np.delete (yfit, i)\n        f1 = np.polyfit (xfit, yfit, p)\n        polyfunc = np.poly1d (f1)\n        # Keep a record of the fitted model\n        fu.append (polyfunc)\n        rmse = (polyfunc (xn[i]) - yn[i])**2\n        \n    # Compute average rmse\n    av.append (rmse/len (x))\n    \n    plt.subplot (4,4, count)\n    plt.plot (xn, yn, 'o', markersize=4)\n    for f in fu:\n        plt.plot (x, f(x))\n    count = count + 1\nplt.savefig ('cross.pdf')\n\nplt.plot (x, y, 'o'); plt.show() \nplt.plot (polylist[0:8], av[0:8])\nplt.savefig('avcross.pdf')\n"
] |
[
[
"numpy.polyfit",
"numpy.poly1d",
"numpy.random.seed",
"numpy.arange",
"numpy.delete",
"numpy.random.normal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
szWingLee/pytorch-CycleGAN-and-pix2pix
|
[
"6127c7ad361ff5545beb14a8b814cb81cf369f35",
"6127c7ad361ff5545beb14a8b814cb81cf369f35"
] |
[
"util/image_pool.py",
"scripts/edges/batch_hed.py"
] |
[
"import random\nimport torch\n\n\nclass ImagePool():\n \"\"\"This class implements an image buffer that stores previously generated images.\n\n This buffer enables us to update discriminators using a history of generated images\n rather than the ones produced by the latest generators.\n \"\"\"\n\n def __init__(self, pool_size):\n \"\"\"Initialize the ImagePool class\n\n Parameters:\n pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created\n \"\"\"\n self.pool_size = pool_size\n if self.pool_size > 0: # create an empty pool\n self.num_imgs = 0\n self.images = []\n\n def query(self, images):\n \"\"\"Return an image from the pool.\n\n Parameters:\n images: the latest generated images from the generator\n\n Returns images from the buffer.\n\n By 50/100, the buffer will return input images.\n By 50/100, the buffer will return images previously stored in the buffer,\n and insert the current images to the buffer.\n \"\"\"\n if self.pool_size == 0: # if the buffer size is 0, do nothing\n return images\n return_images = []\n for image in images:\n image = torch.unsqueeze(image.data, 0)\n if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer\n self.num_imgs = self.num_imgs + 1\n self.images.append(image)\n return_images.append(image)\n else:\n p = random.uniform(0, 1)\n if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer\n random_id = random.randint(0, self.pool_size - 1) # randint is inclusive\n tmp = self.images[random_id].clone()\n self.images[random_id] = image\n return_images.append(tmp)\n else: # by another 50% chance, the buffer will return the current image\n return_images.append(image)\n return_images = torch.cat(return_images, 0) # collect all the images and return\n return return_images\n",
"# HED batch processing script; modified from https://github.com/s9xie/hed/blob/master/examples/hed/HED-tutorial.ipynb\n# Step 1: download the hed repo: https://github.com/s9xie/hed\n# Step 2: download the models and protoxt, and put them under {caffe_root}/examples/hed/\n# Step 3: put this script under {caffe_root}/examples/hed/\n# Step 4: run the following script:\n# python batch_hed.py --images_dir=/data/to/path/photos/ --hed_mat_dir=/data/to/path/hed_mat_files/\n# The code sometimes crashes after computation is done. Error looks like \"Check failed: ... driver shutting down\". You can just kill the job.\n# For large images, it will produce gpu memory issue. Therefore, you better resize the images before running this script.\n# Step 5: run the MATLAB post-processing script \"PostprocessHED.m\"\n\n\nimport caffe\nimport numpy as np\nfrom PIL import Image\nimport os\nimport argparse\nimport sys\nimport scipy.io as sio\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='batch proccesing: photos->edges')\n parser.add_argument('--caffe_root', dest='caffe_root', help='caffe root', default='../../', type=str)\n parser.add_argument('--caffemodel', dest='caffemodel', help='caffemodel',\n default='./hed_pretrained_bsds.caffemodel', type=str)\n parser.add_argument('--prototxt', dest='prototxt', help='caffe prototxt file', default='./deploy.prototxt',\n type=str)\n parser.add_argument('--images_dir', dest='images_dir', help='directory to store input photos', type=str)\n parser.add_argument('--hed_mat_dir', dest='hed_mat_dir', help='directory to store output hed edges in mat file',\n type=str)\n parser.add_argument('--border', dest='border', help='padding border', type=int, default=128)\n parser.add_argument('--gpu_id', dest='gpu_id', help='gpu id', type=int, default=1)\n args = parser.parse_args()\n return args\n\n\nargs = parse_args()\nfor arg in vars(args):\n print('[%s] =' % arg, getattr(args, arg))\n# Make sure that caffe is on the python path:\ncaffe_root = args.caffe_root # this file is expected to be in {caffe_root}/examples/hed/\nsys.path.insert(0, caffe_root + 'python')\n\nif not os.path.exists(args.hed_mat_dir):\n print('create output directory %s' % args.hed_mat_dir)\n os.makedirs(args.hed_mat_dir)\n\nimgList = os.listdir(args.images_dir)\nnImgs = len(imgList)\nprint('#images = %d' % nImgs)\n\ncaffe.set_mode_gpu()\ncaffe.set_device(args.gpu_id)\n# load net\nnet = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)\n# pad border\nborder = args.border\n\nfor i in range(nImgs):\n if i % 500 == 0:\n print('processing image %d/%d' % (i, nImgs))\n im = Image.open(os.path.join(args.images_dir, imgList[i]))\n\n in_ = np.array(im, dtype=np.float32)\n in_ = np.pad(in_, ((border, border), (border, border), (0, 0)), 'reflect')\n\n in_ = in_[:, :, 0:3]\n in_ = in_[:, :, ::-1]\n in_ -= np.array((104.00698793, 116.66876762, 122.67891434))\n in_ = in_.transpose((2, 0, 1))\n # remove the following two lines if testing with cpu\n\n # shape for input (data blob is N x C x H x W), set data\n net.blobs['data'].reshape(1, *in_.shape)\n net.blobs['data'].data[...] = in_\n # run net and take argmax for prediction\n net.forward()\n fuse = net.blobs['sigmoid-fuse'].data[0][0, :, :]\n # get rid of the border\n fuse = fuse[border:-border, border:-border]\n # save hed file to the disk\n name, ext = os.path.splitext(imgList[i])\n sio.savemat(os.path.join(args.hed_mat_dir, name + '.mat'), {'edge_predict': fuse})\n"
] |
[
[
"torch.unsqueeze",
"torch.cat"
],
[
"numpy.array",
"numpy.pad"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
likelyzhao/pysot
|
[
"01de9d4817904c68b65ddb8aba47cbf3cb9b695a"
] |
[
"pysot/tracker/siamrpn_tracker.py"
] |
[
"# Copyright (c) SenseTime. All Rights Reserved.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport torch.nn.functional as F\n\nfrom pysot.core.config import cfg\nfrom pysot.utils.anchor import Anchors\nfrom pysot.tracker.base_tracker import SiameseTracker\n\n\n\nclass SiamRPNTracker(SiameseTracker):\n def __init__(self, model, isyolo = False):\n super(SiamRPNTracker, self).__init__()\n self.score_size = (cfg.TRACK.INSTANCE_SIZE - cfg.TRACK.EXEMPLAR_SIZE) // \\\n cfg.ANCHOR.STRIDE + 1 + cfg.TRACK.BASE_SIZE\n self.anchor_num = len(cfg.ANCHOR.RATIOS) * len(cfg.ANCHOR.SCALES)\n hanning = np.hanning(self.score_size)\n window = np.outer(hanning, hanning)\n self.window = np.tile(window.flatten(), self.anchor_num)\n self.anchors = self.generate_anchor(self.score_size)\n self.model = model\n self.model.eval()\n self.isyolo = isyolo\n\n def generate_anchor(self, score_size):\n anchors = Anchors(cfg.ANCHOR.STRIDE,\n cfg.ANCHOR.RATIOS,\n cfg.ANCHOR.SCALES)\n anchor = anchors.anchors\n x1, y1, x2, y2 = anchor[:, 0], anchor[:, 1], anchor[:, 2], anchor[:, 3]\n anchor = np.stack([(x1+x2)*0.5, (y1+y2)*0.5, x2-x1, y2-y1], 1)\n total_stride = anchors.stride\n anchor_num = anchor.shape[0]\n anchor = np.tile(anchor, score_size * score_size).reshape((-1, 4))\n ori = - (score_size // 2) * total_stride\n xx, yy = np.meshgrid([ori + total_stride * dx for dx in range(score_size)],\n [ori + total_stride * dy for dy in range(score_size)])\n xx, yy = np.tile(xx.flatten(), (anchor_num, 1)).flatten(), \\\n np.tile(yy.flatten(), (anchor_num, 1)).flatten()\n anchor[:, 0], anchor[:, 1] = xx.astype(np.float32), yy.astype(np.float32)\n return anchor\n\n def _convert_bbox(self, delta, anchor):\n delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1)\n delta = delta.data.cpu().numpy()\n\n delta[0, :] = delta[0, :] * anchor[:, 2] + anchor[:, 0]\n delta[1, :] = delta[1, :] * anchor[:, 3] + anchor[:, 1]\n delta[2, :] = np.exp(delta[2, :]) * anchor[:, 2]\n delta[3, :] = np.exp(delta[3, :]) * anchor[:, 3]\n return delta\n\n def _convert_bbox_yolo(self, box, info_img):\n h, w, nh, nw, dx, dy = info_img\n y1, x1, y2, x2 = box\n box_h = ((y2 - y1) / nh) * h\n box_w = ((x2 - x1) / nw) * w\n y1 = ((y1 - dy) / nh) * h\n x1 = ((x1 - dx) / nw) * w\n label = [y1, x1, y1 + box_h, x1 + box_w]\n return label\n\n def _convert_score(self, score):\n score = score.permute(1, 2, 3, 0).contiguous().view(2, -1).permute(1, 0)\n score = F.softmax(score, dim=1).data[:, 1].cpu().numpy()\n return score\n\n def _bbox_clip(self, cx, cy, width, height, boundary):\n cx = max(0, min(cx, boundary[1]))\n cy = max(0, min(cy, boundary[0]))\n width = max(10, min(width, boundary[1]))\n height = max(10, min(height, boundary[0]))\n return cx, cy, width, height\n\n def init(self, img, bbox):\n \"\"\"\n args:\n img(np.ndarray): BGR image\n bbox: (x, y, w, h) bbox\n \"\"\"\n self.center_pos = np.array([bbox[0]+(bbox[2]-1)/2,\n bbox[1]+(bbox[3]-1)/2])\n self.size = np.array([bbox[2], bbox[3]])\n\n # calculate z crop size\n w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)\n h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)\n s_z = round(np.sqrt(w_z * h_z))\n\n # calculate channle average\n self.channel_average = np.mean(img, axis=(0, 1))\n\n # get crop\n z_crop, padinfo = self.get_subwindow(img, self.center_pos,\n cfg.TRACK.EXEMPLAR_SIZE,\n s_z, self.channel_average)\n self.model.template(z_crop)\n\n def 
track(self, img):\n \"\"\"\n args:\n img(np.ndarray): BGR image\n return:\n bbox(list):[x, y, width, height]\n \"\"\"\n w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)\n h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)\n s_z = np.sqrt(w_z * h_z)\n scale_z = cfg.TRACK.EXEMPLAR_SIZE / s_z\n s_x = s_z * (cfg.TRACK.INSTANCE_SIZE / cfg.TRACK.EXEMPLAR_SIZE)\n x_crop , padinfo = self.get_subwindow(img, self.center_pos,\n\n cfg.TRACK.INSTANCE_SIZE,\n round(s_x), self.channel_average)\n\n outputs = self.model.track(x_crop)\n\n if not self.isyolo:\n score = self._convert_score(outputs['cls'])\n pred_bbox = self._convert_bbox(outputs['loc'], self.anchors)\n\n def change(r):\n return np.maximum(r, 1. / r)\n\n def sz(w, h):\n pad = (w + h) * 0.5\n return np.sqrt((w + pad) * (h + pad))\n\n # scale penalty\n s_c = change(sz(pred_bbox[2, :], pred_bbox[3, :]) /\n (sz(self.size[0]*scale_z, self.size[1]*scale_z)))\n\n # aspect ratio penalty\n r_c = change((self.size[0]/self.size[1]) /\n (pred_bbox[2, :]/pred_bbox[3, :]))\n penalty = np.exp(-(r_c * s_c - 1) * cfg.TRACK.PENALTY_K)\n pscore = penalty * score\n\n # window penalty\n pscore = pscore * (1 - cfg.TRACK.WINDOW_INFLUENCE) + \\\n self.window * cfg.TRACK.WINDOW_INFLUENCE\n best_idx = np.argmax(pscore)\n\n else:\n score = outputs['cls']\n pred_bbox = outputs['loc']\n\n\n if not self.isyolo:\n bbox = pred_bbox[:, best_idx] / scale_z\n lr = penalty[best_idx] * score[best_idx] * cfg.TRACK.LR\n cx = bbox[0] + self.center_pos[0]\n cy = bbox[1] + self.center_pos[1]\n else:\n bbox = pred_bbox[:, 0] \n #bbox = self.clip_pad(pred_bbox,padinfo)\n bbox_crop = bbox / cfg.TRACK.EXEMPLAR_SIZE * cfg.TRACK.INSTANCE_SIZE\n scale_zz = cfg.TRACK.EXEMPLAR_SIZE / s_x\n #bbox = bbox / scale_zz\n\n #lr = score[0] * cfg.TRACK.LR\n lr = cfg.TRACK.LR\n best_idx =0\n # import cv2\n # cv2.namedWindow(\"video_name\")\n # im_draw = x_crop.cpu().squeeze().numpy().astype(np.uint8).transpose(1, 2, 0).copy()\n #\n # cv2.rectangle(im_draw, (int(bbox_crop[0]), int(bbox_crop[1])),\n # (int(bbox_crop[0] + bbox_crop[2]) , int(bbox_crop[1] + bbox_crop[3])), (255, 0, 0), 3)\n # cv2.imshow(\"video_name\", im_draw)\n # cv2.imwrite(\"test.jpg\",im_draw)\n # cv2.waitKey(4000)\n\n cx = ((bbox[0] + bbox[0] + bbox[2])/2 - cfg.TRACK.EXEMPLAR_SIZE/2) + self.center_pos[0]\n cy = ((bbox[1] + bbox[1] + bbox[3])/2 - cfg.TRACK.EXEMPLAR_SIZE/2) + self.center_pos[1]\n\n # smooth bbox\n width = self.size[0] * (1 - lr) + bbox[2] /scale_zz * lr\n height = self.size[1] * (1 - lr) + bbox[3]/ scale_zz * lr\n\n # clip boundary\n cx, cy, width, height = self._bbox_clip(cx, cy, width,\n height, img.shape[:2])\n\n # udpate state\n self.center_pos = np.array([cx, cy])\n self.size = np.array([width, height])\n\n bbox = [cx - width / 2,\n cy - height / 2,\n width,\n height]\n best_score = score[best_idx]\n return {\n 'bbox': bbox,\n 'best_score': best_score\n }\n"
] |
[
[
"torch.nn.functional.softmax",
"numpy.maximum",
"numpy.sqrt",
"numpy.tile",
"numpy.stack",
"numpy.argmax",
"numpy.mean",
"numpy.hanning",
"numpy.exp",
"numpy.outer",
"numpy.array",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
myprogrammerpersonality/BlackBoxOptimizer
|
[
"e0050480c2d1a79092429d2da99f7a8f1851c07d"
] |
[
"Functions_Final_v3_1.py"
] |
[
"# import packages\nimport numpy as np\nimport pandas as pd\nimport os\nfrom itertools import product\nfrom collections.abc import Iterable\n\n\ndef allowed_output(value, reaction_vol_nl=20000, drop_size_nl=100, verbose=0):\n \"\"\"Based on high ,low and stock concentrations and droplet size calculate how many combinations is possible\n\n Parameters\n ----------\n value : tuple\n (low, high, stock concentration)\n\n Returns\n -------\n calculated_concs :\n a list of possible concentrations\n \n calculated_vols :\n a list of possible volumes\n \"\"\"\n\n if value['Conc_Values']:\n if isinstance(value['Conc_Stock'], Iterable):\n drop_nums = [i * reaction_vol_nl / (drop_size_nl * value['Conc_Stock'][find_stock(value['Conc_Values'], value['Conc_Stock'], i)[0]]) for i in value['Conc_Values']]\n calculated_concs = value['Conc_Values']\n else:\n drop_nums = [i * reaction_vol_nl / (drop_size_nl * value['Conc_Stock']) for i in value['Conc_Values']]\n calculated_concs = value['Conc_Values']\n\n else:\n drop_nums = list(range(int((value['Conc_Min'] * reaction_vol_nl) / (drop_size_nl * value['Conc_Stock'])),\n int((value['Conc_Max'] * reaction_vol_nl) / (drop_size_nl * value['Conc_Stock'])) + 1))\n\n calculated_concs = [drop_num * value['Conc_Stock'] * drop_size_nl / reaction_vol_nl for drop_num in drop_nums]\n\n if verbose:\n print('drops :', drop_nums)\n print('volumes :', [i * drop_size_nl for i in drop_nums])\n print('possible_concentrations :', calculated_concs)\n else:\n return calculated_concs, [i * drop_size_nl for i in drop_nums]\n\n\ndef percentage_possible(data, threshold=40):\n \"\"\"Based on threshold volumes calculate how many combinations of all metabolite is possible to make\n\n Parameters\n ----------\n data : dict\n {'meatbolite name':[possible volumes], ...}\n \n Returns\n -------\n percentage possible : float \n total : int\n total number of combination (includes forbidden one)\n \"\"\"\n lists = list(data.values())\n\n m = [len(i) for i in data.values()]\n\n total = np.prod(np.array([len(i) for i in data.values()]))\n possible = 0\n\n for items in product(*lists):\n if sum(items) <= threshold:\n possible += 1\n \n return (possible/total*100), total\n\ndef find_stock(conc_values, conc_stocks, this_value):\n \"\"\"this function find each concentration value belongs to wich stock concentration for metabolites with multiple stocks\n\n Parameters\n ----------\n conc_values : list\n a list of all possible concentration\n \n conc_stocks : list\n a list of all stocks concentration\n \n this_value : float, int\n concentration value that we find to find it's stock\n \n Returns\n -------\n i:\n index of found stock\n \n out:\n value of found stock\n \"\"\"\n num = len(conc_stocks)\n avg = len(conc_values) / float(num)\n out = []\n last = 0.0\n\n while last < len(conc_values):\n out.append(conc_values[int(last):int(last + avg)])\n last += avg\n\n for i, value in enumerate(out):\n if this_value in value:\n return i, out\n\n# random combination generator function_v3.0\ndef random_combination_generator(concentrations_limits, number_of_combination=100, reaction_vol_nl=10000,\n max_nl=None, drop_size_nl=100, check_repeat=True, rounded=2, verbose=0, make_csv=False, return_df=False):\n \"\"\"this function make random combination that is safe (e.g. 
dont make too much or low concentrated, not excecutable based on drop size, not repetitive)\n\n Parameters\n ----------\n concentrations_limits : dict\n {'name of metabolite': {'Conc_Min': #, 'Conc_Max': #, 'Conc_Values': #, 'Conc_Stock': #, 'Alternatives': #}, ...}\n \n Returns\n -------\n data : pandas.DataFrame\n a dataframe as consist of number_of_combination of random combinations\n \"\"\"\n \n # generating random combinations\n combinations = []\n data_point = 0\n while data_point < number_of_combination:\n input_data = []\n input_vol = []\n # verbosity\n if (data_point % 10000 == 0) and verbose:\n print(data_point)\n\n # generation of random input\n for key, value in concentrations_limits.items():\n # Manual Concentration Value Generation\n if value['Conc_Values']:\n # With Alternatives\n if value['Alternatives']:\n num_alternative = len(value['Alternatives'])\n choice_alternative = np.random.randint(0, num_alternative)\n choice_list = [0 for i in range(num_alternative)]\n choice_list[choice_alternative] = 1\n\n choice_conc = np.random.choice(value['Conc_Values'])\n input_data.append(choice_conc)\n input_data += choice_list\n if isinstance(value['Conc_Stock'], Iterable):\n choice_stock, _ = find_stock(value['Conc_Values'], value['Conc_Stock'], choice_conc)\n input_vol.append(choice_conc/value['Conc_Stock'][choice_stock]*reaction_vol_nl)\n else:\n input_vol.append(choice_conc/value['Conc_Stock']*reaction_vol_nl)\n\n # Without Alternatives\n else:\n choice_conc = np.random.choice(value['Conc_Values'])\n input_data.append(choice_conc)\n if isinstance(value['Conc_Stock'], Iterable):\n choice_stock, _ = find_stock(value['Conc_Values'], value['Conc_Stock'], choice_conc)\n input_vol.append(choice_conc/value['Conc_Stock'][choice_stock]*reaction_vol_nl)\n else:\n input_vol.append(choice_conc/value['Conc_Stock']*reaction_vol_nl)\n\n # Auto Concentration Value Generation\n else:\n # With Alternatives\n if value['Alternatives']:\n num_alternative = len(value['Alternatives'])\n choice_alternative = np.random.randint(0, num_alternative)\n choice_list = [0 for i in range(num_alternative)]\n choice_list[choice_alternative] = 1\n\n drop_num = np.random.randint(round(value['Conc_Min'] * (reaction_vol_nl / drop_size_nl) / value['Conc_Stock']),\n round(value['Conc_Max'] * (reaction_vol_nl / drop_size_nl) / value['Conc_Stock']) + 1)\n\n recalculated_conc = drop_num * value['Conc_Stock'] * drop_size_nl / reaction_vol_nl\n input_data.append(recalculated_conc)\n input_data += choice_list\n input_vol.append(recalculated_conc/value['Conc_Stock']*reaction_vol_nl)\n\n # Without Alternatives\n else:\n drop_num = np.random.randint(round(value['Conc_Min'] * (reaction_vol_nl / drop_size_nl) / value['Conc_Stock']),\n round(value['Conc_Max'] * (reaction_vol_nl / drop_size_nl) / value['Conc_Stock']) + 1)\n\n recalculated_conc = drop_num * value['Conc_Stock'] * drop_size_nl / reaction_vol_nl\n input_data.append(recalculated_conc)\n input_vol.append(recalculated_conc/value['Conc_Stock']*reaction_vol_nl)\n \n # Checks\n if check_repetitive and max_nl:\n if input_data not in combinations and sum(input_vol)<= max_nl:\n combinations.append(input_data)\n data_point += 1\n elif check_repetitive and not max_nl:\n if input_data not in combinations:\n combinations.append(input_data)\n data_point += 1\n elif not check_repetitive and max_nl:\n if sum(input_vol)<= max_nl:\n combinations.append(input_data)\n data_point += 1\n else:\n combinations.append(input_data)\n data_point += 1\n\n # make column name:\n columns_name = []\n for 
key, value in concentrations_limits.items():\n if not value['Alternatives']:\n columns_name.append(key)\n else:\n columns_name.append(key)\n alternative_name = ['{}_{}'.format(key, i) for i in value['Alternatives']]\n columns_name += alternative_name\n\n # making csv file\n if make_csv:\n data = pd.DataFrame(np.array(combinations), columns=columns_name)\n data.to_csv('Random_Combination_1.csv', index=False)\n\n # making dataframe\n if return_df:\n data = pd.DataFrame(np.array(combinations), columns=columns_name)\n return data\n\n return np.array(combinations)\n\n# transform concentration DataFrame to volume (nanolitre) DataFrame\ndef concentration_to_volume(concentrations, concentrations_limits, reaction_mixture_vol_nl=10000,\n fixed_parts={'Lysate': 0.33, 'Saline': 0.1}, round_deg=1, check_water=True):\n \"\"\"Transform concentrations dataframe to volumes dataframe\n option: add a fixed volumes to all combinations like Lysate\n caution: concentrations unit and metabolite name in concentrations and concentrations_limits must be the same.\n\n Parameters\n ----------\n concentrations : pandas.DataFrame\n random_combination_generator output\n \n Returns\n -------\n data : pandas.DataFrame\n a dataframe same as input in shape but volumes data\n \"\"\"\n\n # make a copy of original dataframe to avoid further change than can affect that\n data = concentrations.copy(deep=True)\n data_all = data.copy(deep=True)\n data = data[[i for i in data.columns if '_' not in i]]\n data *= reaction_mixture_vol_nl\n\n for metabolite_name, value in concentrations_limits.items():\n if isinstance(value['Conc_Stock'], Iterable):\n print()\n data[metabolite_name] = [round(data[metabolite_name][i] / value['Conc_Stock'][find_stock(value['Conc_Values'], value['Conc_Stock'], data_all[metabolite_name][i])[0]], round_deg) for i in range(len(data[metabolite_name]))]\n else:\n data[metabolite_name] = [round(data[metabolite_name][i] / value['Conc_Stock'], round_deg) for i in range(len(data[metabolite_name]))]\n\n # add fix parts\n if fixed_parts:\n for key, value in fixed_parts.items():\n data[key] = reaction_mixture_vol_nl * value\n\n # add water to reach the reaction_mixture_vol_nl\n data['water'] = reaction_mixture_vol_nl - data.sum(axis=1)\n\n # for low stock concentration that is not possible to make, raise an error\n # stock conc should set in a way that dont raise this error to avoid further debugging\n if check_water and not all(data['water'] >= 0): raise Exception(\"Oops, too concentrated combination!\")\n\n # add alternative\n # make columns name list:\n columns_name = []\n Type_dic = {}\n Stock_dic = {}\n for key, value in concentrations_limits.items():\n if value['Alternatives']:\n columns_name.append(key)\n columns_name.append('{}_Type'.format(key))\n Type_dic[key] = []\n else:\n columns_name.append(key)\n if isinstance(value['Conc_Stock'], Iterable):\n columns_name.append('{}_Stock_Type'.format(key))\n Stock_dic[key] = []\n\n # Alternatives\n for key in Type_dic.keys():\n data_type = data_all[[i for i in data_all.columns if '{}_'.format(key) in i]]\n for i in data_type.values:\n Type_dic[key].append(concentrations_limits[key]['Alternatives'][np.where(i == 1.0)[0][0]])\n\n Type_list = list(Type_dic.keys())\n for key in Type_list:\n Type_dic['{}_Type'.format(key)] = Type_dic.pop(key)\n\n # Stock\n for key in Stock_dic.keys():\n Stock_dic[key] = [concentrations_limits[key]['Conc_Stock'][find_stock(concentrations_limits[key]['Conc_Values'], concentrations_limits[key]['Conc_Stock'], i)[0]] for i in data_all[key]]\n 
\n Stock_list = list(Stock_dic.keys())\n for key in Stock_list:\n Stock_dic['{}_Stock_Type'.format(key)] = Stock_dic.pop(key)\n\n data_final = pd.concat([data, pd.DataFrame(Type_dic), pd.DataFrame(Stock_dic)], axis=1)\n return data_final[columns_name + list(fixed_parts.keys()) + ['water']]\n\ndef day_finder(file, file_format='csv'):\n \"\"\"Find the first notcompleted day\n\n Parameters\n ----------\n file : \n for now it can only be 'Results'\n \n Returns\n -------\n i : int\n the first notcompleted day\n \"\"\"\n i = 1\n while True:\n if not os.path.isfile('{}_{}.{}'.format(file, i, file_format)):\n return i\n i += 1\n\n\ndef result_preprocess(day, desired_cols, range=20):\n \"\"\"Preprocess Results.csv file to get desired columns and rows\n caution: the target column name MUST be 'yield'\n \n Parameters\n ----------\n day : \n Results_day.csv\n \n desired_cols :\n name of columns that you want from the results file\n \n Returns\n -------\n data_m:\n data in range\n label_m:\n label in range\n data_specials:\n other data\n label_specials:\n other label\n \"\"\"\n results = pd.read_csv('Results_{}.csv'.format(day, day))\n\n # m number pipeline\n data_m = results[desired_cols].iloc[:range, :]\n label_m = results[['yield']].iloc[:range, :]\n\n # reference, control and specials\n data_specials = results[desired_cols].iloc[range:, :]\n label_specials = results[['yield']].iloc[range:, :]\n\n return data_m, label_m, data_specials, label_specials\n\n\ndef check_repetitive(combination, df_main):\n \"\"\"Check to avoid repetitive combinations\n \n Parameters\n ----------\n combination : \n combinations that want to be checked\n \n df_main : pandas.DataFrame\n source dataframe\n \n Returns\n -------\n boolean:\n True: it exist in df_main\n False: it's not\n \"\"\"\n comparison_df = df_main.merge(combination, indicator=True, how='outer')\n if 'both' in comparison_df._merge.unique():\n return False\n else:\n return True\n\n\ndef bayesian_optimization(regressors_list,\n data, label,\n concentrations_limits,\n final_order,\n df_main,\n reaction_vol_nl=20000, max_nl=13200, drop_size_nl=100,\n exploitation=1, exploration=1, test_size=100, pool_size=100000, verbose=0, day=1,\n days_range=[20, 20, 20, 20, 20, 20, 20, 20, 20, 20]):\n \"\"\"Main bayesian optimization function\n \n Parameters\n ----------\n regressors_list : \n a list consists of more than one regressor that has .fit and .predict feature\n \n data : pandas.DataFrame\n all previous day data\n\n label : pandas.DataFrame\n all previous day label\n \n exploitation : 1\n coeficient of focus on higher yield query\n \n exploration : 1\n coefficient of focus on more informative query\n \n test_size : 100\n output combinations number\n \n pool_size : 100000\n how many random combinations to ask from regressor list each round\n caution: this parameter highly affect executions time\n \n Returns\n -------\n chosen_combinations: pandas.DataFrame\n combinations that expected to improve yield\n \"\"\"\n # first fit training data on our models\n for regressor in regressors_list:\n regressor.fit(data.values, label.values)\n\n # make random test data\n df_1 = random_combination_generator(concentrations_limits, number_of_combination=pool_size,\n reaction_vol_nl=reaction_vol_nl,\n max_nl=max_nl, drop_size_nl=drop_size_nl, make_csv=False, return_df=True)\n desired_cols = list(df_1.columns)\n\n df_temp = df_1.copy(deep=True)\n\n # Upper Confidence Bound\n for index, regressor in enumerate(regressors_list):\n df_1['pred_yield_{}'.format(index)] = 
regressor.predict(df_temp.values)\n\n df_1['regressors_std'] = df_1[[str(i) for i in df_1.columns if 'pred_yield' in str(i)]].std(axis=1)\n df_1['mean_vote'] = df_1[[str(i) for i in df_1.columns if 'pred_yield' in str(i)]].mean(axis=1)\n df_1['UCB'] = exploitation * df_1['mean_vote'] + exploration * df_1['regressors_std']\n df_1 = df_1.sort_values(['UCB'], ascending=False)\n\n # check to don`t make repeated combination but it is not likely\n\n chosen_combinations = pd.DataFrame(columns=desired_cols)\n num = 0\n for i in df_1[desired_cols].values:\n temp_combination = pd.DataFrame([i], columns=desired_cols)\n if check_repetitive(temp_combination, df_main):\n num += 1\n chosen_combinations = pd.concat([chosen_combinations, temp_combination]).reset_index(drop=True)\n if num == test_size:\n break\n\n return chosen_combinations[final_order]\n\n# ECHO functions\ndef put_volumes_to_384_wells(volumes_array, starting_well='A1', vertical=False, make_csv=False):\n \"\"\"Make a dataframe as a 384 well plate for each metabolite\n \n Parameters\n ----------\n volumes_array : \n a dataframe with columns are component, each row vol of that components (e.g. volumes.csv) \n \n starting_well : 'A1'\n name of the well in 384 well plate that you want to start filling\n \n vertical:\n if True it will fill the plate column by column top down\n if False it will fill the plate row by row left to right\n \n Returns\n -------\n all_dataframe:\n a list consists of one dataframe for each of metabolite that shows appropriate 384 well plate\n \n named_volumes:\n one separate dataframe that add well name to volume dataframe\n \"\"\"\n if len(volumes_array) > 384: raise ValueError\n\n all_dataframe = {}\n rows_name = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P']\n\n if not vertical:\n from_well = rows_name.index(starting_well[0]) * 24 + int(starting_well[1:]) - 1\n # make each metabolite`s dataframe and add it to dict\n for metabolite_name in volumes_array.columns:\n # first make an all zero dataframe\n dataframe = pd.DataFrame(0.0, index=rows_name, columns=range(1, 25))\n # add data one by one in each row\n # (0, 0)--------->(0,23)\n # .......................\n # (15,0)--------->(15,23)\n for index, value in enumerate(volumes_array[metabolite_name]):\n index += from_well\n dataframe.iloc[index // 24, index % 24] = value\n\n all_dataframe[metabolite_name] = dataframe\n\n # make named volumes dataframe\n named_volumes = volumes_array.copy(deep=True)\n names = ['{}{}'.format(rows_name[index // 24], index % 24 + 1) for index in named_volumes.index]\n named_volumes['well_name'] = names\n\n if vertical:\n from_well = rows_name.index(starting_well[0]) + (int(starting_well[1:]) - 1) * 16\n # make each metabolite`s dataframe and add it to dict\n for metabolite_name in volumes_array.columns:\n # first make an all zero dataframe\n dataframe = pd.DataFrame(0.0, index=rows_name, columns=range(1, 25))\n # add data one by one in each column\n # (0, 0)---->-----(0,23)\n # ||||||..........||||||\n # (15,0)---->-----(15,23)\n for index, value in enumerate(volumes_array[metabolite_name]):\n index += from_well\n dataframe.iloc[index % 16, index // 16] = value\n\n all_dataframe[metabolite_name] = dataframe\n\n # make named volumes dataframe\n named_volumes = volumes_array.copy(deep=True)\n names = ['{}{}'.format(rows_name[(index + from_well) % 16], (index + from_well) // 16 + 1) for index in\n named_volumes.index]\n named_volumes['well_name'] = names\n\n # notice that this function output two value\n return 
all_dataframe, named_volumes\n\n\n# make source to destination dataframe for ECHO machine\ndef source_to_destination(named_volumes, desired_order=None, reset_index=True, check_zero=False):\n \"\"\"Make a dataframe as a 384 well plate for each metabolite\n \n Parameters\n ----------\n named_volume : \n second output of put_volumes_to_384_wells function\n \n Returns\n -------\n all_sources:\n separate dataframe for each metabolite that appended in a dict\n \n aggregated:\n aggregated all_sources to one csv file by your desired order\n \"\"\"\n all_sources = {}\n for metabolite_name in named_volumes.drop(columns=['well_name']):\n transfers = {'Source_Plate_Barcode': [], 'Source_Well': [], 'Destination_Plate_Barcode': [],\n 'Destination_Well': [], 'Transfer_Volume': []}\n for index in range(len(named_volumes)):\n if named_volumes.loc[index, metabolite_name] > 0 or check_zero == False:\n transfers['Source_Plate_Barcode'].append('Plate1')\n transfers['Source_Well'].append('{} well'.format(metabolite_name))\n transfers['Destination_Plate_Barcode'].append('destPlate1')\n transfers['Destination_Well'].append(named_volumes.loc[index, 'well_name'])\n transfers['Transfer_Volume'].append(named_volumes.loc[index, metabolite_name])\n transfers = pd.DataFrame(transfers)\n\n all_sources[metabolite_name] = transfers\n\n # aggregate all dataframe\n aggregated = pd.concat(all_sources.values())\n\n if desired_order:\n aggregated = pd.concat([all_sources[i] for i in desired_order])\n\n if reset_index:\n aggregated = aggregated.reset_index(drop=True)\n\n return all_sources, aggregated\n"
] |
[
[
"pandas.concat",
"numpy.random.choice",
"pandas.DataFrame",
"numpy.array",
"numpy.where",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
junchenfeng/diagnosis_tracing
|
[
"4e26e2ad0c7abc547f22774b6c9c299999a152c3"
] |
[
"StateTracing/train_helper.py"
] |
[
"# -*- coding: utf-8 -*-\r\nfrom torch import optim\r\nfrom torch import tensor,save\r\nfrom torch import cuda\r\nfrom torch.nn.utils import clip_grad_value_\r\n\r\nfrom dataloader import read_data,DataLoader,load_init\r\nfrom cdkt import CDKT\r\n\r\n\r\nuse_cuda = True\r\n\r\nif use_cuda:\r\n cuda.empty_cache()\r\n\r\n\"\"\" training mode\"\"\"\r\nresults = []\r\nf = 3\r\n\r\nmodel = CDKT()\r\nif use_cuda:\r\n model = model.cuda()\r\n\r\noptimizer = optim.Adam(model.parameters(),5*1e-4)\r\nDL = DataLoader(read_data(f'/data/train.{f}.dat'),load_init())\r\nfor r in range(10): # 20-epochs\r\n i = 0\r\n for x,y in DL.samples(72):\r\n X = tensor(x)\r\n Y = tensor(y)\r\n if use_cuda:\r\n X = X.cuda()\r\n Y = Y.cuda()\r\n loss = model.forward(X,Y,True)\r\n \r\n optimizer.zero_grad()\r\n clip_grad_value_(model.parameters(),10)\r\n loss.backward()\r\n optimizer.step()\r\n \r\n i += 1\r\n if i%100 == 0:\r\n loss_val = loss.data.to('cpu').numpy()\r\n print(f'{r:5d}--{i:5d}--{loss_val:.3f}')\r\n \r\n loss_val = loss.data.to('cpu').numpy()\r\n print(f'{r:5d}--{i:5d}--{loss_val:.3f}') \r\n\r\n\"\"\"on testing \"\"\"\r\nresults = []\r\nDL = DataLoader(read_data(f'/data/test.{f}.dat'),load_init())\r\nfor x,y in DL.samples(100):\r\n X = tensor(x)\r\n Y = tensor(y)\r\n if use_cuda:\r\n X = X.cuda()\r\n Y = Y.cuda()\r\n acc = model.forward(X,Y,False)\r\n results.append(acc.tolist())\r\n\r\ntotal_acc = sum(results) / len(results)\r\nprint(total_acc) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] |
[
[
"torch.cuda.empty_cache",
"torch.tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nawar29/dash-sample-apps
|
[
"c0ebb97a1c3afd9955f9a83f585e0bda200f4011"
] |
[
"apps/dash-oil-and-gas/app.py"
] |
[
"# Import required libraries\nimport pickle\nimport copy\nimport pathlib\nimport dash\nimport math\nimport datetime as dt\nimport pandas as pd\nfrom dash.dependencies import Input, Output, State, ClientsideFunction\nimport dash_core_components as dcc\nimport dash_html_components as html\n\n# Multi-dropdown options\nfrom controls import COUNTIES, WELL_STATUSES, WELL_TYPES, WELL_COLORS\n\n# get relative data folder\nPATH = pathlib.Path(__file__).parent\nDATA_PATH = PATH.joinpath(\"data\").resolve()\n\napp = dash.Dash(\n __name__, meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width\"}]\n)\nserver = app.server\n\n# Create controls\ncounty_options = [\n {\"label\": str(COUNTIES[county]), \"value\": str(county)} for county in COUNTIES\n]\n\nwell_status_options = [\n {\"label\": str(WELL_STATUSES[well_status]), \"value\": str(well_status)}\n for well_status in WELL_STATUSES\n]\n\nwell_type_options = [\n {\"label\": str(WELL_TYPES[well_type]), \"value\": str(well_type)}\n for well_type in WELL_TYPES\n]\n\n\n# Load data\ndf = pd.read_csv(DATA_PATH.joinpath(\"wellspublic.csv\"), low_memory=False)\ndf[\"Date_Well_Completed\"] = pd.to_datetime(df[\"Date_Well_Completed\"])\ndf = df[df[\"Date_Well_Completed\"] > dt.datetime(1960, 1, 1)]\n\ntrim = df[[\"API_WellNo\", \"Well_Type\", \"Well_Name\"]]\ntrim.index = trim[\"API_WellNo\"]\ndataset = trim.to_dict(orient=\"index\")\n\npoints = pickle.load(open(DATA_PATH.joinpath(\"points.pkl\"), \"rb\"))\n\n\n# Create global chart template\nmapbox_access_token = \"pk.eyJ1IjoiamFja2x1byIsImEiOiJjajNlcnh3MzEwMHZtMzNueGw3NWw5ZXF5In0.fk8k06T96Ml9CLGgKmk81w\"\n\nlayout = dict(\n autosize=True,\n automargin=True,\n margin=dict(l=30, r=30, b=20, t=40),\n hovermode=\"closest\",\n plot_bgcolor=\"#F9F9F9\",\n paper_bgcolor=\"#F9F9F9\",\n legend=dict(font=dict(size=10), orientation=\"h\"),\n title=\"Satellite Overview\",\n mapbox=dict(\n accesstoken=mapbox_access_token,\n style=\"light\",\n center=dict(lon=-78.05, lat=42.54),\n zoom=7,\n ),\n)\n\n# Create app layout\napp.layout = html.Div(\n [\n dcc.Store(id=\"aggregate_data\"),\n # empty Div to trigger javascript file for graph resizing\n html.Div(id=\"output-clientside\"),\n html.Div(\n [\n html.Div(\n [\n html.Img(\n src=app.get_asset_url(\"dash-logo.png\"),\n id=\"plotly-image\",\n style={\n \"height\": \"60px\",\n \"width\": \"auto\",\n \"margin-bottom\": \"25px\",\n },\n )\n ],\n className=\"one-third column\",\n ),\n html.Div(\n [\n html.Div(\n [\n html.H3(\n \"New York Oil and Gas\",\n style={\"margin-bottom\": \"0px\"},\n ),\n html.H5(\n \"Production Overview\", style={\"margin-top\": \"0px\"}\n ),\n ]\n )\n ],\n className=\"one-half column\",\n id=\"title\",\n ),\n html.Div(\n [\n html.A(\n html.Button(\"Learn More\", id=\"learn-more-button\"),\n href=\"https://plot.ly/dash/pricing/\",\n )\n ],\n className=\"one-third column\",\n id=\"button\",\n ),\n ],\n id=\"header\",\n className=\"row flex-display\",\n style={\"margin-bottom\": \"25px\"},\n ),\n html.Div(\n [\n html.Div(\n [\n html.P(\n \"Filter by construction date (or select range in histogram):\",\n className=\"control_label\",\n ),\n dcc.RangeSlider(\n id=\"year_slider\",\n min=1960,\n max=2017,\n value=[1990, 2010],\n className=\"dcc_control\",\n ),\n html.P(\"Filter by well status:\", className=\"control_label\"),\n dcc.RadioItems(\n id=\"well_status_selector\",\n options=[\n {\"label\": \"All \", \"value\": \"all\"},\n {\"label\": \"Active only \", \"value\": \"active\"},\n {\"label\": \"Customize \", \"value\": \"custom\"},\n ],\n 
value=\"active\",\n labelStyle={\"display\": \"inline-block\"},\n className=\"dcc_control\",\n ),\n dcc.Dropdown(\n id=\"well_statuses\",\n options=well_status_options,\n multi=True,\n value=list(WELL_STATUSES.keys()),\n className=\"dcc_control\",\n ),\n dcc.Checklist(\n id=\"lock_selector\",\n options=[{\"label\": \"Lock camera\", \"value\": \"locked\"}],\n className=\"dcc_control\",\n value=[],\n ),\n html.P(\"Filter by well type:\", className=\"control_label\"),\n dcc.RadioItems(\n id=\"well_type_selector\",\n options=[\n {\"label\": \"All \", \"value\": \"all\"},\n {\"label\": \"Productive only \", \"value\": \"productive\"},\n {\"label\": \"Customize \", \"value\": \"custom\"},\n ],\n value=\"productive\",\n labelStyle={\"display\": \"inline-block\"},\n className=\"dcc_control\",\n ),\n dcc.Dropdown(\n id=\"well_types\",\n options=well_type_options,\n multi=True,\n value=list(WELL_TYPES.keys()),\n className=\"dcc_control\",\n ),\n ],\n className=\"pretty_container four columns\",\n id=\"cross-filter-options\",\n ),\n html.Div(\n [\n html.Div(\n [\n html.Div(\n [html.H6(id=\"well_text\"), html.P(\"No. of Wells\")],\n id=\"wells\",\n className=\"mini_container\",\n ),\n html.Div(\n [html.H6(id=\"gasText\"), html.P(\"Gas\")],\n id=\"gas\",\n className=\"mini_container\",\n ),\n html.Div(\n [html.H6(id=\"oilText\"), html.P(\"Oil\")],\n id=\"oil\",\n className=\"mini_container\",\n ),\n html.Div(\n [html.H6(id=\"waterText\"), html.P(\"Water\")],\n id=\"water\",\n className=\"mini_container\",\n ),\n ],\n id=\"info-container\",\n className=\"row container-display\",\n ),\n html.Div(\n [dcc.Graph(id=\"count_graph\")],\n id=\"countGraphContainer\",\n className=\"pretty_container\",\n ),\n ],\n id=\"right-column\",\n className=\"eight columns\",\n ),\n ],\n className=\"row flex-display\",\n ),\n html.Div(\n [\n html.Div(\n [dcc.Graph(id=\"main_graph\")],\n className=\"pretty_container seven columns\",\n ),\n html.Div(\n [dcc.Graph(id=\"individual_graph\")],\n className=\"pretty_container five columns\",\n ),\n ],\n className=\"row flex-display\",\n ),\n html.Div(\n [\n html.Div(\n [dcc.Graph(id=\"pie_graph\")],\n className=\"pretty_container seven columns\",\n ),\n html.Div(\n [dcc.Graph(id=\"aggregate_graph\")],\n className=\"pretty_container five columns\",\n ),\n ],\n className=\"row flex-display\",\n ),\n ],\n id=\"mainContainer\",\n style={\"display\": \"flex\", \"flex-direction\": \"column\"},\n)\n\n\n# Helper functions\ndef human_format(num):\n if num == 0:\n return \"0\"\n\n magnitude = int(math.log(num, 1000))\n mantissa = str(int(num / (1000 ** magnitude)))\n return mantissa + [\"\", \"K\", \"M\", \"G\", \"T\", \"P\"][magnitude]\n\n\ndef filter_dataframe(df, well_statuses, well_types, year_slider):\n dff = df[\n df[\"Well_Status\"].isin(well_statuses)\n & df[\"Well_Type\"].isin(well_types)\n & (df[\"Date_Well_Completed\"] > dt.datetime(year_slider[0], 1, 1))\n & (df[\"Date_Well_Completed\"] < dt.datetime(year_slider[1], 1, 1))\n ]\n return dff\n\n\ndef produce_individual(api_well_num):\n try:\n points[api_well_num]\n except:\n return None, None, None, None\n\n index = list(\n range(min(points[api_well_num].keys()), max(points[api_well_num].keys()) + 1)\n )\n gas = []\n oil = []\n water = []\n\n for year in index:\n try:\n gas.append(points[api_well_num][year][\"Gas Produced, MCF\"])\n except:\n gas.append(0)\n try:\n oil.append(points[api_well_num][year][\"Oil Produced, bbl\"])\n except:\n oil.append(0)\n try:\n water.append(points[api_well_num][year][\"Water Produced, bbl\"])\n 
except:\n water.append(0)\n\n return index, gas, oil, water\n\n\ndef produce_aggregate(selected, year_slider):\n\n index = list(range(max(year_slider[0], 1985), 2016))\n gas = []\n oil = []\n water = []\n\n for year in index:\n count_gas = 0\n count_oil = 0\n count_water = 0\n for api_well_num in selected:\n try:\n count_gas += points[api_well_num][year][\"Gas Produced, MCF\"]\n except:\n pass\n try:\n count_oil += points[api_well_num][year][\"Oil Produced, bbl\"]\n except:\n pass\n try:\n count_water += points[api_well_num][year][\"Water Produced, bbl\"]\n except:\n pass\n gas.append(count_gas)\n oil.append(count_oil)\n water.append(count_water)\n\n return index, gas, oil, water\n\n\n# Create callbacks\napp.clientside_callback(\n ClientsideFunction(namespace=\"clientside\", function_name=\"resize\"),\n Output(\"output-clientside\", \"children\"),\n [Input(\"count_graph\", \"figure\")],\n)\n\n\[email protected](\n Output(\"aggregate_data\", \"data\"),\n [\n Input(\"well_statuses\", \"value\"),\n Input(\"well_types\", \"value\"),\n Input(\"year_slider\", \"value\"),\n ],\n)\ndef update_production_text(well_statuses, well_types, year_slider):\n\n dff = filter_dataframe(df, well_statuses, well_types, year_slider)\n selected = dff[\"API_WellNo\"].values\n index, gas, oil, water = produce_aggregate(selected, year_slider)\n return [human_format(sum(gas)), human_format(sum(oil)), human_format(sum(water))]\n\n\n# Radio -> multi\[email protected](\n Output(\"well_statuses\", \"value\"), [Input(\"well_status_selector\", \"value\")]\n)\ndef display_status(selector):\n if selector == \"all\":\n return list(WELL_STATUSES.keys())\n elif selector == \"active\":\n return [\"AC\"]\n return []\n\n\n# Radio -> multi\[email protected](Output(\"well_types\", \"value\"), [Input(\"well_type_selector\", \"value\")])\ndef display_type(selector):\n if selector == \"all\":\n return list(WELL_TYPES.keys())\n elif selector == \"productive\":\n return [\"GD\", \"GE\", \"GW\", \"IG\", \"IW\", \"OD\", \"OE\", \"OW\"]\n return []\n\n\n# Slider -> count graph\[email protected](Output(\"year_slider\", \"value\"), [Input(\"count_graph\", \"selectedData\")])\ndef update_year_slider(count_graph_selected):\n\n if count_graph_selected is None:\n return [1990, 2010]\n\n nums = [int(point[\"pointNumber\"]) for point in count_graph_selected[\"points\"]]\n return [min(nums) + 1960, max(nums) + 1961]\n\n\n# Selectors -> well text\[email protected](\n Output(\"well_text\", \"children\"),\n [\n Input(\"well_statuses\", \"value\"),\n Input(\"well_types\", \"value\"),\n Input(\"year_slider\", \"value\"),\n ],\n)\ndef update_well_text(well_statuses, well_types, year_slider):\n\n dff = filter_dataframe(df, well_statuses, well_types, year_slider)\n return dff.shape[0]\n\n\[email protected](\n [\n Output(\"gasText\", \"children\"),\n Output(\"oilText\", \"children\"),\n Output(\"waterText\", \"children\"),\n ],\n [Input(\"aggregate_data\", \"data\")],\n)\ndef update_text(data):\n return data[0] + \" mcf\", data[1] + \" bbl\", data[2] + \" bbl\"\n\n\n# Selectors -> main graph\[email protected](\n Output(\"main_graph\", \"figure\"),\n [\n Input(\"well_statuses\", \"value\"),\n Input(\"well_types\", \"value\"),\n Input(\"year_slider\", \"value\"),\n ],\n [State(\"lock_selector\", \"value\"), State(\"main_graph\", \"relayoutData\")],\n)\ndef make_main_figure(\n well_statuses, well_types, year_slider, selector, main_graph_layout\n):\n\n dff = filter_dataframe(df, well_statuses, well_types, year_slider)\n\n traces = []\n for well_type, dfff in 
dff.groupby(\"Well_Type\"):\n trace = dict(\n type=\"scattermapbox\",\n lon=dfff[\"Surface_Longitude\"],\n lat=dfff[\"Surface_latitude\"],\n text=dfff[\"Well_Name\"],\n customdata=dfff[\"API_WellNo\"],\n name=WELL_TYPES[well_type],\n marker=dict(size=4, opacity=0.6),\n )\n traces.append(trace)\n\n # relayoutData is None by default, and {'autosize': True} without relayout action\n if main_graph_layout is not None and selector is not None and \"locked\" in selector:\n if \"mapbox.center\" in main_graph_layout.keys():\n lon = float(main_graph_layout[\"mapbox.center\"][\"lon\"])\n lat = float(main_graph_layout[\"mapbox.center\"][\"lat\"])\n zoom = float(main_graph_layout[\"mapbox.zoom\"])\n layout[\"mapbox\"][\"center\"][\"lon\"] = lon\n layout[\"mapbox\"][\"center\"][\"lat\"] = lat\n layout[\"mapbox\"][\"zoom\"] = zoom\n\n figure = dict(data=traces, layout=layout)\n return figure\n\n\n# Main graph -> individual graph\[email protected](Output(\"individual_graph\", \"figure\"), [Input(\"main_graph\", \"hoverData\")])\ndef make_individual_figure(main_graph_hover):\n\n layout_individual = copy.deepcopy(layout)\n\n if main_graph_hover is None:\n main_graph_hover = {\n \"points\": [\n {\"curveNumber\": 4, \"pointNumber\": 569, \"customdata\": 31101173130000}\n ]\n }\n\n chosen = [point[\"customdata\"] for point in main_graph_hover[\"points\"]]\n index, gas, oil, water = produce_individual(chosen[0])\n\n if index is None:\n annotation = dict(\n text=\"No data available\",\n x=0.5,\n y=0.5,\n align=\"center\",\n showarrow=False,\n xref=\"paper\",\n yref=\"paper\",\n )\n layout_individual[\"annotations\"] = [annotation]\n data = []\n else:\n data = [\n dict(\n type=\"scatter\",\n mode=\"lines+markers\",\n name=\"Gas Produced (mcf)\",\n x=index,\n y=gas,\n line=dict(shape=\"spline\", smoothing=2, width=1, color=\"#fac1b7\"),\n marker=dict(symbol=\"diamond-open\"),\n ),\n dict(\n type=\"scatter\",\n mode=\"lines+markers\",\n name=\"Oil Produced (bbl)\",\n x=index,\n y=oil,\n line=dict(shape=\"spline\", smoothing=2, width=1, color=\"#a9bb95\"),\n marker=dict(symbol=\"diamond-open\"),\n ),\n dict(\n type=\"scatter\",\n mode=\"lines+markers\",\n name=\"Water Produced (bbl)\",\n x=index,\n y=water,\n line=dict(shape=\"spline\", smoothing=2, width=1, color=\"#92d8d8\"),\n marker=dict(symbol=\"diamond-open\"),\n ),\n ]\n layout_individual[\"title\"] = dataset[chosen[0]][\"Well_Name\"]\n\n figure = dict(data=data, layout=layout_individual)\n return figure\n\n\n# Selectors, main graph -> aggregate graph\[email protected](\n Output(\"aggregate_graph\", \"figure\"),\n [\n Input(\"well_statuses\", \"value\"),\n Input(\"well_types\", \"value\"),\n Input(\"year_slider\", \"value\"),\n Input(\"main_graph\", \"hoverData\"),\n ],\n)\ndef make_aggregate_figure(well_statuses, well_types, year_slider, main_graph_hover):\n\n layout_aggregate = copy.deepcopy(layout)\n\n if main_graph_hover is None:\n main_graph_hover = {\n \"points\": [\n {\"curveNumber\": 4, \"pointNumber\": 569, \"customdata\": 31101173130000}\n ]\n }\n\n chosen = [point[\"customdata\"] for point in main_graph_hover[\"points\"]]\n well_type = dataset[chosen[0]][\"Well_Type\"]\n dff = filter_dataframe(df, well_statuses, well_types, year_slider)\n\n selected = dff[dff[\"Well_Type\"] == well_type][\"API_WellNo\"].values\n index, gas, oil, water = produce_aggregate(selected, year_slider)\n\n data = [\n dict(\n type=\"scatter\",\n mode=\"lines\",\n name=\"Gas Produced (mcf)\",\n x=index,\n y=gas,\n line=dict(shape=\"spline\", smoothing=\"2\", 
color=\"#F9ADA0\"),\n ),\n dict(\n type=\"scatter\",\n mode=\"lines\",\n name=\"Oil Produced (bbl)\",\n x=index,\n y=oil,\n line=dict(shape=\"spline\", smoothing=\"2\", color=\"#849E68\"),\n ),\n dict(\n type=\"scatter\",\n mode=\"lines\",\n name=\"Water Produced (bbl)\",\n x=index,\n y=water,\n line=dict(shape=\"spline\", smoothing=\"2\", color=\"#59C3C3\"),\n ),\n ]\n layout_aggregate[\"title\"] = \"Aggregate: \" + WELL_TYPES[well_type]\n\n figure = dict(data=data, layout=layout_aggregate)\n return figure\n\n\n# Selectors, main graph -> pie graph\[email protected](\n Output(\"pie_graph\", \"figure\"),\n [\n Input(\"well_statuses\", \"value\"),\n Input(\"well_types\", \"value\"),\n Input(\"year_slider\", \"value\"),\n ],\n)\ndef make_pie_figure(well_statuses, well_types, year_slider):\n\n layout_pie = copy.deepcopy(layout)\n\n dff = filter_dataframe(df, well_statuses, well_types, year_slider)\n\n selected = dff[\"API_WellNo\"].values\n index, gas, oil, water = produce_aggregate(selected, year_slider)\n\n aggregate = dff.groupby([\"Well_Type\"]).count()\n\n data = [\n dict(\n type=\"pie\",\n labels=[\"Gas\", \"Oil\", \"Water\"],\n values=[sum(gas), sum(oil), sum(water)],\n name=\"Production Breakdown\",\n text=[\n \"Total Gas Produced (mcf)\",\n \"Total Oil Produced (bbl)\",\n \"Total Water Produced (bbl)\",\n ],\n hoverinfo=\"text+value+percent\",\n textinfo=\"label+percent+name\",\n hole=0.5,\n marker=dict(colors=[\"#fac1b7\", \"#a9bb95\", \"#92d8d8\"]),\n domain={\"x\": [0, 0.45], \"y\": [0.2, 0.8]},\n ),\n dict(\n type=\"pie\",\n labels=[WELL_TYPES[i] for i in aggregate.index],\n values=aggregate[\"API_WellNo\"],\n name=\"Well Type Breakdown\",\n hoverinfo=\"label+text+value+percent\",\n textinfo=\"label+percent+name\",\n hole=0.5,\n marker=dict(colors=[WELL_COLORS[i] for i in aggregate.index]),\n domain={\"x\": [0.55, 1], \"y\": [0.2, 0.8]},\n ),\n ]\n layout_pie[\"title\"] = \"Production Summary: {} to {}\".format(\n year_slider[0], year_slider[1]\n )\n layout_pie[\"font\"] = dict(color=\"#777777\")\n layout_pie[\"legend\"] = dict(\n font=dict(color=\"#CCCCCC\", size=\"10\"), orientation=\"h\", bgcolor=\"rgba(0,0,0,0)\"\n )\n\n figure = dict(data=data, layout=layout_pie)\n return figure\n\n\n# Selectors -> count graph\[email protected](\n Output(\"count_graph\", \"figure\"),\n [\n Input(\"well_statuses\", \"value\"),\n Input(\"well_types\", \"value\"),\n Input(\"year_slider\", \"value\"),\n ],\n)\ndef make_count_figure(well_statuses, well_types, year_slider):\n\n layout_count = copy.deepcopy(layout)\n\n dff = filter_dataframe(df, well_statuses, well_types, [1960, 2017])\n g = dff[[\"API_WellNo\", \"Date_Well_Completed\"]]\n g.index = g[\"Date_Well_Completed\"]\n g = g.resample(\"A\").count()\n\n colors = []\n for i in range(1960, 2018):\n if i >= int(year_slider[0]) and i < int(year_slider[1]):\n colors.append(\"rgb(123, 199, 255)\")\n else:\n colors.append(\"rgba(123, 199, 255, 0.2)\")\n\n data = [\n dict(\n type=\"scatter\",\n mode=\"markers\",\n x=g.index,\n y=g[\"API_WellNo\"] / 2,\n name=\"All Wells\",\n opacity=0,\n hoverinfo=\"skip\",\n ),\n dict(\n type=\"bar\",\n x=g.index,\n y=g[\"API_WellNo\"],\n name=\"All Wells\",\n marker=dict(color=colors),\n ),\n ]\n\n layout_count[\"title\"] = \"Completed Wells/Year\"\n layout_count[\"dragmode\"] = \"select\"\n layout_count[\"showlegend\"] = False\n layout_count[\"autosize\"] = True\n\n figure = dict(data=data, layout=layout_count)\n return figure\n\n\n# Main\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n"
] |
[
[
"pandas.to_datetime"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
psydox/Axie_Manager_Bot
|
[
"34894e4f6f3eb55efbdea2d4a1e54ec23ffd1ffd"
] |
[
"src/cogs/loops/axie_trades.py"
] |
[
"##> Imports\nimport math\nimport sys\n\n# > 3rd party dependencies\nimport pandas as pd\nimport gspread\nimport gspread_dataframe as gd\nfrom urllib.request import urlopen\nfrom PIL import Image\nfrom io import BytesIO\n\n# > Discord dependencies\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.tasks import loop\n\n# > Local dependencies\nfrom alerts.api import api_genes, api_owner_axies\nfrom config import config\n\n# Login using the .json file\ngc = gspread.service_account(filename=\"authentication.json\")\n\n\nclass Axie_trades(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n # This serves as a database, saves: \"id\", \"auction\", \"class\", \"breedCount\", \"parts\", \"image\", \"price\"\n self.axie_db = pd.DataFrame({})\n\n # Start function execution\n self.get_axie_auctions.start()\n\n # Could be quicker\n @loop(minutes=15)\n async def get_axie_auctions(self):\n \"\"\"Main function that is looped every hour\"\"\"\n\n # Save all important data in dataframe\n df = pd.DataFrame({})\n\n # Get the address dataframe\n address_df = await self.get_addresses()\n addresses = address_df[\"Address\"].tolist()\n\n # Do this for every address in the dataframe\n for address in addresses:\n owned_axies = await self.get_axies(address)\n owned_axies[\"Manager\"] = address_df.loc[address_df[\"Address\"] == address][\n \"Manager\"\n ].tolist()[0]\n df = pd.concat([df, owned_axies], ignore_index=True)\n\n # If axie_ids is empty\n if self.axie_db.empty:\n self.axie_db = df\n # Compare\n else:\n # Get all ids of current axies\n new_ids = df[\"id\"].tolist()\n old_ids = self.axie_db[\"id\"].tolist()\n\n # Difference: XOR\n diff = list(set(new_ids) ^ set(old_ids))\n\n # Sold\n if len(new_ids) < len(old_ids):\n for id in diff:\n await self.send_msg(self.axie_db, id, \"sold\")\n\n # Buy\n elif len(new_ids) > len(old_ids):\n for id in diff:\n await self.send_msg(df, id, \"bought\")\n\n # No difference in ids\n else:\n # Check if price is not NaN\n new_auctions = df.loc[~df[\"price\"].isna()][\"id\"].tolist()\n old_auctions = self.axie_db.loc[~self.axie_db[\"price\"].isna()][\n \"id\"\n ].tolist()\n\n # Difference: XOR\n auction_diff = list(set(new_auctions) ^ set(old_auctions))\n\n # New listing!\n if len(new_auctions) > len(old_auctions):\n for id in auction_diff:\n await self.send_msg(df, id, \"is selling\")\n\n # Update old db\n self.axie_db = df\n\n async def send_msg(self, df, id, keyword):\n \"\"\"Sends a message in the discord channel\"\"\"\n\n # Set variables based on id and df\n row = df.loc[df[\"id\"] == id]\n link = (\n \"https://marketplace.axieinfinity.com/axie/\" + row[\"id\"].tolist()[0] + \"/\"\n )\n\n # Call genes api\n genes = await self.get_genes(id)\n\n if not genes.empty:\n d = \"\"\n r1 = \"\"\n r2 = \"\"\n r1_title = f\"R1 ({genes['r1 deviation'].tolist()[0]})\"\n r2_title = f\"R2 ({genes['r2 deviation'].tolist()[0]})\"\n\n for part in [\"eyes\", \"ears\", \"mouth\", \"horn\", \"back\", \"tail\"]:\n d += f\"{(genes[part].tolist()[0]['d']['name'])}\\n\"\n r1 += f\"{(genes[part].tolist()[0]['r1']['name'])}\\n\"\n r2 += f\"{(genes[part].tolist()[0]['r2']['name'])}\\n\"\n\n else:\n d = r1 = r2 = \"Unknown\"\n r1_title = \"R1\"\n r2_title = \"R2\"\n\n # Send message in discord channel\n channel = discord.utils.get(\n self.bot.get_all_channels(),\n guild__name=config[\"DEBUG\"][\"GUILD_NAME\"]\n if len(sys.argv) > 1 and sys.argv[1] == \"-test\"\n else config[\"DISCORD\"][\"GUILD_NAME\"],\n name=config[\"LOOPS\"][\"AXIE_TRADES\"][\"CHANNEL\"],\n )\n\n # Price\n 
if not math.isnan(row[\"price\"].tolist()[0]):\n e = discord.Embed(\n title=f\"{row['Manager'].tolist()[0]} {keyword} axie named {row['name'].tolist()[0]} for ${str(row['price'].tolist()[0])}\",\n description=\"\",\n url=link,\n color=0x00FFFF,\n )\n else:\n e = discord.Embed(\n title=f\"{row['Manager'].tolist()[0]} {keyword} axie named {row['name'].tolist()[0]}\",\n description=\"\",\n url=link,\n color=0x00FFFF,\n )\n\n e.set_author(name=\"Axie Manager\", icon_url=self.bot.user.avatar_url)\n\n # Breedcount\n e.add_field(\n name=\":eggplant:\",\n value=str(round(row[\"breedCount\"].tolist()[0])),\n inline=True,\n )\n\n e.add_field(name=\"Class\", value=row[\"class\"].tolist()[0], inline=True)\n if \"stats\" in genes.columns:\n [\n e.add_field(name=stat[1:-5].capitalize(), value=stat[-2:], inline=True)\n for stat in str(genes[\"stats\"].tolist()[0])[1:-28].split(\", \")\n ]\n e.add_field(name=\"D\", value=d, inline=True)\n e.add_field(name=r1_title, value=r1, inline=True)\n e.add_field(name=r2_title, value=r2, inline=True)\n\n # Create cropped image for thumbnail\n try:\n img = Image.open(urlopen(row[\"image\"].tolist()[0]))\n width, height = img.size\n img_cropped = img.crop((300, 220, width - 300, height - 220))\n temp = BytesIO()\n img_cropped.save(temp, img.format)\n temp.seek(0)\n file = discord.File(temp, filename=\"a.png\")\n e.set_thumbnail(url=\"attachment://a.png\")\n await channel.send(file=file, embed=e)\n except Exception:\n pass\n\n await channel.send(embed=e)\n\n async def get_genes(self, id):\n \"\"\"Takes axie id and returns its genes\"\"\"\n\n try:\n response = await api_genes(id)\n except Exception as e:\n print(e)\n print(\"Error fetching api_genes\")\n # Return an empty dataframe, so no crashes will occur\n return pd.DataFrame({})\n\n df = pd.DataFrame.from_dict(response, orient=\"index\")\n genes = df.transpose()\n\n if genes[\"stage\"].tolist()[0] == 1:\n return pd.DataFrame({})\n\n for part in [\"eyes\", \"ears\", \"mouth\", \"horn\", \"back\", \"tail\"]:\n genes[part] = genes[\"traits\"].apply(lambda x: x[part])\n\n # Count deviations for every part\n for part in [\"mouth\", \"horn\", \"back\", \"tail\"]:\n genes[f\"{part} r1\"] = [0 if x[\"d\"] == x[\"r1\"] else 1 for x in genes[part]]\n genes[f\"{part} r2\"] = [0 if x[\"d\"] == x[\"r2\"] else 1 for x in genes[part]]\n\n # Sum all the deviations\n genes[\"r1 deviation\"] = (\n genes[\"mouth r1\"] + genes[\"horn r1\"] + genes[\"back r1\"] + genes[\"tail r1\"]\n )\n genes[\"r2 deviation\"] = (\n genes[\"mouth r2\"] + genes[\"horn r2\"] + genes[\"back r2\"] + genes[\"tail r2\"]\n )\n\n return genes\n\n async def get_addresses(self):\n \"\"\"Gets all Ronin addresses in Scholars spreadsheet\"\"\"\n\n # Open Scholars spreadsheet\n sheet = gc.open(\"Scholars\")\n\n # Get the Scholars and Funds worksheet as dataframe\n scholars = (\n gd.get_as_dataframe(sheet.worksheet(\"Scholars\"))\n .dropna(axis=0, how=\"all\")\n .dropna(axis=1, how=\"all\")\n )\n funds = (\n gd.get_as_dataframe(sheet.worksheet(\"Funds\"))\n .dropna(axis=0, how=\"all\")\n .dropna(axis=1, how=\"all\")\n )\n\n # We only care about these columns\n scholars = scholars[[\"Manager\", \"Address\"]]\n funds = funds.rename(columns={\"Funds Address\": \"Address\"})\n\n # Merge the dataframes\n addresses = pd.concat([scholars, funds], ignore_index=True)\n\n # Replace ronin: with 0x for API\n addresses[\"Address\"] = addresses[\"Address\"].str.replace(\"ronin:\", \"0x\")\n\n return addresses\n\n async def get_axies(self, address):\n \"\"\"\n Processes api results 
and returns the dataframe\n \"\"\"\n\n try:\n df = await api_owner_axies(address)\n except Exception:\n return pd.DataFrame({})\n\n # Replace parts by their part name, if there are any parts available\n if \"parts\" in df.columns:\n df[\"parts\"] = [[d.get(\"name\") for d in x] for x in df[\"parts\"]]\n\n # Save the price in dataframe\n if \"auction\" in df.columns:\n df[\"price\"] = pd.to_numeric(\n df[\"auction\"].apply(lambda x: x[\"currentPriceUSD\"] if x != None else x)\n )\n\n return df\n\n\ndef setup(bot):\n bot.add_cog(Axie_trades(bot))\n"
] |
[
[
"pandas.concat",
"pandas.DataFrame",
"pandas.DataFrame.from_dict"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
jbjjbjjbj/eittek651
|
[
"3f735eb1836fc6e144b885654f71d3fe2b4e2c03",
"3f735eb1836fc6e144b885654f71d3fe2b4e2c03"
] |
[
"examples/selection_rssi_graph.py",
"examples/benchmark_gfsk_modulate.py"
] |
[
"import ad_path\nimport antenna_diversity as ad\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\"\"\"\nThis file will show the insights of a chosen selection algorithm.\nVery nice for seing what is actually done.\n\nIt will output the following data:\n- A plot showing CRC error and the selected branch\n- A plot showing fading values for each plot\n- A printout of all branches with usage statistics\n\nTo make the plot readable only 100 frames are sent through, but this can be\nadjusted.\n\nTo make comparison between algorithms easy, the generator is seeded before.\nTherefore each run will have CRC errors etc. in the same place each time.\n\"\"\"\n\nad_path.nop()\n\nencoder = ad.encoding.SymbolEncoder(2)\nmodulator = ad.modulation.GFSK()\nbranches = 3\nchannel = ad.channel.RayleighAWGNChannel(branches, 10)\n\n# selector = ad.diversity_technique.selection.ReneDif()\nselector = ad.diversity_technique.selection.CRCSelection(branches)\n\nframes = 1000\nslots_to_plot = 150\nbits_per_slot = 440\nslots_per_frame = 1\n\n\ndef make_frame_array():\n frame_array = []\n for i in range(slots_per_frame):\n data = ad.protocols.dect.Full.with_random_payload().to_bytes()\n frame_array.append(data)\n return frame_array\n\n\ndef calculate_db(input):\n return 10 * math.log10(input)\n\n\nslots = 0\nselected = []\nerrors = []\nfading_t = []\nrssi_branch1 = []\nrssi_branch2 = []\nrssi_branch3 = []\nrssi_selected = []\n\nfor frame_number in range(frames):\n frame = make_frame_array()\n for slot in frame:\n symbols = encoder.encode_msb(slot)\n moded = modulator.modulate(symbols)\n recv, h = channel.run(moded)\n fading_t.append(h)\n selc, index = ad.diversity_technique.selection.selection_from_power(\n recv)\n rssi_branch1.append(calculate_db(\n ad.diversity_technique.selection.calculate_power(recv[0][0:32 * 4])))\n rssi_branch2.append(calculate_db(\n ad.diversity_technique.selection.calculate_power(recv[1][0:32 * 4])))\n rssi_branch3.append(calculate_db(\n ad.diversity_technique.selection.calculate_power(recv[2][0:32 * 4])))\n slots += 1\n selected.append(index)\n if(index == 0):\n rssi_selected.append(calculate_db(\n ad.diversity_technique.selection.calculate_power(recv[0][0:32 * 4])))\n elif(index == 1):\n rssi_selected.append(calculate_db(\n ad.diversity_technique.selection.calculate_power(recv[1][0:32 * 4])))\n else:\n rssi_selected.append(calculate_db(\n ad.diversity_technique.selection.calculate_power(recv[2][0:32 * 4])))\n\n # selector.report_crc_status(not error)\n\n channel.frame_sent()\n\n # print(f\"frame_id: {frame_number}\")\n\nplt.figure(figsize=(8, 5))\nplt.plot(rssi_branch1[50:150], '.--')\nplt.plot(rssi_branch2[50:150], '.--')\nplt.plot(rssi_branch3[50:150], '.--')\nplt.plot(rssi_selected[50:150], '-')\nplt.legend(['Branch 1', 'Branch 2', 'Branch 3', 'Selected branch'])\nplt.xlabel('Packet number [-]')\nplt.ylabel('Power [dB]')\nplt.savefig(\"selection_rssi_plot.pdf\")\nplt.show()\n",
"import numpy as np\n\nimport ad_path\nad_path.nop()\nimport antenna_diversity as ad\n\nimport timeit\n\nmodem = ad.modulation.GFSK()\n\ndef run_the_thing():\n modem.modulate(np.random.randint(2, size=10000))\n\ntime = timeit.timeit(run_the_thing, number=100)\nprint(time)\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
c37102001/DQN
|
[
"d9d74787547d8649ec766b93211c7bd159f2484f"
] |
[
"improved_pg/main.py"
] |
[
"import argparse, os\n\nimport matplotlib.pyplot as plt\nimport gym\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\n\nimport importance_sampling\nimport reinforce\n\nplt.style.use('ggplot')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--env-name', type=str, default='CartPole-v0')\nparser.add_argument('--max-steps', type=int, default=200, metavar='N')\nparser.add_argument('--num-episodes', type=int, default=1000, metavar='N')\nparser.add_argument('--num-trajs', type=int, default=10, metavar='N')\nparser.add_argument('--gamma', type=float, default=0.99, metavar='G')\nparser.add_argument('--lr', type=float, default=1e-3, metavar='G')\nparser.add_argument('--hidden_layer', type=int, default=128, metavar='N')\nparser.add_argument('--seed', type=int, default=777, metavar='N', )\nparser.add_argument('--reinforce', action='store_true', help='Use REINFORCE instead of importance sampling')\n\nargs = parser.parse_args()\n\n\ndef main():\n env = gym.make(args.env_name)\n\n env.seed(args.seed)\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n\n if args.reinforce:\n agent = reinforce.Agent(args, env.observation_space.shape[0], env.action_space)\n else:\n agent = importance_sampling.Agent(args, env.observation_space.shape[0], env.action_space)\n\n trajs = []\n result = []\n episode = []\n\n for i_episode in range(args.num_episodes):\n\n s_t = torch.Tensor([env.reset()])\n\n states = []\n actions = []\n log_probs = []\n rewards = []\n\n for t in range(args.max_steps):\n a_t, log_prob = agent.action(s_t)\n s_t1, r_t, done, _ = env.step(a_t.numpy()[0][0])\n states.append(s_t)\n actions.append(a_t)\n log_probs.append(log_prob)\n rewards.append(r_t)\n s_t = torch.Tensor([s_t1])\n\n if done:\n break\n\n if len(trajs) >= args.num_trajs:\n trajs.pop(0)\n\n if args.reinforce:\n ##use most recent trajectory only\n trajs = []\n\n trajs.append((states, actions, rewards, log_probs))\n agent.train_(trajs)\n\n print(\"Episode: {}, reward: {}\".format(i_episode, sum(rewards)))\n if i_episode % 10 == 0:\n episode.append(i_episode)\n result.append(sum(rewards))\n\n env.close()\n return episode, result\n\n\nif __name__ == '__main__':\n args.reinforce = False\n episode1, No_IS_result = main()\n args.reinforce = True\n episode2, IS_result = main()\n\n plt.plot(episode1, No_IS_result)\n plt.plot(episode2, IS_result)\n plt.ylabel('reward')\n plt.xlabel('episodes')\n plt.grid(True)\n plt.savefig('improved_pg.png')\n\n"
] |
[
[
"numpy.random.seed",
"torch.Tensor",
"torch.manual_seed",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sarthakpati/Red-GAN
|
[
"45cee4ca73fb2c0665b76e3df049184511d18bc6"
] |
[
"unet/dataset/isic_dataset.py"
] |
[
"from torch.utils.data import Dataset as BaseDataset\nimport os\nimport cv2\nimport glob\nimport numpy as np\nimport json\n\n\nclass Dataset(BaseDataset):\n\n def __init__(\n self,\n images_dir,\n masks_dir,\n classes=None,\n augmentation=None,\n preprocessing=None,\n scanner=None,\n synthesized=False,\n isic_meta_data='./data/isic/meta_data.json',\n ):\n self.classes = classes\n self.lesion_classes = [\n 'melanoma',\n 'seborrheic keratosis',\n 'nevus'\n ]\n\n files = os.listdir(images_dir)\n format_img = \".png\" if synthesized else \".jpg\"\n\n ids = {key: [] for key in self.lesion_classes}\n with open(isic_meta_data, 'r') as f:\n meta_data = json.load(f)\n\n for meta in meta_data:\n diag = meta[\"meta\"][\"clinical\"][\"diagnosis\"]\n if meta[\"name\"] + format_img in files:\n ids[diag].append(meta[\"name\"])\n\n if scanner is None:\n self.ids = [os.path.basename(x) for x in glob.glob(images_dir + r'/*.*')]\n else:\n self.ids = ids[scanner]\n\n self.images_fps = [os.path.join(images_dir, image_id.split('.')[0] + format_img)\n for image_id in self.ids]\n self.masks_fps = [os.path.join(masks_dir, image_id.split('.')[0] + '_segmentation.png')\n for image_id in self.ids]\n\n self.augmentation = augmentation\n self.preprocessing = preprocessing\n\n def __getitem__(self, i):\n image = cv2.imread(self.images_fps[i], cv2.IMREAD_COLOR)\n mask = cv2.imread(self.masks_fps[i], cv2.IMREAD_GRAYSCALE)\n\n # apply augmentations\n if self.augmentation:\n sample = self.augmentation(image=image, mask=mask)\n image, mask = sample['image'], sample['mask']\n\n image = np.swapaxes(image, 0, 2)\n image = np.swapaxes(image, 1, 2)\n mask = np.expand_dims(mask, axis=0).astype(np.float)\n mask = mask / 255.0\n\n return image, mask\n\n def __len__(self):\n return len(self.ids)\n"
] |
[
[
"numpy.swapaxes",
"numpy.expand_dims"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
siebeniris/superresolution
|
[
"2eec93029c1332720ba17d5747ec9aee19bc0c63"
] |
[
"src/models/test_models.py"
] |
[
"# Test images with pretrained tensorflow models\n# call :\n# python models/research/object_detection/test_models.py --frozen_graph {$path_to_frozen_graph}\n# --label_map {$path_to_##.pbtxt} --test_dir {$path_to_test_set}\n# --num {$number_test_images} --output_dir {$path_to_save_output_imagesWITHboxes}\n\nimport argparse\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\n\n# imports from object detection module\n# From tensorflow/models/research/\n# protoc object_detection/protos/*.proto --python_out=.\nfrom object_detection.utils import ops as utils_ops\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\n\nparser = argparse.ArgumentParser(description=\"Load tensorflow model and do object detection inference \")\nparser.add_argument(\"--frozen_graph\", type=str,\n default=\"models/faster_rcnn_resnet50/train20190612131800test/frozen_inference_graph.pb\",\n help=\"the path to the frozen graph\")\nparser.add_argument(\"--label_map\", type=str, default=\"data/processed/label_map.pbtxt\",\n help=\"the path to the label map\")\nparser.add_argument(\"--num\", type=int, default=3, help=\"the number of test images, set negative to test\"\n \"all images in the test_dir. \")\nparser.add_argument(\"--output_dir\", type=str, default=\"data/processed/tested\",\n help=\"the directory to store the output of object inference\")\nparser.add_argument(\"--seedling\", action=\"store_true\", default=False,\n help=\"if only the images with seedlings are applied\")\n\nargs = parser.parse_args()\n\n\ndef load_label_map(label_path):\n \"\"\"\n Label maps map indices to category names, so that when our convolution network predicts 5,\n we know that this corresponds to airplane. 
Here we use internal utility functions,\n but anything that returns a dictionary mapping integers to appropriate string labels would be fine\n :param label_path: the path to the label map\n :return: cateogry index from label map\n \"\"\"\n return label_map_util.create_category_index_from_labelmap(label_path, use_display_name=True)\n\n\ndef load_image_into_numpy_array(image):\n # helper corde\n (im_width, im_height) = image.size\n print('loaded image: ', image.size)\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\ndef get_test_image_paths(seedling, num):\n all_images = []\n if seedling:\n with open('data/interim/datasets/test_seedling_paths')as file:\n for line in file.readlines():\n name = line.replace('\\n', '')\n all_images.append(name)\n else:\n with open('data/interim/datasets/test_paths')as file:\n for line in file.readlines():\n name = line.replace('\\n', '')\n all_images.append(name)\n\n if num >= 0:\n return all_images[:num]\n else:\n return all_images\n\n\ndef run_inference_for_single_image(image, graph):\n with graph.as_default():\n with tf.Session() as sess:\n # Get handles to input and output tensors\n ops = tf.get_default_graph().get_operations()\n all_tensor_names = {output.name for op in ops for output in op.outputs}\n tensor_dict = {}\n for key in [\n 'num_detections', 'detection_boxes', 'detection_scores',\n 'detection_classes', 'detection_masks'\n ]:\n tensor_name = key + ':0'\n if tensor_name in all_tensor_names:\n tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(\n tensor_name)\n\n if 'detection_masks' in tensor_dict:\n # The following processing is only for single image\n detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])\n detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])\n\n # Reframe is required to translate mask from box coordinates to image coordinates\n # and fit the image size.\n real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)\n detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])\n detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])\n detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n detection_masks, detection_boxes, image.shape[1], image.shape[2])\n detection_masks_reframed = tf.cast(\n tf.greater(detection_masks_reframed, 0.5), tf.uint8)\n # Follow the convention by adding back the batch dimension\n tensor_dict['detection_masks'] = tf.expand_dims(\n detection_masks_reframed, 0)\n image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\n # Run inference\n output_dict = sess.run(tensor_dict,\n feed_dict={image_tensor: image})\n\n # all outputs are float32 numpy arrays, so convert types as appropriate\n output_dict['num_detections'] = int(output_dict['num_detections'][0])\n output_dict['detection_classes'] = output_dict[\n 'detection_classes'][0].astype(np.int64)\n output_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n output_dict['detection_scores'] = output_dict['detection_scores'][0]\n if 'detection_masks' in output_dict:\n output_dict['detection_masks'] = output_dict['detection_masks'][0]\n\n return output_dict\n\n\ndef main():\n # load a frozen tensorflow model into memory\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(args.frozen_graph, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n 
tf.import_graph_def(od_graph_def, name='')\n\n # loading label map\n category_index = load_label_map(args.label_map)\n\n TEST_IMAGE_PATHS = get_test_image_paths(args.seedling, args.num)\n\n for image_path in TEST_IMAGE_PATHS:\n image_name = str(image_path).split('/')[-1]\n image = Image.open(image_path)\n # the array based representation of the image will be used later in order to prepare the\n # result image with boxes and labels on it.\n image_np = load_image_into_numpy_array(image)\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n # Actual detection.\n output_dict = run_inference_for_single_image(image_np_expanded, detection_graph)\n\n # Visualization of the results of a detection.\n # uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.\n boxed_image = vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n output_dict['detection_boxes'],\n output_dict['detection_classes'],\n output_dict['detection_scores'],\n category_index,\n instance_masks=output_dict.get('detection_masks'),\n use_normalized_coordinates=True,\n line_thickness=8)\n\n # save path for images with overlaid boxes\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n saved_path = os.path.join(args.output_dir, image_name)\n\n # convert numpy array to image\n im = Image.fromarray(boxed_image)\n # save image\n im.save(saved_path)\n print('saved image to {}'.format(saved_path))\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"tensorflow.Graph",
"numpy.expand_dims",
"tensorflow.import_graph_def",
"tensorflow.greater",
"tensorflow.slice",
"tensorflow.gfile.GFile",
"tensorflow.cast",
"tensorflow.squeeze",
"tensorflow.expand_dims",
"tensorflow.Session",
"tensorflow.get_default_graph",
"tensorflow.GraphDef"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Rishab26/causalnex
|
[
"127d9324a3d68c1795299c7522f22cdea880f344"
] |
[
"causalnex/structure/pytorch/dist_type/_base.py"
] |
[
"# Copyright 2019-2020 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\"QuantumBlack\") name and logo\n# (either separately or in combination, \"QuantumBlack Trademarks\") are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n``causalnex.pytorch.dist_type._base`` defines the distribution type class interface and default behavior.\n\"\"\"\n\nimport itertools\nfrom abc import ABCMeta, abstractmethod\nfrom copy import deepcopy\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nimport torch\n\nfrom causalnex.structure.structuremodel import StructureModel\n\n\nclass DistTypeBase(metaclass=ABCMeta):\n \"\"\"Base class defining the distribution default behavior and interface\"\"\"\n\n def __init__(self, idx: int):\n \"\"\"\n Default constructor for the DistTypeBase class.\n Unless overridden, provides default behavior to all subclasses.\n\n Args:\n idx: Positional index in data passed to the NOTEARS algorithm\n which correspond to this datatype.\n \"\"\"\n self.idx = idx\n\n def get_columns(\n self,\n X: np.ndarray,\n ) -> np.ndarray:\n \"\"\"\n Gets the column(s) associated with the instantiated DistType.\n\n Args:\n X: Full dataset to be selected from.\n\n Returns:\n 1d or 2d np.ndarray of columns.\n \"\"\"\n return X[:, self.idx]\n\n # pylint: disable=no-self-use\n # pylint: disable=unused-argument\n def preprocess_X(self, X: np.ndarray, fit_transform: bool = True) -> np.ndarray:\n \"\"\"\n Overload this method to perform any required preprocessing of the data\n matrix. 
This can include data conversion, column expansion etc.\n Changes to the tabu parameters should also be done here.\n\n **WARN** This preprocessing CANNOT reorder the columns of X.\n\n Args:\n X: The original passed-in data.\n\n fit_transform: Whether the class first fits\n then transforms the data, or just transforms.\n Just transforming is used to preprocess new data after the\n initial NOTEARS fit.\n\n Returns:\n Preprocessed X\n \"\"\"\n return X\n\n # pylint: disable=no-self-use\n def preprocess_tabu_edges(\n self, tabu_edges: List[Tuple[int, int]]\n ) -> List[Tuple[int, int]]:\n \"\"\"\n Overload this method to perform any required preprocessing of the tabu_edges.\n\n Args:\n tabu_edges: The original tabu_edges.\n\n Returns:\n Preprocessed tabu_edges.\n \"\"\"\n return tabu_edges\n\n # pylint: disable=no-self-use\n def preprocess_tabu_nodes(self, tabu_nodes: List[int]) -> List[int]:\n \"\"\"\n Overload this method to perform any required preprocessing of the tabu_nodes.\n\n Args:\n tabu_nodes: The original tabu_nodes.\n\n Returns:\n Preprocessed tabu_nodes.\n \"\"\"\n return tabu_nodes\n\n # pylint: disable=no-self-use\n def update_idx_col(self, idx_col: Dict[int, str]) -> Dict[int, str]:\n \"\"\"\n Overload this method to update the idx_col dict with expanded colnames.\n\n Args:\n idx_col: The original index to column mapping.\n\n Returns:\n Updated index to column mapping.\n \"\"\"\n return idx_col\n\n def add_to_node(self, sm: StructureModel) -> StructureModel:\n \"\"\"\n Adds self to a node of a structure model corresponding to self.idx.\n\n Args:\n sm: The input StructureModel\n\n Returns:\n Updated StructureModel\n \"\"\"\n sm.nodes[self.idx][\"dist_type\"] = self\n return sm\n\n # pylint: disable=no-self-use\n def modify_h(self, square_weight_mat: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Overload this method to apply updates to the W matrix in h(W).\n Typically used to prevent spurious cycles when using expended columns.\n\n Args:\n square_weight_mat: The weight matrix used in h(W).\n\n Returns:\n Updated weight matrix used in h(W).\n \"\"\"\n return square_weight_mat\n\n # pylint: disable=no-self-use\n def collapse_adj(self, adj: np.ndarray) -> np.ndarray:\n \"\"\"\n Overload this method to apply updates to collapse the W matrix\n of a multi-parameter distribution\n Likely has the same impact as modify_h.\n\n Args:\n adj: The adjacency matrix.\n\n Returns:\n Updated adjacency matrix.\n \"\"\"\n return adj\n\n @abstractmethod\n def loss(self, X: torch.Tensor, X_hat: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n X: The original data passed into NOTEARS (i.e. 
the reconstruction target).\n\n X_hat: The reconstructed data.\n\n Returns:\n Scalar pytorch tensor of the reconstruction loss between X and X_hat.\n \"\"\"\n raise NotImplementedError(\"Must implement the loss() method\")\n\n @abstractmethod\n def inverse_link_function(self, X_hat: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert the transformed data from the latent space to the original dtype\n using the inverse link function.\n\n Args:\n X_hat: Reconstructed data in the latent space.\n\n Returns:\n Modified X_hat.\n MUST be same shape as passed in data.\n Projects the self.idx column from the latent space to the dist_type space.\n \"\"\"\n raise NotImplementedError(\"Must implement the inverse_link_function() method\")\n\n\nclass ExpandColumnsMixin:\n \"\"\"\n Mixin class providing convenience methods for column expansion.\n \"\"\"\n\n @staticmethod\n def _expand_columns(X: np.ndarray, new_columns: np.ndarray) -> np.ndarray:\n \"\"\"\n Expands the data matrix columns without reordering the indices.\n\n Args:\n X: Base dataset to expand.\n\n new_columns: The columns to expand the dataset by.\n\n Returns:\n Expanded dataset.\n \"\"\"\n return np.hstack([X, new_columns])\n\n @staticmethod\n def update_tabu_edges(\n idx_group: List[int],\n tabu_edges: List[Tuple[int, int]],\n tabu_idx_group: bool,\n ) -> List[Tuple[int, int]]:\n \"\"\"\n Tabu edges are:\n 1. all user defined connections to original feature column\n 2. all inter-feature connections (optional)\n\n Args:\n idx_group: The group of indices which correspond to a single\n expanded column.\n\n tabu_edges: The list of tabu_edges to be updated.\n\n tabu_idx_group: Whether inter-group edges should also be considered tabu.\n I.e if a result of a column expansion, often want to prevent edges being learned\n between parameters.\n\n Returns:\n Updated tabu_edges\n \"\"\"\n\n if tabu_edges is None:\n tabu_edges = []\n\n # copy to prevent mutations\n tabu_edges = deepcopy(tabu_edges)\n\n # handle 1.\n new_tabu_edges = []\n # for each original tabu pair\n for (i, j) in tabu_edges:\n # idx_group[0] is the original column index\n if i == idx_group[0]:\n new_tabu_edges += [(idx, j) for idx in idx_group[1:]]\n elif j == idx_group[0]:\n new_tabu_edges += [(i, idx) for idx in idx_group[1:]]\n # all new edges added to tabu_edges\n tabu_edges += new_tabu_edges\n\n # handle 2.\n if tabu_idx_group:\n # add on all pairwise permutations of particular feature group\n # NOTE: permutations are needed for edge directionality\n tabu_edges += list(itertools.permutations(idx_group, 2))\n\n return tabu_edges\n\n @staticmethod\n def update_tabu_nodes(\n idx_group: List[int], tabu_nodes: List[int]\n ) -> List[Tuple[int, int]]:\n \"\"\"\n Tabu nodes are:\n 1. all user defined connections to original feature column\n\n Args:\n idx_group: The group of indices which correspond to a single\n expanded column.\n\n tabu_nodes: The list of tabu_nodes to be updated.\n\n Returns:\n Updated tabu_nodes\n \"\"\"\n if tabu_nodes is None:\n return tabu_nodes\n\n # copy to prevent mutations\n tabu_nodes = deepcopy(tabu_nodes)\n\n new_tabu_nodes = []\n for i in tabu_nodes:\n # NOTE: the first element in the idx_group is guaranteed as self.idx\n if i == idx_group[0]:\n new_tabu_nodes += idx_group[1:]\n # add on the new tabu nodes\n tabu_nodes += new_tabu_nodes\n return tabu_nodes\n"
] |
[
[
"numpy.hstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
oneconcern/stompy
|
[
"4efb78824804edc68555bced275e37842f98ba1f",
"4efb78824804edc68555bced275e37842f98ba1f",
"4efb78824804edc68555bced275e37842f98ba1f"
] |
[
"test/test_harm_decomp.py",
"test/test_cgal_line_walk.py",
"stompy/spatial/medial_axis.py"
] |
[
"from __future__ import print_function\n\n\nfrom stompy import harm_decomp\nimport numpy as np\n\ndef test_basic():\n # A sample problem:\n omegas = np.array([1.0,0.0])\n\n # the constructed data:\n amps = np.array([1,5.0])\n phis = np.array([1,0])\n\n t = np.linspace(0,10*np.pi,125)\n h = amps[0]*np.cos(omegas[0]*t - phis[0]) + amps[1]*np.cos(omegas[1]*t - phis[1])\n\n comps = harm_decomp.decompose(t,h,omegas)\n recon=harm_decomp.recompose(t,comps,omegas)\n\n assert np.allclose( recon, h)\n\n print(\"Components: \",comps)\n\n",
"from CGAL.CGAL_Triangulation_2 import Constrained_Delaunay_triangulation_2\nfrom CGAL.CGAL_Kernel import Point_2\n\nfrom stompy.grid import cgal_line_walk\n\nimport numpy as np\n\ndef test_cgal_link_walk():\n DT=Constrained_Delaunay_triangulation_2()\n\n xys=np.array( [ [0,0],\n [1,0],\n [0,1],\n [1,2] ],'f8' )\n # in some versions, Point_2 is picky that it gets doubles,\n # not ints.\n pnts=[Point_2(xy[0],xy[1])\n for xy in xys]\n\n vhs=[DT.insert(p) for p in pnts]\n\n DT.insert_constraint(vhs[0],vhs[2])\n\n ##\n\n res0=cgal_line_walk.line_walk(DT,vhs[0],vhs[1])\n\n assert not DT.is_constrained(res0[0][1])\n res1=cgal_line_walk.line_walk(DT,vhs[0],vhs[2])\n\n assert DT.is_constrained(res1[0][1])\n\n assert len(cgal_line_walk.line_conflicts(DT,p1=[5,5],p2=[5,6]))==0\n\n assert len(cgal_line_walk.line_conflicts(DT,p1=[0.5,-0.5],p2=[0.5,0.5]))==0\n\n assert len(cgal_line_walk.line_conflicts(DT,p1=[-0.5,0.5],p2=[0.5,0.5]))>0\n\n res3=cgal_line_walk.line_conflicts(DT,p1=[0,-1],p2=[2,1])\n assert len(res3)>0\n assert res3[0][0]=='v'\n\n\n",
"from __future__ import print_function\n\nimport os\nimport subprocess,os.path\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Circle\nfrom matplotlib.collections import LineCollection\n\nfrom shapely import geometry\n\nfrom . import field\n\nfrom ..utils import point_in_polygon\n\ndef plot_geo(geo):\n def plot_ring(r):\n points = np.array(r.coords)\n plt.plot( points[:,0],points[:,1],'k' )\n plot_ring(geo.exterior)\n for r in geo.interiors:\n plot_ring(r)\n\ndef geo2poly(geo_poly,poly_filename):\n \"\"\"\n given a polygon geometry, write a triangle compatible poly\n file\n \"\"\"\n print(\"Writing poly file \", poly_filename)\n \n # and then only the exterior ring:\n point_list = np.array(geo_poly.exterior.coords)\n # which at least sometimes has a duplicate node at the end that\n # we don't want\n if np.all(point_list[0]==point_list[-1]):\n point_list = point_list[:-1]\n\n npoints = point_list.shape[0]\n\n # triangle wants the basic planar-straight-line-graph\n poly_fp = open(poly_filename,'wt')\n\n # first line is\n # First line: <# of vertices> <dimension (must be 2)> <# of attributes> <# of boundary markers (0 or 1)>\n poly_fp.write(\"%i 2 0 0\\n\"%(npoints))\n # Write out vertices\n for i in range(npoints):\n # <vertex #> <x> <y> [attributes] [boundary marker] \n poly_fp.write(\"%i %f %f\\n\"%(i,point_list[i,0],point_list[i,1]))\n # Write out segments\n # <# of segments> <# of boundary markers (0 or 1)>\n poly_fp.write(\"%i 0\\n\"%(npoints))\n for i in range(npoints):\n # <segment #> <endpoint> <endpoint> [boundary marker]\n poly_fp.write(\"%i %i %i\\n\"%(i,i,(i+1)%npoints))\n # number of holes, which for the moment we ignore:\n poly_fp.write(\"0\\n\")\n poly_fp.close()\n\ndef load_triangle_nodes(node_filename):\n \"\"\"\n load nodes as output by triangle\n \"\"\"\n fp = open(node_filename,'rt')\n n_nodes, dim, nattrs, has_boundary_markers = map(int,fp.readline().split())\n \n nodes = np.zeros( (n_nodes,dim), np.float64)\n\n for i in range(n_nodes):\n idx,nodes[i,0],nodes[i,1] = map(float,fp.readline().split()[:3])\n fp.close()\n return nodes\n\ndef load_triangle_edges(edge_filename):\n \"\"\" load finite edges from output from triangle\n \"\"\"\n fp = open(edge_filename,'rt')\n n_edges,n_markers = map(int,fp.readline().split())\n # indexed into corresponding node file:\n edges = []\n \n for i in range(n_edges):\n vals=map(int,fp.readline().split()[:3])\n if vals[2] == -1:\n continue # it's a ray\n edges.append( vals[1:3] )\n fp.close()\n\n return np.array(edges)\n\ndef plot_voronoi(poly_filename):\n vor_node = poly_file.replace('.poly','.1.v.node')\n vor_edge = poly_file.replace('.poly','.1.v.edge')\n\n # load the vor nodes and show them:\n vor_nodes = load_triangle_nodes(vor_node)\n plt.plot(vor_nodes[:,0],vor_nodes[:,1],'r+')\n\n vor_edges = load_triangle_edges(vor_edge)\n\n # plot the finite edges:\n # build up the list of lines:\n all_lines = vor_nodes[vor_edges]\n coll = LineCollection(all_lines)\n ax = plt.gca()\n ax.add_collection(coll)\n\n\ndef load_triangle_elements(ele_file):\n fp = open(ele_file,'rt')\n n_elts, nodes_per_elt, n_attrs = map(int,fp.readline().split())\n\n tris = np.zeros( (n_elts,3), np.int32)\n\n for i in range(n_elts):\n dummy, tris[i,0],tris[i,1],tris[i,2] = map(int,fp.readline().split()[:4])\n return tris\n\ndef plot_elements(tris,nodes):\n edges = set()\n for t in range(tris.shape[0]):\n t_verts = np.sorted(tris[t])\n edges.add( (t_verts[0],t_verts[1]) )\n edges.add( (t_verts[0],t_verts[2]) )\n edges.add( 
(t_verts[1],t_verts[2]) )\n\n edges = np.array(list(edges))\n\n all_lines = nodes[edges]\n coll = LineCollection(all_lines)\n ax = plt.gca()\n ax.add_collection(coll)\n\n\n# that writes out these files:\n# node_file = poly_file.replace('.poly','.1.node')\n# element_file = poly_file.replace('.poly','.1.ele')\n\n\n# tris = load_triangle_elements(element_file)\n# nodes = load_triangle_nodes(node_file)\n# plot_elements(tris,nodes)\n \n# Look into how to compute the local radius based on the voronoi\n# diagram:\n\n# Find the radius at each voronoi center:\n# 1. load the voronoi nodes:\n\n\n# some sort of issue loading the tri information - might be worth\n# trying it w/o any islands, but first taking a look...\n# nodes: 2-D, 0 attributes, 1 boundary marker\n# 8690 nodes (compare to 8003 nodes in input)\n\n\nclass Graph(object):\n def __init__(self,basename):\n node_file = basename + '.node'\n if os.path.exists(node_file):\n self.nodes = load_triangle_nodes(node_file)\n else:\n self.nodes = None\n\n edge_file = basename + '.edge'\n if os.path.exists(edge_file):\n self.edges = load_triangle_edges(edge_file)\n else:\n self.edges = None\n\n element_file = basename + '.ele'\n if os.path.exists(element_file):\n self.elements = load_triangle_elements(element_file)\n else:\n self.elements = None\n \n def plot(self,colors=None):\n if self.edges is not None:\n self.plot_edges(colors=colors)\n else:\n self.plot_elements(colors=colors)\n \n def plot_edges(self,colors=None):\n all_lines = self.nodes[self.edges]\n coll = LineCollection(all_lines)\n if colors is not None:\n coll.set_array(colors)\n ax = plt.gca()\n ax.add_collection(coll)\n plt.draw()\n \n def plot_elements(self,colors=None):\n i = np.array([0,1,2,0])\n \n all_lines = self.nodes[self.elements[:,i]]\n coll = LineCollection(all_lines)\n if colors is not None:\n coll.set_array(colors)\n ax = plt.gca()\n ax.add_collection(coll)\n plt.draw()\n\n _vcenters = None\n def vcenters(self):\n if self.elements is None:\n raise Exception(\"vcenters() called but elements is None\")\n \n if self._vcenters is None:\n # just copied from trigrid\n self._vcenters = np.zeros(( len(self.elements),2 ), np.float64)\n\n p1x = self.nodes[self.elements[:,0]][:,0]\n p1y = self.nodes[self.elements[:,0]][:,1]\n p2x = self.nodes[self.elements[:,1]][:,0]\n p2y = self.nodes[self.elements[:,1]][:,1]\n p3x = self.nodes[self.elements[:,2]][:,0]\n p3y = self.nodes[self.elements[:,2]][:,1]\n\n # taken from TRANSFORMER_gang.f90\n dd=2.0*((p1x-p2x)*(p1y-p3y) -(p1x-p3x)*(p1y-p2y))\n b1=p1x**2+p1y**2-p2x**2-p2y**2\n b2=p1x**2+p1y**2-p3x**2-p3y**2 \n xc=(b1*(p1y-p3y)-b2*(p1y-p2y))/dd\n yc=(b2*(p1x-p2x)-b1*(p1x-p3x))/dd\n\n self._vcenters[:,0] = xc\n self._vcenters[:,1] = yc\n return self._vcenters\n _radii = None\n def radii(self):\n if self._radii is None:\n vcenters = self.vcenters()\n vcorners = self.nodes[self.elements[:,0]]\n self._radii = np.sqrt( ((vcenters - vcorners)**2).sum(axis=1) )\n\n return self._radii\n\n _nodes2elements = None\n def nodes2elements(self,n1,n2):\n if self._nodes2elements is None:\n e2e = {}\n print(\"building hash of edges to elements\")\n for c in range(len(self.elements)):\n for i in range(3):\n a = self.elements[c,i]\n b = self.elements[c,(i+1)%3]\n if a > b:\n a,b = b,a\n\n k = (a,b)\n\n if not e2e.has_key(k):\n e2e[k] = []\n\n e2e[ k ].append(c)\n \n self._nodes2elements = e2e\n \n print(\"done\")\n\n if n1 > n2:\n n1,n2 = n2,n1\n return self._nodes2elements[(n1,n2)]\n \n\nclass Boundary(object):\n n_cleaned = 0 # bean-counter for remove_repeated\n 
\n def __init__(self,geo=None,nodes=None,clean_geo=True):\n \"\"\" \n geo: a Shapely polygon (with holes, ok)\n nodes: an array of points, taken to be the exterior ring of a polygon\n clean_geo: if true, traverse the rings and removed repeated nodes\n \"\"\"\n \n if geo:\n all_nodes = []\n all_edges = []\n holes = []\n start_n = 0\n\n rings = [geo.exterior] + list(geo.interiors)\n for ring in rings:\n orig_nodes = np.array(ring.coords)\n if clean_geo:\n orig_nodes = self.remove_repeated(orig_nodes)\n \n # remove repeated last coordinate\n these_nodes = orig_nodes[:-1] \n \n n_nodes = these_nodes.shape[0]\n n = np.arange(n_nodes)\n these_edges = start_n + np.transpose( np.array([n,(n+1)%n_nodes]) )\n \n all_nodes.append(these_nodes)\n all_edges.append(these_edges)\n start_n += n_nodes\n\n ring_poly = geometry.Polygon( these_nodes )\n point_inside = point_in_polygon(ring_poly)\n holes.append(point_inside)\n \n self.nodes = np.concatenate( all_nodes ) # array(geo.exterior.coords)[:-1,:]\n self.edges = np.concatenate( all_edges )\n self.holes = np.array(holes[1:])\n self.geo = geo\n\n if clean_geo:\n print(\"Removed %i repeated nodes\"%self.n_cleaned)\n else:\n self.nodes = nodes\n \n n_nodes = self.nodes.shape[0]\n # construct an edge array that just matches consecutive\n # nodes\n n = np.arange(n_nodes)\n self.edges = np.transpose(np.array([n,(n+1)%n_nodes]))\n self.holes = np.zeros((0,2))\n\n # automatically find a basic lower-bound length scale\n min_dist_sqr = (((self.nodes[1:] - self.nodes[:-1])**2).sum(axis=1)).min()\n self.min_edge_length = np.sqrt(min_dist_sqr)\n #print(\"Minimum edge length in boundary inputs is \",self.min_edge_length)\n\n self._vor = None\n self._tri = None\n\n _nodes2edge = None\n def nodes2edge(self,a,b):\n # if a,b is boundary edge, return the edge id, otherwise return None\n if self._nodes2edge is None:\n self._nodes2edge = {}\n\n for e in range(len(self.edges)):\n c,d = self.edges[e]\n if c > d:\n d,c = c,d\n\n self._nodes2edge[ (c,d) ] = e\n if a>b:\n b,a = a,b\n k = (a,b)\n if self._nodes2edge.has_key(k):\n return self._nodes2edge[k]\n else:\n return None\n \n def remove_repeated(self,ring):\n \"\"\"Remove repeated nodes from an array.\n \"\"\"\n mask = np.zeros( len(ring),np.bool8 )\n\n mask[:-1] = np.all(ring[:-1]==ring[1:],axis=1)\n \n # for i in range(len(ring)-1):\n # if all(ring[i+1]==ring[i]):\n # mask[i] = True\n self.n_cleaned += mask.sum()\n \n return ring[~mask,:]\n \n def vor(self):\n if self._vor is None:\n self.triangulate()\n return self._vor\n def triangulation(self):\n if self._tri is None:\n self.triangulate()\n return self._tri\n\n def plot(self,colors=None):\n all_lines = self.nodes[self.edges]\n coll = LineCollection(all_lines)\n if colors is not None:\n coll.set_array(colors)\n ax = plt.gca()\n ax.add_collection(coll)\n\n # if len(self.holes) > 0:\n # plot(self.holes[:,0],self.holes[:,1],'ro')\n \n plt.draw()\n def plot_lines(self):\n plt.plot(self.nodes[:,0], self.nodes[:,1], 'k')\n\n def split_edges(self,edge_indexes):\n new_nodes = np.nan * np.ones((len(edge_indexes),2), np.float64)\n new_edges = -1 * np.ones((len(edge_indexes),2), np.int32)\n\n # remember what the next free edge and node are\n next_edge = self.edges.shape[0]\n next_node = self.nodes.shape[0]\n\n # extend nodes and edges:\n self.nodes = np.concatenate( (self.nodes,new_nodes), axis=0 )\n self.edges = np.concatenate( (self.edges,new_edges), axis=0 )\n\n ordering = np.arange(self.nodes.shape[0],dtype=np.float64)\n ordering[next_node:] = -1\n\n for i in 
range(len(edge_indexes)):\n # node indices to the old endpoints\n pntA,pntC = self.edges[edge_indexes[i]]\n pntB = next_node+i\n \n self.nodes[pntB] = 0.5*(self.nodes[pntA] + self.nodes[pntC])\n\n self.edges[edge_indexes[i],1] = pntB\n self.edges[next_edge+i] = [pntB,pntC]\n ordering[pntB] = 0.5*(ordering[pntA]+ordering[pntC])\n \n new_order = np.argsort(ordering)\n # so j = new_order[i] means that old node j will get mapped\n # to new node i\n self.nodes = self.nodes[new_order]\n\n # the \"inverse\" of new_order\n mapping = np.argsort(new_order)\n\n # not sure about this. too late to prove it to myself that\n # it works short of just testing it\n self.edges = mapping[self.edges]\n self._nodes2edge = None\n\n def write_poly(self,poly_filename):\n \"\"\" write a triangle compatible poly file\n \"\"\"\n # and then only the exterior ring:\n point_list = self.nodes\n \n # probably unnecessary\n if np.all(point_list[0]==point_list[-1]):\n raise Exception(\"Boundary should have already stripped any repeated endpoints\")\n\n npoints = point_list.shape[0]\n\n # triangle wants the basic planar-straight-line-graph\n poly_fp = open(poly_filename,'wt')\n\n # first line is\n # First line: <# of vertices> <dimension (must be 2)> <# of attributes> <# of boundary markers (0 or 1)>\n poly_fp.write(\"%i 2 0 0\\n\"%(npoints))\n # Write out vertices\n for i in range(npoints):\n # <vertex #> <x> <y> [attributes] [boundary marker] \n poly_fp.write(\"%i %f %f\\n\"%(i,point_list[i,0],point_list[i,1]))\n # Write out segments\n # <# of segments> <# of boundary markers (0 or 1)>\n poly_fp.write(\"%i 0\\n\"%(npoints))\n for i in range(len(self.edges)):\n # <segment #> <endpoint> <endpoint> [boundary marker]\n poly_fp.write(\"%i %i %i\\n\"%(i,self.edges[i,0],self.edges[i,1]))\n # number of holes\n poly_fp.write( \"%d\\n\"%self.holes.shape[0] )\n for i in range(self.holes.shape[0]):\n poly_fp.write(\"%d %f %f\\n\"%(i, self.holes[i,0], self.holes[i,1]) )\n poly_fp.close()\n \n # def triangulate(self):\n # ### Run some triangle stuff:\n # poly_file = \"test2.poly\"\n # self.write_poly(poly_file)\n # \n # cmd = \"%s -e -D -p -v %s\"%(triangle_path,poly_file)\n # subprocess.call(cmd,shell=True) # ,stdout=file('/dev/null','w') )\n # \n # # probably we should get the real geometry that was used, otherwise\n # # things will get confusing\n # self.read_poly('test2.1.poly')\n # \n # self._tri = Graph('test2.1')\n # self._vor = VoronoiDiagram('test2.1.v')\n\n def read_poly(self,poly_file):\n \"\"\" After triangulating, there may have been Steiner points\n added, and they will exist in the output .poly file.\n This reads that file and replaces self.nodes and self.edges\n with the information in the given polyfile. 
Holes will be\n kept the same (although it would be valid to re-read holes, too.\n\n \"\"\"\n poly_fp = open(poly_file,'rt')\n new_edges = []\n new_nodes = []\n\n n_nodes,dim,n_attrs,n_markers = map(int,poly_fp.readline().split())\n if n_nodes == 0:\n # print(\"Reading nodes from separate file\")\n new_nodes = load_triangle_nodes(poly_file.replace('.poly','.node'))\n else:\n raise Exception(\"Not ready for reading inline nodes\")\n\n n_segments,n_markers = map(int,poly_fp.readline().split())\n new_edges = np.zeros((n_segments,dim), np.int32)\n\n for i in range(n_segments):\n vals = map(int,poly_fp.readline().split())\n new_edges[i] = vals[1:3]\n\n # install the new data:\n self.edges = new_edges\n self.nodes = new_nodes\n self.geo = None\n self.src = poly_file\n \n def subdivide(self):\n \"\"\" Find edges that need to be sampled with smaller\n steps and divide them into two edges.\n returns the number of new edges / nodes\n\n method: calculate voronoi radii\n iterate over edges in boundary\n for each edge, find the voronoi point that they have\n in common. So this edge should be part of a triangle,\n and we are getting the center of that triangle.\n\n the voronoi radius with the distance between the voronoi\n point and the edge. If the edge is too long and needs to\n be subdivided, it will be long (and the voronoi radius large)\n compared to the distance between the edge and the vor. center.\n\n Can this be done without the vor. radii?\n need \n \"\"\"\n\n # the old way calculated voronoi radii and searched for nodes\n # on those circumcircles. For subdividing, we just need to match\n # each edge with the one voronoi point it belongs to.\n \n # vor = self.vor()\n # vor.calc_radii(self.nodes)\n\n # the new way - calculated voronoi points directly from the triangles\n # in the delaunay triangulation, then match with edges with a hash\n # on edge [a,b] node pairs\n triangulation = self.triangulation()\n vcenters = triangulation.vcenters()\n\n n_edges = self.edges.shape[0]\n to_subdivide = np.zeros(n_edges, np.float64)\n\n\n # the only way this works is for the boundary nodes to be exactly\n # the same, so we go boundary edge -> nodes -> delaunay element\n if np.any( self.nodes != triangulation.nodes ):\n raise Exception(\"Triangulation and boundary use different nodes.\")\n \n print(\"Choosing edges to subdivide\")\n for i in range(n_edges): # over boundary edges\n a,b = self.edges[i]\n elements = triangulation.nodes2elements(a,b)\n\n if len(elements) != 1:\n print(\"Edge %d,%d mapped to elements %s\"%(a,b,elements))\n raise Exception(\"Boundary edges should map to exactly one element\")\n \n element = elements[0]\n\n # compute the point-line distance between\n # this edge and the v center, then compare to\n # the distance from the endpoint to that\n # vcenter\n pntV = vcenters[element]\n pntA = self.nodes[a]\n pntB = self.nodes[b]\n\n v_radius = np.sqrt( ((pntA-pntV)**2).sum() )\n line_clearance = np.sqrt( (( 0.5*(pntA+pntB) - pntV)**2).sum() )\n\n if v_radius > 1.2*line_clearance and v_radius > self.min_edge_length:\n # second check - make sure that neither AC nor BC are also on the\n # boundary\n p1,p2,p3 = triangulation.elements[element]\n count = 0\n if self.nodes2edge(p1,p2) is not None:\n count += 1\n if self.nodes2edge(p2,p3) is not None:\n count += 1\n if self.nodes2edge(p3,p1) is not None:\n count += 1\n\n if count == 1:\n to_subdivide[i] = 3\n elif count == 0:\n global bad_boundary\n bad_boundary = self\n print(\"While looking at edge %d=(%d,%d)\"%(i,a,b))\n raise Exception(\"We 
should have found at least 1 boundary edge\")\n elif count == 3:\n print(\"WARNING: Unexpected count of boundary edges in one element: \",count)\n # if 2, then it's a corner and we probably don't want to subdivide\n\n self.to_subdivide = to_subdivide\n bad_edges = where(to_subdivide)[0]\n self.split_edges( bad_edges )\n\n # invalidate these:\n self._vor = None\n self._tri = None\n return len(bad_edges)\n\n def subdivide_iterate(self):\n while 1:\n n_new = self.subdivide()\n print(\"Subdivide made %d new nodes\"%n_new)\n if n_new == 0:\n break\n\nclass VoronoiDiagram(Graph):\n radii = None\n dual_nodes = None\n dual_lookup = {}\n \n def calc_radii(self,del_nodes):\n \"\"\" for each of the voronoi points, find it's radius and\n which delaunay points are responsible for it.\n \"\"\"\n n_nodes = self.nodes.shape[0]\n \n self.radii = np.zeros( n_nodes, np.float64)\n self.dual_nodes = [None]*n_nodes\n self.dual_lookup = {} # map dual node index to list of vcenters\n\n # this is where all the time goes!\n # so make a field for the delaunay nodes that will speed up finding them\n I = np.arange(len(del_nodes))\n \n del_field = field.XYZField(del_nodes, 'nope')\n del_field.build_index()\n \n for i in range(n_nodes):\n if i % 1000 == 0:\n print(i)\n\n # find the nearest one...\n nearest = del_field.nearest(self.nodes[i])\n min_radius = np.sqrt( ((del_nodes[nearest] - self.nodes[i])**2).sum() )\n all_near = del_field.within_r(self.nodes[i], 1.00000001*min_radius)\n \n # dists_sqr = ((del_nodes - self.nodes[i,:])**2).sum(axis=1)\n # rad_sqr = dists_sqr.min()\n # self.dual_nodes[i] = find( dists_sqr <= 1.00001*rad_sqr )\n self.dual_nodes[i] = np.array(all_near)\n\n for dual_node_idx in self.dual_nodes[i]:\n if not self.dual_lookup.has_key(dual_node_idx):\n self.dual_lookup[dual_node_idx] = []\n self.dual_lookup[dual_node_idx].append(i)\n \n self.radii[i] = min_radius # sqrt(rad_sqr)\n\n\n def merge_points(self,tol):\n \"\"\" After a call to calc_radii(), this can be called to coalesce voronio points\n that are close to each other\n \"\"\"\n\n while len(self.nodes) > 1:\n # look for short edges:\n edge_ends = self.nodes[ self.edges ]\n\n edge_centers = edge_ends.mean(axis=1)\n edge_tols = tol(edge_centers)\n\n edge_lengths = np.sqrt( ((edge_ends[:,1,:] - edge_ends[:,0,:])**2).sum(axis=1) )\n rel_edge_lengths = edge_lengths / edge_tols\n\n to_merge = np.argmin(rel_edge_lengths)\n\n if rel_edge_lengths[ to_merge ] < 1.0:\n # print(\" got an edge to merge.\")\n self.merge_edge( to_merge )\n else:\n break\n \n def merge_edge(self,e):\n a,b = self.edges[e]\n # print(\"merging voronoi edge \",a,b)\n\n self.edges = np.concatenate( (self.edges[:e], self.edges[e+1:]) )\n\n # map old node indices to new ones:\n node_mapping = np.arange(len(self.nodes))\n # b has become a\n node_mapping[b] = a\n # and everybody greater than b is shifted down\n node_mapping[ node_mapping > b] -= 1\n\n if self.radii is not None:\n self.radii = np.concatenate( (self.radii[:b], self.radii[b+1:]) )\n\n # combine their dual nodes:\n self.dual_nodes[a] = np.unique( np.concatenate( (self.dual_nodes[a],self.dual_nodes[b]) ) )\n\n # then remove b from the list\n self.dual_nodes = self.dual_nodes[:b] + self.dual_nodes[b+1:]\n \n for k in self.dual_lookup.keys():\n l = self.dual_lookup[k]\n # k is an index to the boundary points\n # l is a list of indices to voronoi centers\n \n if b in l:\n l.remove( b )\n if not a in l:\n l.append(a)\n\n # keep it as a list for now.\n self.dual_lookup[k] = node_mapping[ np.array(l) ].tolist()\n\n # new node 
is between the old two nodes:\n self.nodes[a] = 0.5*(self.nodes[a] + self.nodes[b])\n\n self.edges = node_mapping[ self.edges ]\n\n self.nodes = np.concatenate( (self.nodes[:b], self.nodes[b+1:] ) )\n\n def centers_for_dual_node(self,dual_node):\n if self.dual_lookup.has_key(dual_node):\n return self.dual_lookup[dual_node]\n else:\n return []\n \n def plot_radii(self):\n a = gca()\n\n for i in range(self.nodes.shape[0]):\n cir = Circle( self.nodes[i], radius=self.radii[i])\n a.add_patch(cir)\n\n def plot_vor_points(self):\n try:\n colors = self.radii\n print(\"Got colors from radii\")\n plt.scatter(self.nodes[:,0],self.nodes[:,1],50,colors,\n lw=0,vmin=200,vmax=250)\n except:\n plt.plot(self.nodes[:,0],self.nodes[:,1],'r+')\n \n def plot(self,show_vor_points=True):\n if show_vor_points:\n self.plot_vor_points()\n\n # plot the finite edges:\n # build up the list of lines:\n all_lines = self.nodes[self.edges]\n coll = LineCollection(all_lines)\n coll.set_color('m')\n ax = plt.gca()\n ax.add_collection(coll)\n\n plt.draw()\n\n\n# since the triangulation didn't add any nodes, just\n# use the boundaries nodes instead of tri.nodes\n\n\n# ### Check radius against edge / voronoi center\n# if __name__ == '__main__':\n# ### Load the data\n# # boundary = load_shp.Boundary('/home/rusty/classes/research/meshing/dumbarton.shp')\n# \n# # this is full bay, already filtered at 50m\n# boundary = load_shp.Boundary('/home/rusty/classes/research/spatialdata/us/ca/suntans/shoreline/noaa-medres/sfbay-100km-arc/sfbay-100km-arc-50_20.shp')\n# \n# geo = boundary.geo\n# \n# # points = array( geo.exterior.coords )\n# # points = points[:-1]\n# \n# # from paver import upsample_linearring\n# # points = upsample_linearring(points,50)\n# \n# bdry_ma = Boundary( geo=geo )\n# print(\"subdividing...\")\n# bdry_ma.subdivide_iterate()\n# print(\"done\")\n# \n# vor = bdry_ma.vor()\n# #tri = bdry_ma.tri()\n# #tri.plot()\n# \n# print(\"Calculating radii\")\n# vor.calc_radii(bdry_ma.nodes)\n# print(\"done\")\n# \n# bdry_ma.plot()\n# bdry_ma.vor().plot_vor_points()\n# plt.axis('equal')\n# plt.draw()\n\n"
] |
[
[
"numpy.cos",
"numpy.array",
"numpy.allclose",
"numpy.linspace"
],
[
"numpy.array"
],
[
"matplotlib.pyplot.gca",
"numpy.sqrt",
"matplotlib.pyplot.scatter",
"matplotlib.collections.LineCollection",
"numpy.arange",
"numpy.sorted",
"matplotlib.patches.Circle",
"matplotlib.pyplot.draw",
"numpy.all",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"numpy.argmin",
"numpy.any",
"numpy.argsort",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vinitra-zz/sklearn-onnx
|
[
"a8f2657525d0b4dd279bcd1a971397d002929a77",
"a8f2657525d0b4dd279bcd1a971397d002929a77"
] |
[
"skl2onnx/operator_converters/naive_bayes.py",
"tests/test_sklearn_nearest_neighbour_converter.py"
] |
[
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport numpy as np\nfrom ..proto import onnx_proto\nfrom ..common._apply_operation import (\n apply_add, apply_cast, apply_div, apply_exp,\n apply_log, apply_mul, apply_pow, apply_sub, apply_reshape,\n)\nfrom ..common.data_types import Int64TensorType\nfrom ..common._registration import register_converter\nfrom ..common.utils_classifier import get_label_classes\n\n\ndef _joint_log_likelihood_bernoulli(\n scope, container, input_name, feature_log_prob_name,\n class_log_prior_name, binarize, feature_count, proto_type,\n sum_op_version, sum_result_name):\n \"\"\"\n Calculate joint log likelihood for Bernoulli Naive Bayes model.\n \"\"\"\n constant_name = scope.get_unique_variable_name('constant')\n exp_result_name = scope.get_unique_variable_name('exp_result')\n sub_result_name = scope.get_unique_variable_name('sub_result')\n neg_prob_name = scope.get_unique_variable_name('neg_prob')\n sum_neg_prob_name = scope.get_unique_variable_name('sum_neg_prob')\n difference_matrix_name = scope.get_unique_variable_name(\n 'difference_matrix')\n dot_prod_name = scope.get_unique_variable_name('dot_prod')\n partial_sum_result_name = scope.get_unique_variable_name(\n 'partial_sum_result')\n # Define constant slightly greater than 1 to avoid log 0\n # scenarios when calculating log (1 - x) and x=1 in line 70\n container.add_initializer(constant_name, proto_type, [], [1.000000001])\n\n if binarize is not None:\n threshold_name = scope.get_unique_variable_name('threshold')\n condition_name = scope.get_unique_variable_name('condition')\n cast_values_name = scope.get_unique_variable_name('cast_values')\n zero_tensor_name = scope.get_unique_variable_name('zero_tensor')\n binarised_input_name = scope.get_unique_variable_name(\n 'binarised_input')\n num_features = feature_count.shape[1]\n\n container.add_initializer(threshold_name, proto_type,\n [1], [binarize])\n container.add_initializer(\n zero_tensor_name,\n proto_type, [1, num_features],\n np.zeros((1, num_features)).ravel())\n\n container.add_node(\n 'Greater', [input_name, threshold_name],\n condition_name, name=scope.get_unique_operator_name('Greater'),\n op_version=9)\n apply_cast(scope, condition_name, cast_values_name, container,\n to=proto_type)\n apply_add(scope, [zero_tensor_name, cast_values_name],\n binarised_input_name, container, broadcast=1)\n input_name = binarised_input_name\n\n apply_exp(scope, feature_log_prob_name, exp_result_name, container)\n apply_sub(scope, [constant_name, exp_result_name], sub_result_name,\n container, broadcast=1)\n apply_log(scope, sub_result_name, neg_prob_name, container)\n container.add_node('ReduceSum', neg_prob_name,\n sum_neg_prob_name, axes=[0],\n name=scope.get_unique_operator_name('ReduceSum'))\n apply_sub(scope, [feature_log_prob_name, neg_prob_name],\n difference_matrix_name, container)\n container.add_node(\n 'MatMul', [input_name, difference_matrix_name],\n dot_prod_name, name=scope.get_unique_operator_name('MatMul'))\n container.add_node(\n 'Sum', [sum_neg_prob_name, dot_prod_name],\n partial_sum_result_name, op_version=sum_op_version,\n name=scope.get_unique_operator_name('Sum'))\n container.add_node(\n 'Sum', [partial_sum_result_name, class_log_prior_name],\n sum_result_name, 
name=scope.get_unique_operator_name('Sum'),\n op_version=sum_op_version)\n return sum_result_name\n\n\ndef _joint_log_likelihood_gaussian(\n scope, container, input_name, model, proto_type, sum_result_name):\n \"\"\"\n Calculate joint log likelihood for Gaussian Naive Bayes model.\n \"\"\"\n features = model.theta_.shape[1]\n jointi = np.log(model.class_prior_)\n sigma_sum_log = - 0.5 * np.sum(np.log(2. * np.pi * model.sigma_), axis=1)\n theta_name = scope.get_unique_variable_name('theta')\n sigma_name = scope.get_unique_variable_name('sigma')\n sigma_sum_log_name = scope.get_unique_variable_name('sigma_sum_log')\n jointi_name = scope.get_unique_variable_name('jointi')\n exponent_name = scope.get_unique_variable_name('exponent')\n prod_operand_name = scope.get_unique_variable_name('prod_operand')\n reshaped_input_name = scope.get_unique_variable_name('reshaped_input')\n subtracted_input_name = scope.get_unique_variable_name('subtracted_input')\n pow_result_name = scope.get_unique_variable_name('pow_result')\n div_result_name = scope.get_unique_variable_name('div_result')\n reduced_sum_name = scope.get_unique_variable_name('reduced_sum')\n mul_result_name = scope.get_unique_variable_name('mul_result')\n part_log_likelihood_name = scope.get_unique_variable_name(\n 'part_log_likelihood')\n\n theta = model.theta_.reshape((1, -1, features))\n sigma = model.sigma_.reshape((1, -1, features))\n\n container.add_initializer(theta_name, proto_type, theta.shape,\n theta.ravel())\n container.add_initializer(sigma_name, proto_type, sigma.shape,\n sigma.ravel())\n container.add_initializer(jointi_name, proto_type, [1, jointi.shape[0]],\n jointi)\n container.add_initializer(\n sigma_sum_log_name, proto_type,\n [1, sigma_sum_log.shape[0]], sigma_sum_log.ravel())\n container.add_initializer(exponent_name, proto_type, [], [2])\n container.add_initializer(prod_operand_name, proto_type, [], [0.5])\n\n apply_reshape(scope, input_name, reshaped_input_name, container,\n desired_shape=[-1, 1, features])\n apply_sub(scope, [reshaped_input_name, theta_name], subtracted_input_name,\n container, broadcast=1)\n apply_pow(scope, [subtracted_input_name, exponent_name], pow_result_name,\n container, broadcast=1)\n apply_div(scope, [pow_result_name, sigma_name], div_result_name,\n container, broadcast=1)\n container.add_node('ReduceSum', div_result_name,\n reduced_sum_name, axes=[2], keepdims=0,\n name=scope.get_unique_operator_name('ReduceSum'))\n apply_mul(scope, [reduced_sum_name, prod_operand_name], mul_result_name,\n container, broadcast=1)\n apply_sub(scope, [sigma_sum_log_name, mul_result_name],\n part_log_likelihood_name,\n container, broadcast=1)\n apply_add(scope, [jointi_name, part_log_likelihood_name],\n sum_result_name, container, broadcast=1)\n return sum_result_name\n\n\ndef convert_sklearn_naive_bayes(scope, operator, container):\n # Computational graph:\n #\n # Note: In the following graph, variable names are in lower case\n # characters only and operator names are in upper case characters.\n # We borrow operator names from the official ONNX spec:\n # https://github.com/onnx/onnx/blob/master/docs/Operators.md\n # All variables are followed by their shape in [].\n #\n # Symbols:\n # M: Number of instances\n # N: Number of features\n # C: Number of classes\n # input(or x): input\n # output(or y): output (There are two paths for producing output, one for\n # string labels and the other one for int labels)\n # output_probability: class probabilties\n # feature_log_prob: Empirical log probability of features 
given a\n # class, P(x_i|y)\n # class_log_prior: Smoothed empirical log probability for each class\n #\n # Multinomial NB\n # Equation:\n # y = argmax (class_log_prior + X . feature_log_prob^T)\n #\n # Graph:\n #\n # input [M, N] -> MATMUL <- feature_log_prob.T [N, C]\n # |\n # V\n # matmul_result [M, C] -> CAST <- proto_type\n # |\n # V\n # cast_result [M, C] -> SUM <- class_log_prior [1, C]\n # |\n # .-----------------'\n # |\n # V\n # sum_result [M, C] -> ARGMAX -> argmax_output [M, 1]\n # |\n # V\n # classes [C] -----> ARRAYFEATUREEXTRACTOR\n # |\n # V (string labels)\n # array_feature_extractor_result [M, 1] --------------------------.\n # (int labels) | |\n # V |\n # CAST(to=proto_type) |\n # | |\n # V |\n # cast2_result [M, 1] |\n # | |\n # V |\n # output_shape [1] -> RESHAPE |\n # | |\n # V V\n # reshaped_result [M,] .-----RESHAPE\n # | |\n # V V\n # (to=onnx_proto.TensorProto.INT64)CAST --------> output [M,]\n #\n # Bernoulli NB\n # Equation:\n # y = argmax (class_log_prior + \\sum neg_prob\n # + X . (feature_log_prob - neg_prob))\n # neg_prob = log( 1 - e ^ feature_log_prob)\n #\n # Graph:\n #\n # .---------------------------------------------------------.\n # | |\n # feature_log_prob.T [N, C] -> EXP -> exp_result [N, C] |\n # | |\n # .----------------------' |\n # | |\n # V V\n # constant -> SUB -> sub_result [N, C] -> LOG -> neg_prob [N, C] -> SUB\n # | |\n # .---------' .---------'\n # | |\n # V V\n # .----------- sum_neg_prob [1, C] <- REDUCE_SUM difference_matrix [N, C]\n # | |\n # | .----------------------------------'\n # | |\n # | V\n # | input [M, N] -> MATMUL -> dot_product [M, C]\n # | |\n # | V\n # '------------------------------------> SUM\n # |\n # V\n # class_log_prior [1, C] -> SUM <- partial_sum_result [M, C]\n # |\n # V\n # sum_result [M, C] -> ARGMAX -> argmax_output [M, 1]\n # |\n # V\n # classes [C] -------> ARRAYFEATUREEXTRACTOR\n # |\n # .------------------------------------'\n # |\n # V (string labels)\n # array_feature_extractor_result [M, 1] ----------------.\n # (int labels) | |\n # V |\n # CAST(to=proto_type) |\n # | |\n # V |\n # cast2_result [M, 1] |\n # | |\n # V |\n # output_shape [1] -> RESHAPE |\n # | |\n # V V\n # reshaped_result [M,] RESHAPE\n # | |\n # V |\n # (to=onnx_proto.TensorProto.INT64)CAST -> output [M,] <-'\n #\n #\n # If model's binarize attribute is not null, then input of\n # Bernoulli NB is produced by the following graph:\n #\n # input [M, N] -> GREATER <- threshold [1]\n # | |\n # | V\n # | condition [M, N] -> CAST(to=proto_type)\n # | |\n # | V\n # | cast_values [M, N]\n # | |\n # V V\n # CONSTANT_LIKE -> zero_tensor [M, N] -> ADD\n # |\n # V\n # input [M, N] <- binarised_input [M, N]\n #\n # Sub-graph for probability calculation common to both Multinomial\n # and Bernoulli Naive Bayes\n #\n # sum_result [M, C] -> REDUCELOGSUMEXP -> reduce_log_sum_exp_result [M,]\n # | |\n # | V\n # | log_prob_shape [2] -> RESHAPE\n # | |\n # '------------> SUB <-- reshaped_log_prob [M, 1] <---'\n # |\n # V\n # log_prob [M, C] -> EXP -> prob_tensor [M, C] -.\n # |\n # output_probability [M, C] <- ZIPMAP <---------------------'\n float_dtype = container.dtype\n proto_type = container.proto_dtype\n\n nb_op = operator.raw_operator\n classes = get_label_classes(scope, nb_op)\n output_shape = (-1,)\n\n sum_result_name = scope.get_unique_variable_name('sum_result')\n argmax_output_name = scope.get_unique_variable_name('argmax_output')\n cast2_result_name = scope.get_unique_variable_name('cast2_result')\n reshaped_result_name = 
scope.get_unique_variable_name('reshaped_result')\n classes_name = scope.get_unique_variable_name('classes')\n reduce_log_sum_exp_result_name = scope.get_unique_variable_name(\n 'reduce_log_sum_exp_result')\n log_prob_name = scope.get_unique_variable_name('log_prob')\n array_feature_extractor_result_name = scope.get_unique_variable_name(\n 'array_feature_extractor_result')\n\n class_type = onnx_proto.TensorProto.STRING\n if np.issubdtype(classes.dtype, np.floating):\n class_type = onnx_proto.TensorProto.INT32\n classes = classes.astype(np.int32)\n elif np.issubdtype(classes.dtype, np.signedinteger):\n class_type = onnx_proto.TensorProto.INT32\n else:\n classes = np.array([s.encode('utf-8') for s in classes])\n\n container.add_initializer(classes_name, class_type, classes.shape, classes)\n\n if operator.type != 'SklearnGaussianNB':\n class_log_prior_name = scope.get_unique_variable_name(\n 'class_log_prior')\n feature_log_prob_name = scope.get_unique_variable_name(\n 'feature_log_prob')\n\n class_log_prior = nb_op.class_log_prior_.astype(\n float_dtype).reshape((1, -1))\n feature_log_prob = nb_op.feature_log_prob_.T.astype(float_dtype)\n\n container.add_initializer(\n feature_log_prob_name, proto_type,\n feature_log_prob.shape, feature_log_prob.flatten())\n container.add_initializer(\n class_log_prior_name, proto_type,\n class_log_prior.shape, class_log_prior.flatten())\n\n if container.target_opset < 6:\n sum_op_version = 1\n elif container.target_opset < 8:\n sum_op_version = 6\n else:\n sum_op_version = 8\n\n input_name = operator.inputs[0].full_name\n if type(operator.inputs[0].type) == Int64TensorType:\n cast_input_name = scope.get_unique_variable_name('cast_input')\n\n apply_cast(scope, operator.input_full_names, cast_input_name,\n container, to=proto_type)\n input_name = cast_input_name\n\n if operator.type == 'SklearnBernoulliNB':\n sum_result_name = _joint_log_likelihood_bernoulli(\n scope, container, input_name, feature_log_prob_name,\n class_log_prior_name, nb_op.binarize, nb_op.feature_count_,\n proto_type, sum_op_version, sum_result_name)\n elif operator.type == 'SklearnGaussianNB':\n sum_result_name = _joint_log_likelihood_gaussian(\n scope, container, input_name, nb_op,\n proto_type, sum_result_name)\n else:\n # MultinomialNB or ComplementNB\n matmul_result_name = (\n scope.get_unique_variable_name('matmul_result')\n if operator.type == 'SklearnMultinomialNB' or len(classes) == 1\n else sum_result_name)\n\n container.add_node(\n 'MatMul', [input_name, feature_log_prob_name],\n matmul_result_name, name=scope.get_unique_operator_name('MatMul'))\n if operator.type == 'SklearnMultinomialNB' or len(classes) == 1:\n apply_add(scope, [matmul_result_name, class_log_prior_name],\n sum_result_name, container, broadcast=1)\n\n container.add_node('ArgMax', sum_result_name,\n argmax_output_name,\n name=scope.get_unique_operator_name('ArgMax'), axis=1)\n\n # Calculation of class probability\n log_prob_shape = [-1, 1]\n\n reshaped_log_prob_name = scope.get_unique_variable_name(\n 'reshaped_log_prob')\n\n container.add_node('ReduceLogSumExp', sum_result_name,\n reduce_log_sum_exp_result_name,\n name=scope.get_unique_operator_name('ReduceLogSumExp'),\n axes=[1], keepdims=0)\n apply_reshape(scope, reduce_log_sum_exp_result_name,\n reshaped_log_prob_name, container,\n desired_shape=log_prob_shape)\n apply_sub(scope, [sum_result_name, reshaped_log_prob_name], log_prob_name,\n container, broadcast=1)\n apply_exp(scope, log_prob_name, operator.outputs[1].full_name, container)\n 
container.add_node(\n 'ArrayFeatureExtractor', [classes_name, argmax_output_name],\n array_feature_extractor_result_name, op_domain='ai.onnx.ml',\n name=scope.get_unique_operator_name('ArrayFeatureExtractor'))\n # Reshape op does not seem to handle INT64 tensor even though it is\n # listed as one of the supported types in the doc, so Cast was\n # required here.\n if class_type == onnx_proto.TensorProto.INT32:\n apply_cast(scope, array_feature_extractor_result_name,\n cast2_result_name, container,\n to=proto_type)\n apply_reshape(scope, cast2_result_name, reshaped_result_name,\n container, desired_shape=output_shape)\n apply_cast(scope, reshaped_result_name, operator.outputs[0].full_name,\n container, to=onnx_proto.TensorProto.INT64)\n else: # string labels\n apply_reshape(scope, array_feature_extractor_result_name,\n operator.outputs[0].full_name, container,\n desired_shape=output_shape)\n\n\nregister_converter('SklearnBernoulliNB', convert_sklearn_naive_bayes)\nregister_converter('SklearnComplementNB', convert_sklearn_naive_bayes)\nregister_converter('SklearnGaussianNB', convert_sklearn_naive_bayes)\nregister_converter('SklearnMultinomialNB', convert_sklearn_naive_bayes)\n",
"\"\"\"\nTests scikit-learn's KNeighbours Classifier and Regressor converters.\n\"\"\"\nimport unittest\nfrom distutils.version import StrictVersion\nimport numpy\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nimport onnxruntime\nfrom onnxruntime import InferenceSession\nfrom skl2onnx import convert_sklearn\nfrom skl2onnx.common.data_types import FloatTensorType, Int64TensorType\nfrom skl2onnx.common.data_types import onnx_built_with_ml\nfrom test_utils import dump_data_and_model, fit_classification_model\n\n\nclass TestNearestNeighbourConverter(unittest.TestCase):\n def _fit_model_binary_classification(self, model):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n y[y == 2] = 1\n model.fit(X, y)\n return model, X\n\n def _fit_model_multiclass_classification(self, model, use_string=False):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n if use_string:\n y = numpy.array([\"cl%d\" % _ for _ in y])\n model.fit(X, y)\n return model, X\n\n def _fit_model(self, model, n_targets=1, label_int=False):\n X, y = datasets.make_regression(n_features=4,\n random_state=0,\n n_targets=n_targets)\n if label_int:\n y = y.astype(numpy.int64)\n model.fit(X, y)\n return model, X\n\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def test_model_knn_regressor(self):\n model, X = self._fit_model(KNeighborsRegressor(n_neighbors=2))\n model_onnx = convert_sklearn(model, \"KNN regressor\",\n [(\"input\", FloatTensorType([None, 4]))])\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X.astype(numpy.float32)[:7],\n model, model_onnx,\n basename=\"SklearnKNeighborsRegressor\")\n\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def test_model_knn_regressor_yint(self):\n model, X = self._fit_model(\n KNeighborsRegressor(n_neighbors=2), label_int=True)\n model_onnx = convert_sklearn(model, \"KNN regressor\",\n [(\"input\", FloatTensorType([None, 4]))])\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X.astype(numpy.float32)[:7],\n model, model_onnx,\n basename=\"SklearnKNeighborsRegressorYInt\")\n\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def test_model_knn_regressor2_1(self):\n model, X = self._fit_model(KNeighborsRegressor(n_neighbors=1),\n n_targets=2)\n model_onnx = convert_sklearn(model, \"KNN regressor\",\n [(\"input\", FloatTensorType([None, 4]))])\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X.astype(numpy.float32)[:2],\n model, model_onnx,\n basename=\"SklearnKNeighborsRegressor2\")\n\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def test_model_knn_regressor2_2(self):\n model, X = self._fit_model(KNeighborsRegressor(n_neighbors=2),\n n_targets=2)\n model_onnx = convert_sklearn(model, \"KNN regressor\",\n [(\"input\", FloatTensorType([None, 4]))])\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X.astype(numpy.float32)[:2],\n model, model_onnx,\n basename=\"SklearnKNeighborsRegressor2\")\n\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def 
test_model_knn_regressor_weights_distance(self):\n model, X = self._fit_model(\n KNeighborsRegressor(\n weights=\"distance\", algorithm=\"brute\", n_neighbors=1))\n model_onnx = convert_sklearn(model, \"KNN regressor\",\n [(\"input\", FloatTensorType([None, 4]))])\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X.astype(numpy.float32)[:3],\n model, model_onnx,\n basename=\"SklearnKNeighborsRegressorWeightsDistance-Dec3\")\n\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def test_model_knn_regressor_metric_cityblock(self):\n model, X = self._fit_model(KNeighborsRegressor(metric=\"cityblock\"))\n model_onnx = convert_sklearn(model, \"KNN regressor\",\n [(\"input\", FloatTensorType([None, 4]))])\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X.astype(numpy.float32)[:7],\n model, model_onnx,\n basename=\"SklearnKNeighborsRegressorMetricCityblock\")\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def test_model_knn_classifier_binary_class(self):\n model, X = self._fit_model_binary_classification(\n KNeighborsClassifier())\n model_onnx = convert_sklearn(\n model,\n \"KNN classifier binary\",\n [(\"input\", FloatTensorType([None, X.shape[1]]))],\n )\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X.astype(numpy.float32),\n model, model_onnx,\n basename=\"SklearnKNeighborsClassifierBinary\")\n\n @unittest.skipIf(True, reason=\"later\")\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def test_model_knn_classifier_multi_class(self):\n model, X = self._fit_model_multiclass_classification(\n KNeighborsClassifier())\n model_onnx = convert_sklearn(\n model,\n \"KNN classifier multi-class\",\n [(\"input\", FloatTensorType([None, X.shape[1]]))],\n )\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X.astype(numpy.float32),\n model, model_onnx,\n basename=\"SklearnKNeighborsClassifierMulti\")\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def test_model_knn_classifier_multi_class_string(self):\n model, X = self._fit_model_multiclass_classification(\n KNeighborsClassifier(), use_string=True)\n model_onnx = convert_sklearn(\n model,\n \"KNN classifier multi-class\",\n [(\"input\", FloatTensorType([None, 3]))],\n )\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X.astype(numpy.float32),\n model, model_onnx,\n basename=\"SklearnKNeighborsClassifierMulti\")\n\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def test_model_knn_classifier_weights_distance(self):\n model, X = self._fit_model_multiclass_classification(\n KNeighborsClassifier(weights='distance'))\n model_onnx = convert_sklearn(\n model, 'KNN classifier', [('input', FloatTensorType([None, 3]))])\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X.astype(numpy.float32)[:7], model, model_onnx,\n basename=\"SklearnKNeighborsClassifierWeightsDistance\")\n\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def 
test_model_knn_classifier_metric_cityblock(self):\n model, X = self._fit_model_multiclass_classification(\n KNeighborsClassifier(metric='cityblock'))\n model_onnx = convert_sklearn(\n model, 'KNN classifier', [('input', FloatTensorType([None, 3]))])\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X.astype(numpy.float32)[:7], model, model_onnx,\n basename=\"SklearnKNeighborsClassifierMetricCityblock\")\n\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def test_model_knn_regressor_int(self):\n model, X = self._fit_model(KNeighborsRegressor())\n X = X.astype(numpy.int64)\n model_onnx = convert_sklearn(\n model,\n \"KNN regressor\",\n [(\"input\", Int64TensorType([None, X.shape[1]]))],\n )\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X,\n model,\n model_onnx,\n basename=\"SklearnGradientBoostingRegressionInt-Dec4\"\n )\n\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def test_model_knn_regressor_equal(self):\n X, y = datasets.make_regression(\n n_samples=1000, n_features=100, random_state=42)\n X = X.astype(numpy.int64)\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.5, random_state=42)\n model = KNeighborsRegressor(\n algorithm='brute', metric='manhattan').fit(X_train, y_train)\n model_onnx = convert_sklearn(\n model, 'knn',\n [('input', Int64TensorType([None, X_test.shape[1]]))])\n exp = model.predict(X_test)\n\n sess = InferenceSession(model_onnx.SerializeToString())\n res = sess.run(None, {'input': numpy.array(X_test)})[0]\n\n # The conversion has discrepencies when\n # neighbours are at the exact same distance.\n maxd = 1000\n accb = numpy.abs(exp - res) > maxd\n ind = [i for i, a in enumerate(accb) if a == 1]\n assert len(ind) == 0\n\n accp = numpy.abs(exp - res) < maxd\n acc = numpy.sum(accp)\n ratio = acc * 1.0 / res.shape[0]\n assert ratio >= 0.7\n # assert_almost_equal(exp, res)\n\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def test_model_multi_class_nocl(self):\n model, X = fit_classification_model(\n KNeighborsClassifier(),\n 2, label_string=True)\n model_onnx = convert_sklearn(\n model,\n \"multi-class nocl\",\n [(\"input\", FloatTensorType([None, X.shape[1]]))],\n options={id(model): {'nocl': True}})\n self.assertIsNotNone(model_onnx)\n sonx = str(model_onnx)\n assert 'classlabels_strings' not in sonx\n assert 'cl0' not in sonx\n dump_data_and_model(\n X, model, model_onnx, classes=model.classes_,\n basename=\"SklearnNaiveMultiNoCl\", verbose=False,\n allow_failure=\"StrictVersion(onnx.__version__)\"\n \" < StrictVersion('1.2') or \"\n \"StrictVersion(onnxruntime.__version__)\"\n \" <= StrictVersion('0.2.1')\")\n\n @unittest.skipIf(\n StrictVersion(onnxruntime.__version__) < StrictVersion(\"0.5.0\"),\n reason=\"not available\")\n def test_model_knn_regressor2_2_pipee(self):\n pipe = make_pipeline(StandardScaler(),\n KNeighborsClassifier())\n model, X = self._fit_model_binary_classification(pipe)\n model_onnx = convert_sklearn(\n model, \"KNN pipe\",\n [(\"input\", FloatTensorType([None, X.shape[1]]))])\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X.astype(numpy.float32)[:2],\n model, model_onnx,\n basename=\"SklearnKNeighborsRegressorPipe2\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.log",
"numpy.issubdtype",
"numpy.zeros"
],
[
"numpy.abs",
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.neighbors.KNeighborsRegressor",
"sklearn.datasets.make_regression",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
EngBioNUS/BMSS2
|
[
"41163c61a4e0ef3c6430e5954d81a77832e49a9d",
"41163c61a4e0ef3c6430e5954d81a77832e49a9d",
"41163c61a4e0ef3c6430e5954d81a77832e49a9d",
"41163c61a4e0ef3c6430e5954d81a77832e49a9d"
] |
[
"build/lib/BMSS/traceanalysis.py",
"examples/example_2_logic_gate/LogicGate_Not_Double_MaturationSecond.py",
"BMSS/models/model_functions/TestModel_Monod_Constitutive_Double.py",
"BMSS/models/model_functions/BMSS_InducerDegradation_DelayActivation_Inducible_ActiveTransport.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib import get_backend\nfrom scipy.stats import skewtest, kurtosistest\n\n###############################################################################\n#Globals\n###############################################################################\n#Refer for details: https://seaborn.pydata.org/tutorial/color_palettes.html\npalette_types = {'color': lambda n_colors, palette='bright', **kwargs : sns.color_palette(palette=palette, n_colors=n_colors),\n 'light': lambda n_colors, color, **kwargs : sns.light_palette(n_colors=n_colors+2, color=color, **kwargs)[2:],\n 'dark' : lambda n_colors, color, **kwargs : sns.dark_palette( n_colors=n_colors+2, color=color, **kwargs)[2:],\n 'cubehelix': lambda n_colors, **kwargs : sns.cubehelix_palette(n_colors=n_colors, **kwargs),\n 'diverging': lambda n, **kwargs : sns.diverging_palette(n=n, **kwargs),\n } \n#Refer for details: https://xkcd.com/color/rgb/\nall_colors = sns.colors.xkcd_rgb\n\n###############################################################################\n#Import\n###############################################################################\ndef import_trace(files, keys=[], **pd_args):\n '''Used for traces that have been saved to csv files.\n \n :param files: A list of file names.\n :param keys: A list of keys to index the traces.\n :param pd_args: Keyword arguments for pandas.read_csv.\n :return traces: Returns a dict of traces.\n \n '''\n data = {}\n for i in range(len(files)):\n key = keys[i+1] if keys else i+1\n data[key] = pd.read_csv(files[i], **pd_args)\n return data \n\n###############################################################################\n#Skewness and Kurtosis\n###############################################################################\ndef check_skewness(traces, output='df'):\n '''Calculates skewness of distribution of sampled parameters\n \n :param traces: A dict of traces.\n :param output: Causes the return value to be formatted as DataFrame.\n :return result: A DataFrame if output is \"df\" and a dict otherwise.\n\n '''\n return scipy_test(skewtest, traces, output='df')\n\ndef check_kurtosis(traces, output='df'):\n '''Calculates kurtosis of distribution of sampled parameters\n \n :param traces: A dict of traces.\n :param output: Causes the return value to be formatted as DataFrame.\n :return result: A DataFrame if output is \"df\" and a dict otherwise.\n\n '''\n return scipy_test(kurtosistest, traces, output='df')\n\ndef scipy_test(test_func, traces, output='df'):\n '''\n :meta private:\n '''\n result = {}\n for label in traces:\n trace = traces[label]\n test_result = test_func(trace, axis=0, nan_policy='omit')\n \n if output == 'df':\n variables = trace.columns.to_list()\n df = pd.DataFrame( test_result, columns=variables, index=('stats', 'pval'))\n result[label] = df\n else:\n result[label] = test_result\n \n return result\n \n###############################################################################\n#Wrapper Functions for Bivariate Plots\n###############################################################################\ndef pairplot_steps(traces, pairs, figs=[], AX={}, gradient=5, palette={}, legend_args={}, plot_args={'marker': '+', 'linewidth':0}, palette_type='light'):\n '''Generates a pair plot between two parameters.\n '''\n return pairplot_wrapper('plot', traces=traces, pairs=pairs, figs=figs, AX=AX, palette=palette, gradient=gradient, palette_type=palette_type, legend_args=legend_args, 
plot_args=plot_args)\n\ndef pairplot_kde(traces, pairs, figs=[], AX={}, palette={}, legend_args={}, plot_args={}):\n '''Generates a pair plot between two parameters in kde form.\n '''\n return pairplot_wrapper('kde', traces=traces, pairs=pairs, figs=figs, AX=AX, palette=palette, gradient=1, palette_type='light', legend_args=legend_args, plot_args=plot_args)\n\ndef pairplot_wrapper(plot_func, traces, pairs, figs=[], AX={}, palette={}, gradient=1, palette_type='light', legend_args={}, plot_args={}):\n '''\n :meta private:\n '''\n pairs1 = [tuple(pair) for pair in pairs]\n palette1 = make_palette(traces, pairs1, palette, gradient=gradient, palette_type=palette_type)\n figs1, AX1 = figs, AX\n \n for label in traces:\n figs1, AX1 = plot_helper(plot_func, label, traces[label], pairs1, figs=figs1, AX=AX1, palette=palette1[label], legend_args=legend_args, plot_args=plot_args)\n \n [fs(fig) for fig in figs1]\n \n return figs1, AX1\n \n###############################################################################\n#Wrapper Functions for Univariate Plots\n############################################################################### \ndef plot_steps(traces, skip=[], figs=None, AX=None, palette=None, legend_args={}, plot_args={'marker': '+', 'linewidth':0}):\n '''Generates trace plot for parameters.\n \n :param traces: A dict of traces.\n :param skip: A list of parameters to not plot.\n :param figs: A list of figures for containing the plots. Default is None.\n :param AX: A dict of param - Axes object pairs. Default is None.\n :param palette: A dict of colors. Default is None.\n :param legend_args: A dict of arguments for the legend. Default is None.\n :param plot_args: A dict of arguments for plotting such as marker.\n :return result: Figure and Axes objects.\n \n '''\n return singleplot_wrapper('plot', traces, skip=skip, figs=figs, AX=AX, palette=palette, legend_args=legend_args, plot_args=plot_args)\n \ndef plot_kde(traces, skip=[], figs=[], AX={}, palette={}, legend_args={}, plot_args={'linewidth': 3}):\n '''Generates kde plot for parameters.\n \n :param traces: A dict of traces.\n :param skip: A list of parameters to not plot.\n :param figs: A list of figures for containing the plots. Default is None.\n :param AX: A dict of param - Axes object pairs. Default is None.\n :param palette: A dict of colors. Default is None.\n :param legend_args: A dict of arguments for the legend. Default is None.\n :param plot_args: A dict of arguments for plotting such as marker.\n :return result: Figure and Axes objects.\n \n '''\n return singleplot_wrapper('kde', traces, skip=skip, figs=figs, AX=AX, palette=palette, legend_args=legend_args, plot_args=plot_args)\n \ndef plot_hist(traces, skip=[], figs=[], AX={}, palette={}, legend_args={}, plot_args={}):\n '''Generates histogram plot for parameters.\n \n :param traces: A dict of traces.\n :param skip: A list of parameters to not plot.\n :param figs: A list of figures for containing the plots. Default is None.\n :param AX: A dict of param - Axes object pairs. Default is None.\n :param palette: A dict of colors. Default is None.\n :param legend_args: A dict of arguments for the legend. 
Default is None.\n :param plot_args: A dict of arguments for plotting such as marker.\n :return result: Figure and Axes objects.\n \n '''\n return singleplot_wrapper('hist', traces, skip=skip, figs=figs, AX=AX, palette=palette, legend_args=legend_args, plot_args=plot_args)\n \ndef singleplot_wrapper(plot_func, traces, skip=[], figs=[], AX={}, palette={}, legend_args={}, plot_args={}):\n '''\n :meta private:\n '''\n figs = [] if figs is None else figs\n AX = {} if AX is None else AX\n palette = {} if palette is None else palette\n legend_args = {} if legend_args is None else legend_args\n plot_args = {} if plot_args is None else plot_args \n \n figs1, AX1, variables, first_label = setup_singleplot(traces, skip, figs, AX)\n palette1 = make_palette(traces, variables, palette)\n legend_args1 = legend_args if AX else {'labels': []}\n \n for label in traces:\n figs1, AX1 = plot_helper(plot_func, label, traces[label], variables, figs=figs1, AX=AX1, palette=palette1[label], legend_args=legend_args1, plot_args=plot_args)\n \n [fs(fig) for fig in figs1]\n \n if not AX:\n AX1[next(iter(AX1))].legend(**legend_args)\n return figs1, AX1\n\n###############################################################################\n#Supporting Functions for Wrapping\n###############################################################################\ndef setup_singleplot(traces, skip, figs, AX):\n '''\n :meta private:\n '''\n first_label = next(iter(traces))\n variables = [variable for variable in traces[first_label].columns.to_list() if variable not in skip]\n n = len(variables)\n \n figs1 = figs if AX else [plt.figure()]\n if AX:\n AX1 = AX \n else:\n AX1 = {variables[i]: figs1[0].add_subplot(n//2 + n%2, 2, i+1) for i in range(len(variables))}\n \n return figs1, AX1, variables, first_label\n\ndef make_palette(traces, variables, palette, gradient=1, palette_type='light'):\n '''\n :meta private:\n '''\n global palette_types\n if palette:\n if type(palette[next(iter(palette))]) == dict:\n #Assume colors have been fully specified\n palette1 = palette\n else:\n #Assume colors for each trace has been specified\n palette1 = {}\n for label in palette:\n base_color = palette[label]\n colors = palette_types[palette_type](gradient, base_color)\n palette1[label] = {variable: colors for variable in variables}\n \n else:\n base_colors = palette_types['color'](len(traces), 'muted')\n \n palette1 = {}\n labels = list(traces.keys())\n for i in range(len(labels)):\n label = labels[i] if type(labels[i]) != list else tuple(labels[i])\n base_color = base_colors[i]\n colors = palette_types[palette_type](gradient, base_color)\n palette1[label] = {variable: colors for variable in variables}\n \n return palette1\n\n###############################################################################\n#Main Plotting\n###############################################################################\ndef plot_helper(plot_func, label, trace, variables, figs=[], AX={}, palette={}, legend_args={}, plot_args={}):\n '''\n :meta private:\n '''\n n = len(variables) \n figs1 = figs if AX else [plt.figure() for i in range(n)]\n AX1 = AX if AX else {variables[i]: figs1[i].add_subplot(1,1,1) for i in range(n)}\n\n lines = []\n for i in range(len(variables)):\n \n if type(variables[i]) == str:\n vx, vy = None, variables[i]\n else:\n vx, vy = variables[i]\n \n ax = AX1[variables[i]]\n ax.set(xlabel=vx, ylabel=vy)\n ax.ticklabel_format(style='sci', scilimits=(-2,3))\n \n colors = palette.get(variables[i], [all_colors['baby blue']])\n x = np.array_split(trace[vx], 
len(colors)) if vx else [[]]*len(colors)\n y = np.array_split(trace[vy], len(colors))\n\n mapped_lines = [axis_plot(ax, plot_func, x=x[i], y=y[i], color=colors[i], label=label if i==len(colors)-1 else '_nolabel',**plot_args) for i in range(len(colors))]\n\n lines.append(mapped_lines[-1])\n \n if legend_args: \n try:\n if len(legend_args.get('labels', [])):\n legend_args['handles'] = lines\n for key in AX1:\n AX1[key].legend(**legend_args)\n except:\n pass\n return figs1, AX1\n\ndef axis_plot(ax, func, x, y, *args, **kwargs):\n '''\n :meta private:\n '''\n if func == 'plot': \n if len(x):\n ax.plot(x, y, *args, **kwargs)\n else:\n ax.plot(y, *args, **kwargs)\n elif func == 'hist':\n ax.hist(y, *args, **kwargs)\n elif func == 'kde':\n if len(x):\n sns.kdeplot(data=y, data2=x, *args, ax=ax, **kwargs)\n else:\n sns.kdeplot(y, *args, ax=ax, **kwargs)\n return\n\ndef fs(figure):\n '''\n :meta private:\n '''\n try:\n plt.figure(figure.number)\n backend = get_backend()\n manager = plt.get_current_fig_manager()\n \n if backend == 'TkAgg':\n manager.resize(*manager.window.maxsize())\n \n elif backend == 'Qt5Agg' or backend == 'Qt4Agg': \n manager.window.showMaximized()\n \n else:\n manager.frame.Maximize(True)\n plt.pause(0.03)\n except:\n pass\n return figure",
"import numpy as np\nfrom numpy import log as ln\nfrom numpy import log10 as log\nfrom numpy import exp\nfrom numba import jit\n\n@jit(nopython=True)\ndef model_LogicGate_Not_Double_MaturationSecond(y, t, params):\n\tm1 = y[0]\n\tm2 = y[1]\n\tp1 = y[2]\n\tp2n = y[3]\n\tp2 = y[4]\n\n\tsynm1 = params[0]\n\tsynm2 = params[1]\n\tdegm = params[2]\n\tkp1 = params[3]\n\trep = params[4]\n\tsynp1 = params[5]\n\tsynp2 = params[6]\n\tmatp2 = params[7]\n\tdegp = params[8]\n\tu1 = params[9]\n\n\tdm1 = synm1*u1 -degm *m1\n\tdm2 = synm2*(kp1+rep*p1)/(kp1+p1) -degm *m2\n\tdp1 = synp1 *m1 -degp *p1\n\tdp2n = synp2 *m2 -matp2*p2n\n\tdp2 = matp2*p2n -degp *p2\n\n\treturn np.array([dm1, dm2, dp1, dp2n, dp2])",
"import numpy as np\nfrom numpy import log as ln\nfrom numpy import log10 as log\nfrom numpy import exp\nfrom numba import jit\n\n@jit(nopython=True)\ndef model_TestModel_Monod_Constitutive_Double(y, t, params):\n\tx = y[0]\n\ts = y[1]\n\tmh = y[2]\n\th = y[3]\n\n\tmu_max = params[0]\n\tKs = params[1]\n\tY = params[2]\n\tsynm = params[3]\n\tdegm = params[4]\n\tsynh = params[5]\n\n\tmu = mu_max*s/(s+Ks)\n\tdx = x*mu\n\tds = -dx/Y\n\tdmh= synm -mh*degm\n\tdh = synh*mh -h *mu\n\n\treturn np.array([dx, ds, dmh, dh])",
"import numpy as np\nfrom numpy import log as ln\nfrom numpy import log10 as log\nfrom numpy import exp\nfrom numba import jit\n\n@jit(nopython=True)\ndef model_BMSS_InducerDegradation_DelayActivation_Inducible_ActiveTransport(y, t, params):\n\tinde = y[0]\n\tindi = y[1]\n\tm = y[2]\n\tp = y[3]\n\n\tvm = params[0]\n\tn_trans = params[1]\n\tk_trans = params[2]\n\tdegind = params[3]\n\tn = params[4]\n\tk_ind = params[5]\n\tsynm = params[6]\n\tdegm = params[7]\n\tsynp = params[8]\n\tdegp = params[9]\n\n\tdinde= -degind*inde -vm*(inde**n_trans)/(inde**n_trans + k_trans**n_trans)\n\tdindi= vm*(inde**n_trans)/(inde**n_trans + k_trans**n_trans)\n\tdm = synm*(indi**n)/(indi**n + k_ind**n) - degm*m\n\tdp = synp*m - degp*p\n\n\treturn np.array([dinde, dindi, dm, dp])"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame",
"matplotlib.get_backend",
"matplotlib.pyplot.get_current_fig_manager",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.figure"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JMazick/Web-Design-Challenge
|
[
"38f848da734baf68a15a62957fe53dcdba373daf"
] |
[
"WebVisualizations/data_to_html.py"
] |
[
"# Python program to convert \n# CSV to HTML Table \n\n\nimport pandas as pd \n\n# to read csv file named \"cities\" \na = pd.read_csv(\"cities.csv\") \n\n# to save as html file \n# named as \"Table\" \na.to_html(\"Table.html\") \n\n# assign it to a \n# variable (string) \ndata_table = a.to_html() \n"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
daobook/autogluon
|
[
"7309118f2ab1c9519f25acf61a283a95af95842b",
"bdbaac2d13d14d075b7aa751561f0bbd39927789",
"7309118f2ab1c9519f25acf61a283a95af95842b"
] |
[
"core/tests/unittests/scheduler/test_scheduler.py",
"core/src/autogluon/core/scheduler/seq_scheduler.py",
"tabular/src/autogluon/tabular/models/tabular_nn/tabular_nn_model.py"
] |
[
"import numpy as np\nimport pickle\nimport time\nfrom autogluon.core.space import Real\nfrom autogluon.core.scheduler import LocalSequentialScheduler\n\n\ndef test_local_sequential_scheduler():\n search_space = dict(\n lr=Real(1e-3, 1e-2, log=True),\n wd=Real(1e-3, 1e-2),\n epochs=10,\n )\n\n def train_fn(args, reporter):\n for e in range(args['epochs']):\n dummy_reward = 1 - np.power(1.8, -np.random.uniform(e, 2 * e))\n reporter(epoch=e + 1, reward=dummy_reward, lr=args.lr, wd=args.wd)\n\n scheduler = LocalSequentialScheduler(train_fn,\n search_space=search_space,\n num_trials=10)\n scheduler.run()\n scheduler.join_jobs()\n best_config = scheduler.get_best_config()\n best_task_id = scheduler.get_best_task_id()\n assert pickle.dumps(scheduler.config_history[best_task_id]) == pickle.dumps(best_config)\n\n\ndef test_timeout_scheduler():\n search_space = dict(\n lr=Real(1E-5, 1E-3),\n )\n\n def train_fn(args, reporter):\n start_tick = time.time()\n time.sleep(0.01)\n reporter(reward=time.time() - start_tick, time_attr=0)\n\n scheduler = LocalSequentialScheduler(train_fn,\n search_space=search_space,\n num_trials=7,\n time_attr='time_attr',\n time_out=0.025)\n scheduler.run()\n scheduler.join_jobs()\n assert len(scheduler.config_history) <= 2\n",
"import logging\nimport time\nimport os\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom typing import Tuple\n\nimport numpy as np\nfrom tqdm.auto import tqdm\n\nfrom .reporter import FakeReporter\nfrom ..searcher import searcher_factory\nfrom ..searcher.local_searcher import LocalSearcher\n\nlogger = logging.getLogger(__name__)\n\n\nclass LocalReporter:\n \"\"\"\n Reporter implementation for LocalSequentialScheduler\n \"\"\"\n\n def __init__(self, trial, searcher_config, training_history: dict, config_history: dict):\n self.trial = trial\n self.training_history = training_history\n self.training_history[trial] = []\n self.searcher_config = deepcopy(searcher_config)\n self.config_history = config_history\n self.trial_started = time.time()\n self.last_reported_time = self.trial_started\n self.last_result = None\n\n def __call__(self, *args, **kwargs):\n result = deepcopy(kwargs)\n if 'done' not in result:\n result['trial'] = self.trial\n\n now = time.time()\n result['time_this_iter'] = now - self.last_reported_time\n result['time_since_start'] = now - self.trial_started\n self.last_reported_time = now\n\n self.training_history[self.trial].append(result)\n\n if self.trial not in self.config_history:\n self.config_history[self.trial] = self.searcher_config\n if 'util_args' in self.searcher_config:\n self.searcher_config.pop('util_args')\n\n self.last_result = result\n\n def terminate(self):\n pass # compatibility\n\n\nclass LocalSequentialScheduler(object):\n \"\"\" Simple scheduler which schedules all HPO jobs in sequence without any parallelism.\n The next trial scheduling will be decided based on the available time left withing `time_out` setting\n and average time required for a trial to complete multiplied by the fill_factor (0.95) by default to\n accommodate variance in runtimes per HPO run.\n\n Parameters\n ----------\n train_fn : callable\n A task launch function for training.\n resource : dict\n Computation resources. For example, `{'num_cpus':2, 'num_gpus':1}`\n searcher : str\n Searcher (get_config decisions). If str, this is passed to\n searcher_factory along with search_options.\n search_options : dict\n If searcher is str, these arguments are passed to searcher_factory.\n num_trials : int\n Maximum number of jobs run in experiment. 
One of `num_trials`,\n `time_out` must be given.\n time_out : float\n If given, jobs are started only until this time_out (wall clock time).\n One of `num_trials`, `time_out` must be given.\n reward_attr : str\n Name of reward (i.e., metric to maximize) attribute in data obtained\n from reporter\n time_attr : str\n Name of resource (or time) attribute in data obtained from reporter.\n This attribute is optional for FIFO scheduling, but becomes mandatory\n in multi-fidelity scheduling (e.g., Hyperband).\n Note: The type of resource must be int.\n \"\"\"\n\n def __init__(self, train_fn, search_space, train_fn_kwargs=None, searcher='auto', reward_attr='reward', resource=None, **kwargs):\n self.train_fn = train_fn\n self.training_history = None\n self.config_history = None\n self._reward_attr = reward_attr\n self.time_attr = kwargs.get('time_attr', None)\n self.resource = resource\n self.max_reward = kwargs.get('max_reward', None)\n self.searcher: LocalSearcher = self.get_searcher_(searcher, train_fn, search_space=search_space, **kwargs)\n self.init_limits_(kwargs)\n self.train_fn_kwargs = train_fn_kwargs\n self.metadata = {\n 'search_space': search_space,\n 'search_strategy': self.searcher,\n 'stop_criterion': {\n 'time_limits': self.time_out,\n 'max_reward': self.max_reward},\n 'resources_per_trial': self.resource\n }\n\n def init_limits_(self, kwargs):\n if kwargs.get('num_trials', None) is None:\n assert kwargs.get('time_out', None) is not None, \"Need stopping criterion: Either num_trials or time_out\"\n self.num_trials = kwargs.get('num_trials', 9999)\n self.time_out = kwargs.get('time_out', None)\n if self.num_trials is None:\n assert self.time_out is not None, \"Need stopping criterion: Either num_trials or time_out\"\n\n def get_searcher_(self, searcher, train_fn, search_space, **kwargs) -> LocalSearcher:\n scheduler_opts = {}\n if searcher == 'auto':\n searcher = 'local_random'\n scheduler_opts = {'scheduler': 'local'}\n elif searcher == 'random':\n # FIXME: Hack to be compatible with gluoncv\n searcher = 'local_random'\n\n search_options = kwargs.get('search_options', None)\n if isinstance(searcher, str):\n if search_options is None:\n search_options = dict()\n _search_options = search_options.copy()\n _search_options['search_space'] = search_space\n _search_options['reward_attribute'] = self._reward_attr\n # Adjoin scheduler info to search_options, if not already done by\n # subclass\n if 'scheduler' not in _search_options:\n _search_options['scheduler'] = 'local'\n searcher = searcher_factory(searcher, **{**scheduler_opts, **_search_options})\n else:\n assert isinstance(searcher, LocalSearcher)\n return searcher\n\n def run(self, **kwargs):\n \"\"\"Run multiple trials given specific time and trial numbers limits.\n \"\"\"\n self.searcher.configure_scheduler(self)\n\n self.training_history = OrderedDict()\n self.config_history = OrderedDict()\n\n trial_run_times = []\n time_start = time.time()\n\n r = range(self.num_trials)\n for i in (tqdm(r) if self.num_trials < 1000 else r):\n trial_start_time = time.time()\n try:\n is_failed, result = self.run_trial(task_id=i)\n except Exception:\n # TODO: Add special exception type when there are no more new configurations to try (exhausted search space)\n logger.log(30, f'\\tWARNING: Encountered unexpected exception during trial {i}, stopping HPO early.')\n logger.exception('Detailed Traceback:') # TODO: Avoid logging if verbosity=0\n break\n trial_end_time = time.time()\n trial_run_times.append(np.NaN if is_failed else (trial_end_time - 
trial_start_time))\n\n if self.max_reward and self.get_best_reward() >= self.max_reward:\n logger.log(20, f'\\tMax reward is reached')\n break\n\n if self.time_out is not None:\n avg_trial_run_time = np.nanmean(trial_run_times)\n avg_trial_run_time = 0 if np.isnan(avg_trial_run_time) else avg_trial_run_time\n if not self.has_enough_time_for_trial_(self.time_out, time_start, trial_start_time, trial_end_time, avg_trial_run_time):\n logger.log(20, f'\\tTime limit exceeded')\n break\n\n @classmethod\n def has_enough_time_for_trial_(cls, time_out, time_start, trial_start_time, trial_end_time, avg_trial_run_time, fill_factor=0.95):\n \"\"\"\n Checks if the remaining time is enough to run another trial.\n\n Parameters\n ----------\n time_out total\n timeout in m\n time_start\n trials start time\n trial_start_time\n last trial start time\n trial_end_time\n last trial end time\n avg_trial_run_time\n running average of all trial runs\n fill_factor: float\n discount of `avg_trial_run_time` allowed for a next trial. Default is 0.95 of `avg_trial_run_time`\n\n Returns\n -------\n True if there is enough time to run another trial give runs statistics and remaining time\n\n \"\"\"\n time_spent = trial_end_time - time_start\n is_timeout_exceeded = time_spent >= time_out\n time_left = time_start + time_out - trial_end_time\n is_enough_time_for_another_trial = True\n if avg_trial_run_time:\n is_enough_time_for_another_trial = time_left > avg_trial_run_time * fill_factor\n return is_enough_time_for_another_trial and not is_timeout_exceeded\n\n @classmethod\n def get_average_trial_time_(cls, i, avg_trial_run_time, trial_start_time, time_end):\n trial_time = time_end - trial_start_time\n if avg_trial_run_time is None:\n avg_trial_run_time = trial_time\n else:\n avg_trial_run_time = ((avg_trial_run_time * i) + trial_time) / (i + 1)\n return avg_trial_run_time\n\n def run_trial(self, task_id=0) -> Tuple[bool, dict]:\n \"\"\"\n Start a trial with a given task_id\n\n Parameters\n ----------\n task_id\n task\n\n Returns\n -------\n is_failed: bool\n True if task completed successfully\n trial_start_time\n Trial start time\n trial_end_time\n Trial end time\n\n \"\"\"\n new_searcher_config = self.searcher.get_config()\n searcher_config = deepcopy(self.metadata['search_space'])\n searcher_config.update(new_searcher_config)\n reporter = LocalReporter(task_id, searcher_config, self.training_history, self.config_history)\n return self.run_job_(task_id, searcher_config, reporter)\n\n def run_job_(self, task_id, searcher_config, reporter):\n args = dict()\n if self.train_fn_kwargs is not None:\n train_fn_kwargs = deepcopy(self.train_fn_kwargs)\n else:\n train_fn_kwargs = dict()\n args.update(searcher_config)\n\n args['task_id'] = task_id\n self.searcher.register_pending(searcher_config)\n is_failed = False\n try:\n result = self.train_fn(args, reporter=reporter, **train_fn_kwargs)\n if type(reporter) is not FakeReporter and reporter.last_result:\n self.searcher.update(config=searcher_config, **reporter.last_result)\n except Exception as e:\n logger.error(f'Exception during a trial: {e}')\n self.searcher.evaluation_failed(config=searcher_config)\n reporter(traceback=e)\n is_failed = True\n result = {'traceback': str(e)}\n return is_failed, result\n\n def run_with_config(self, config):\n \"\"\"Run with config for final fit.\n It launches a single training trial under any fixed values of the hyperparameters.\n For example, after HPO has identified the best hyperparameter values based on a hold-out dataset,\n one can use this 
function to retrain a model with the same hyperparameters on all the available labeled data\n (including the hold out set). It can also returns other objects or states.\n \"\"\"\n is_failed, result = self.run_job_('run_with_config', config, FakeReporter())\n return result\n\n def join_jobs(self, timeout=None):\n pass # Compatibility\n\n def get_best_config(self):\n \"\"\"Get the best configuration from the finished jobs.\n \"\"\"\n # TODO: Consider passing the metadata search space to searcher to avoid having to do this\n searcher_config = deepcopy(self.metadata['search_space'])\n searcher_config.update(self.searcher.get_best_config())\n return searcher_config\n\n def get_best_reward(self):\n \"\"\"Get the best reward from the finished jobs.\n \"\"\"\n return self.searcher.get_best_reward()\n\n def get_training_curves(self, filename=None, plot=False, use_legend=True):\n \"\"\"Get Training Curves\n \"\"\"\n if filename is None and not plot:\n logger.warning('Please either provide filename or allow plot in get_training_curves')\n import matplotlib.pyplot as plt\n\n eval_metric = self.__get_training_history_metric('eval_metric', default='validation_performance')\n sign_mult = int(self.__get_training_history_metric('greater_is_better', default=True)) * 2 - 1\n\n plt.ylabel(eval_metric)\n plt.xlabel(self.time_attr)\n plt.title(\"Performance vs Training-Time in each HPO Trial\")\n for task_id, task_res in self.training_history.items():\n rewards = [x[self._reward_attr] * sign_mult for x in task_res]\n x = [x[self.time_attr] for x in task_res]\n plt.plot(x, rewards, label=f'task {task_id}')\n if use_legend:\n plt.legend(loc='best')\n if filename:\n logger.info(f'Saving Training Curve in {filename}')\n file_dir = os.path.split(os.path.abspath(filename))[0]\n if not os.path.exists(file_dir):\n os.makedirs(file_dir)\n plt.savefig(filename)\n if plot:\n plt.show()\n\n def __get_training_history_metric(self, metric, default=None):\n for _, task_res in self.training_history.items():\n if task_res and metric in task_res[0]:\n return task_res[0][metric]\n return default\n\n def get_best_task_id(self):\n \"\"\"Get the task id that results in the best configuration/best reward.\n\n If there are duplicated configurations, we return the id of the first one.\n \"\"\"\n best_config = self.get_best_config()\n for task_id, config in self.config_history.items():\n if best_config == config:\n return task_id\n raise RuntimeError('The best config {} is not found in config history = {}. '\n 'This should never happen!'.format(best_config, self.config_history))\n",
"\"\"\" MXNet neural networks for tabular data containing numerical, categorical, and text fields.\n First performs neural network specific pre-processing of the data.\n Contains separate input modules which are applied to different columns of the data depending on the type of values they contain:\n - Numeric columns are pased through single Dense layer (binary categorical variables are treated as numeric)\n - Categorical columns are passed through separate Embedding layers\n - Text columns are passed through separate LanguageModel layers\n Vectors produced by different input layers are then concatenated and passed to multi-layer MLP model with problem_type determined output layer.\n Hyperparameters are passed as dict params, including options for preprocessing stages.\n\"\"\"\nimport json\nimport logging\nimport os\nimport random\nimport time\nimport warnings\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler, QuantileTransformer, FunctionTransformer # PowerTransformer\n\nfrom autogluon.common.features.types import R_OBJECT, S_TEXT_NGRAM, S_TEXT_AS_CATEGORY\nfrom autogluon.common.utils.pandas_utils import get_approximate_df_mem_usage\nfrom autogluon.core.constants import BINARY, MULTICLASS, REGRESSION, SOFTCLASS\nfrom autogluon.core.utils import try_import_mxboard, try_import_mxnet\nfrom autogluon.core.utils.exceptions import TimeLimitExceeded, NotEnoughMemoryError\n\nfrom .categorical_encoders import OneHotMergeRaresHandleUnknownEncoder, OrdinalMergeRaresHandleUnknownEncoder\nfrom .hyperparameters.parameters import get_default_param\nfrom .hyperparameters.searchspaces import get_default_searchspace\nfrom autogluon.core.models.abstract.abstract_model import AbstractNeuralNetworkModel\nfrom ..utils import fixedvals_from_searchspaces\n\nwarnings.filterwarnings(\"ignore\", module='sklearn.preprocessing') # sklearn processing n_quantiles warning\nlogger = logging.getLogger(__name__)\nEPS = 1e-10 # small number\n\n\n# TODO: Gets stuck after infering feature types near infinitely in nyc-jiashenliu-515k-hotel-reviews-data-in-europe dataset, 70 GB of memory, c5.9xlarge\n# Suspect issue is coming from embeddings due to text features with extremely large categorical counts.\nclass TabularNeuralNetModel(AbstractNeuralNetworkModel):\n \"\"\" Class for neural network models that operate on tabular data.\n These networks use different types of input layers to process different types of data in various columns.\n\n Attributes:\n _types_of_features (dict): keys = 'continuous', 'skewed', 'onehot', 'embed', 'language'; values = column-names of Dataframe corresponding to the features of this type\n feature_arraycol_map (OrderedDict): maps feature-name -> list of column-indices in df corresponding to this feature\n self.feature_type_map (OrderedDict): maps feature-name -> feature_type string (options: 'vector', 'embed', 'language')\n processor (sklearn.ColumnTransformer): scikit-learn preprocessor object.\n\n Note: This model always assumes higher values of self.eval_metric indicate better performance.\n\n \"\"\"\n\n # Constants used throughout this class:\n # model_internals_file_name = 'model-internals.pkl' # store model internals here\n unique_category_str = '!missing!' # string used to represent missing values and unknown categories for categorical features. 
Should not appear in the dataset\n params_file_name = 'net.params' # Stores parameters of final network\n temp_file_name = 'temp_net.params' # Stores temporary network parameters (eg. during the course of training)\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n \"\"\"\n TabularNeuralNetModel object.\n\n Parameters\n ----------\n path (str): file-path to directory where to save files associated with this model\n name (str): name used to refer to this model\n problem_type (str): what type of prediction problem is this model used for\n eval_metric (func): function used to evaluate performance (Note: we assume higher = better)\n hyperparameters (dict): various hyperparameters for neural network and the NN-specific data processing\n features (list): List of predictive features to use, other features are ignored by the model.\n \"\"\"\n self.feature_arraycol_map = None\n self.feature_type_map = None\n self.features_to_drop = [] # may change between different bagging folds. TODO: consider just removing these from self._features_internal\n self.processor = None # data processor\n self.summary_writer = None\n self.ctx = None\n self.batch_size = None\n self.num_dataloading_workers = None\n self.num_dataloading_workers_inference = 0\n self.params_post_fit = None\n self.num_net_outputs = None\n self._architecture_desc = None\n self.optimizer = None\n self.verbosity = None\n\n def _set_default_params(self):\n \"\"\" Specifies hyperparameter values to use by default \"\"\"\n default_params = get_default_param(self.problem_type)\n for param, val in default_params.items():\n self._set_default_param_value(param, val)\n\n def _get_default_auxiliary_params(self) -> dict:\n default_auxiliary_params = super()._get_default_auxiliary_params()\n extra_auxiliary_params = dict(\n ignored_type_group_raw=[R_OBJECT],\n ignored_type_group_special=[S_TEXT_NGRAM, S_TEXT_AS_CATEGORY],\n )\n default_auxiliary_params.update(extra_auxiliary_params)\n return default_auxiliary_params\n\n def _get_default_searchspace(self):\n return get_default_searchspace(self.problem_type, num_classes=None)\n\n def set_net_defaults(self, train_dataset, params):\n \"\"\" Sets dataset-adaptive default values to use for our neural network \"\"\"\n if (self.problem_type == MULTICLASS) or (self.problem_type == SOFTCLASS):\n self.num_net_outputs = train_dataset.num_classes\n elif self.problem_type == REGRESSION:\n self.num_net_outputs = 1\n if params['y_range'] is None: # Infer default y-range\n y_vals = train_dataset.dataset._data[train_dataset.label_index].asnumpy()\n min_y = float(min(y_vals))\n max_y = float(max(y_vals))\n std_y = np.std(y_vals)\n y_ext = params['y_range_extend'] * std_y\n if min_y >= 0: # infer y must be nonnegative\n min_y = max(0, min_y-y_ext)\n else:\n min_y = min_y-y_ext\n if max_y <= 0: # infer y must be non-positive\n max_y = min(0, max_y+y_ext)\n else:\n max_y = max_y+y_ext\n params['y_range'] = (min_y, max_y)\n elif self.problem_type == BINARY:\n self.num_net_outputs = 2\n else:\n raise ValueError(\"unknown problem_type specified: %s\" % self.problem_type)\n\n if params['layers'] is None: # Use default choices for MLP architecture\n if self.problem_type == REGRESSION:\n default_layer_sizes = [256, 128] # overall network will have 4 layers. 
Input layer, 256-unit hidden layer, 128-unit hidden layer, output layer.\n else:\n default_sizes = [256, 128] # will be scaled adaptively\n # base_size = max(1, min(self.num_net_outputs, 20)/2.0) # scale layer width based on number of classes\n base_size = max(1, min(self.num_net_outputs, 100) / 50) # TODO: Updated because it improved model quality and made training far faster\n default_layer_sizes = [defaultsize*base_size for defaultsize in default_sizes]\n layer_expansion_factor = 1 # TODO: consider scaling based on num_rows, eg: layer_expansion_factor = 2-np.exp(-max(0,train_dataset.num_examples-10000))\n\n max_layer_width = params['max_layer_width']\n params['layers'] = [int(min(max_layer_width, layer_expansion_factor*defaultsize)) for defaultsize in default_layer_sizes]\n\n if train_dataset.has_vector_features() and params['numeric_embed_dim'] is None: # Use default choices for numeric embedding size\n vector_dim = train_dataset.dataset._data[train_dataset.vectordata_index].shape[1] # total dimensionality of vector features\n prop_vector_features = train_dataset.num_vector_features() / float(train_dataset.num_features) # Fraction of features that are numeric\n min_numeric_embed_dim = 32\n max_numeric_embed_dim = params['max_layer_width']\n params['numeric_embed_dim'] = int(min(max_numeric_embed_dim, max(min_numeric_embed_dim,\n params['layers'][0]*prop_vector_features*np.log10(vector_dim+10) )))\n return\n\n def _fit(self, X, y, X_val=None, y_val=None,\n time_limit=None, sample_weight=None, num_cpus=1, num_gpus=0, reporter=None, **kwargs):\n \"\"\" X (pd.DataFrame): training data features (not necessarily preprocessed yet)\n X_val (pd.DataFrame): test data features (should have same column names as Xtrain)\n y (pd.Series):\n y_val (pd.Series): are pandas Series\n kwargs: Can specify amount of compute resources to utilize (num_cpus, num_gpus).\n \"\"\"\n start_time = time.time()\n try_import_mxnet()\n import mxnet as mx\n self.verbosity = kwargs.get('verbosity', 2)\n if sample_weight is not None: # TODO: support\n logger.log(15, \"sample_weight not yet supported for TabularNeuralNetModel, this model will ignore them in training.\")\n\n params = self._get_model_params()\n params = fixedvals_from_searchspaces(params)\n\n if num_cpus is not None:\n self.num_dataloading_workers = max(1, int(num_cpus/2.0))\n else:\n self.num_dataloading_workers = 1\n if self.num_dataloading_workers == 1:\n self.num_dataloading_workers = 0 # 0 is always faster and uses less memory than 1\n self.batch_size = params['batch_size']\n train_dataset, val_dataset = self.generate_datasets(X=X, y=y, params=params, X_val=X_val, y_val=y_val)\n logger.log(15, \"Training data for neural network has: %d examples, %d features (%d vector, %d embedding, %d language)\" %\n (train_dataset.num_examples, train_dataset.num_features,\n len(train_dataset.feature_groups['vector']), len(train_dataset.feature_groups['embed']),\n len(train_dataset.feature_groups['language']) ))\n # self._save_preprocessor() # TODO: should save these things for hyperparam tunning. 
Need one HP tuner for network-specific HPs, another for preprocessing HPs.\n\n if num_gpus is not None and num_gpus >= 1:\n self.ctx = mx.gpu() # Currently cannot use more than 1 GPU\n else:\n self.ctx = mx.cpu()\n self.get_net(train_dataset, params=params)\n\n if time_limit is not None:\n time_elapsed = time.time() - start_time\n time_limit_orig = time_limit\n time_limit = time_limit - time_elapsed\n if time_limit <= time_limit_orig * 0.4: # if 60% of time was spent preprocessing, likely not enough time to train model\n raise TimeLimitExceeded\n\n self.train_net(train_dataset=train_dataset, params=params, val_dataset=val_dataset, initialize=True, setup_trainer=True, time_limit=time_limit, reporter=reporter)\n self.params_post_fit = params\n \"\"\"\n # TODO: if we don't want to save intermediate network parameters, need to do something like saving in temp directory to clean up after training:\n with make_temp_directory() as temp_dir:\n save_callback = SaveModelCallback(self.model, monitor=self.metric, mode=save_callback_mode, name=self.name)\n with progress_disabled_ctx(self.model) as model:\n original_path = model.path\n model.path = Path(temp_dir)\n model.fit_one_cycle(self.epochs, self.lr, callbacks=save_callback)\n\n # Load the best one and export it\n model.load(self.name)\n print(f'Model validation metrics: {model.validate()}')\n model.path = original_path\n \"\"\"\n\n def get_net(self, train_dataset, params):\n \"\"\" Creates a Gluon neural net and context for this dataset.\n Also sets up trainer/optimizer as necessary.\n \"\"\"\n from .embednet import EmbedNet\n self.set_net_defaults(train_dataset, params)\n self.model = EmbedNet(train_dataset=train_dataset, params=params, num_net_outputs=self.num_net_outputs, ctx=self.ctx)\n\n # TODO: Below should not occur until at time of saving\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n\n def train_net(self, train_dataset, params, val_dataset=None, initialize=True, setup_trainer=True, time_limit=None, reporter=None):\n \"\"\" Trains neural net on given train dataset, early stops based on test_dataset.\n Args:\n train_dataset (TabularNNDataset): training data used to learn network weights\n val_dataset (TabularNNDataset): validation data used for hyperparameter tuning\n initialize (bool): set = False to continue training of a previously trained model, otherwise initializes network weights randomly\n setup_trainer (bool): set = False to reuse the same trainer from a previous training run, otherwise creates new trainer from scratch\n \"\"\"\n start_time = time.time()\n import mxnet as mx\n logger.log(15, \"Training neural network for up to %s epochs...\" % params['num_epochs'])\n seed_value = params.get('seed_value')\n if seed_value is not None: # Set seed\n random.seed(seed_value)\n np.random.seed(seed_value)\n mx.random.seed(seed_value)\n if initialize: # Initialize the weights of network\n logging.debug(\"initializing neural network...\")\n self.model.collect_params().initialize(ctx=self.ctx)\n self.model.hybridize()\n logging.debug(\"initialized\")\n if setup_trainer:\n # Also setup mxboard to monitor training if visualizer has been specified:\n visualizer = self.params_aux.get('visualizer', 'none')\n if visualizer == 'tensorboard' or visualizer == 'mxboard':\n try_import_mxboard()\n from mxboard import SummaryWriter\n self.summary_writer = SummaryWriter(logdir=self.path, flush_secs=5, verbose=False)\n self.optimizer = self.setup_trainer(params=params, train_dataset=train_dataset)\n best_val_metric = -np.inf # higher = 
better\n val_metric = None\n best_val_epoch = 0\n val_improve_epoch = 0 # most recent epoch where validation-score strictly improved\n num_epochs = params['num_epochs']\n if val_dataset is not None:\n y_val = val_dataset.get_labels()\n else:\n y_val = None\n\n if params['loss_function'] is None:\n if self.problem_type == REGRESSION:\n params['loss_function'] = mx.gluon.loss.L1Loss()\n elif self.problem_type == SOFTCLASS:\n params['loss_function'] = mx.gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=False, from_logits=self.model.from_logits)\n else:\n params['loss_function'] = mx.gluon.loss.SoftmaxCrossEntropyLoss(from_logits=self.model.from_logits)\n\n loss_func = params['loss_function']\n epochs_wo_improve = params['epochs_wo_improve']\n loss_scaling_factor = 1.0 # we divide loss by this quantity to stabilize gradients\n\n rescale_losses = {mx.gluon.loss.L1Loss: 'std', mx.gluon.loss.HuberLoss: 'std', mx.gluon.loss.L2Loss: 'var'} # dict of loss names where we should rescale loss, value indicates how to rescale.\n loss_torescale = [key for key in rescale_losses if isinstance(loss_func, key)]\n if loss_torescale:\n loss_torescale = loss_torescale[0]\n if rescale_losses[loss_torescale] == 'std':\n loss_scaling_factor = np.std(train_dataset.get_labels())/5.0 + EPS # std-dev of labels\n elif rescale_losses[loss_torescale] == 'var':\n loss_scaling_factor = np.var(train_dataset.get_labels())/5.0 + EPS # variance of labels\n else:\n raise ValueError(\"Unknown loss-rescaling type %s specified for loss_func==%s\" % (rescale_losses[loss_torescale], loss_func))\n\n if self.verbosity <= 1:\n verbose_eval = -1 # Print losses every verbose epochs, Never if -1\n elif self.verbosity == 2:\n verbose_eval = 50\n elif self.verbosity == 3:\n verbose_eval = 10\n else:\n verbose_eval = 1\n\n net_filename = self.path + self.temp_file_name\n if num_epochs == 0: # use dummy training loop that stops immediately (useful for using NN just for data preprocessing / debugging)\n logger.log(20, \"Not training Neural Net since num_epochs == 0. 
Neural network architecture is:\")\n for batch_idx, data_batch in enumerate(train_dataset.dataloader):\n data_batch = train_dataset.format_batch_data(data_batch, self.ctx)\n with mx.autograd.record():\n output = self.model(data_batch)\n labels = data_batch['label']\n loss = loss_func(output, labels) / loss_scaling_factor\n # print(str(mx.nd.mean(loss).asscalar()), end=\"\\r\") # prints per-batch losses\n loss.backward()\n self.optimizer.step(labels.shape[0])\n if batch_idx > 0:\n break\n self.model.save_parameters(net_filename)\n logger.log(15, \"untrained Neural Net saved to file\")\n return\n\n start_fit_time = time.time()\n if time_limit is not None:\n time_limit = time_limit - (start_fit_time - start_time)\n\n # Training Loop:\n for e in range(num_epochs):\n if e == 0: # special actions during first epoch:\n logger.log(15, \"Neural network architecture:\")\n logger.log(15, str(self.model))\n cumulative_loss = 0\n for batch_idx, data_batch in enumerate(train_dataset.dataloader):\n data_batch = train_dataset.format_batch_data(data_batch, self.ctx)\n with mx.autograd.record():\n output = self.model(data_batch)\n labels = data_batch['label']\n loss = loss_func(output, labels) / loss_scaling_factor\n # print(str(mx.nd.mean(loss).asscalar()), end=\"\\r\") # prints per-batch losses\n loss.backward()\n self.optimizer.step(labels.shape[0])\n cumulative_loss += loss.sum()\n train_loss = cumulative_loss/float(train_dataset.num_examples) # training loss this epoch\n if val_dataset is not None:\n # FIXME: Switch to adaptive ES\n val_metric = self.score(X=val_dataset, y=y_val, metric=self.stopping_metric)\n if np.isnan(val_metric):\n if e == 0:\n raise RuntimeError(\"NaNs encountered in TabularNeuralNetModel training. Features/labels may be improperly formatted or NN weights may have diverged.\")\n else:\n logger.warning(\"Warning: NaNs encountered in TabularNeuralNetModel training. Reverting model to last checkpoint without NaNs.\")\n break\n if (val_metric >= best_val_metric) or (e == 0):\n if val_metric > best_val_metric:\n val_improve_epoch = e\n best_val_metric = val_metric\n best_val_epoch = e\n # Until functionality is added to restart training from a particular epoch, there is no point in saving params without test_dataset\n self.model.save_parameters(net_filename)\n else:\n best_val_epoch = e\n if val_dataset is not None:\n if verbose_eval > 0 and e % verbose_eval == 0:\n logger.log(15, \"Epoch %s. Train loss: %s, Val %s: %s\" %\n (e, train_loss.asscalar(), self.stopping_metric.name, val_metric))\n if self.summary_writer is not None:\n self.summary_writer.add_scalar(tag='val_'+self.stopping_metric.name,\n value=val_metric, global_step=e)\n else:\n if verbose_eval > 0 and e % verbose_eval == 0:\n logger.log(15, \"Epoch %s. 
Train loss: %s\" % (e, train_loss.asscalar()))\n if self.summary_writer is not None:\n self.summary_writer.add_scalar(tag='train_loss', value=train_loss.asscalar(), global_step=e) # TODO: do we want to keep mxboard support?\n if reporter is not None:\n # TODO: Ensure reporter/scheduler properly handle None/nan values after refactor\n if val_dataset is not None and (not np.isnan(val_metric)): # TODO: This might work without the if statement\n # epoch must be number of epochs done (starting at 1)\n reporter(epoch=e + 1,\n validation_performance=val_metric, # Higher val_metric = better\n train_loss=float(train_loss.asscalar()),\n eval_metric=self.eval_metric.name,\n greater_is_better=self.eval_metric.greater_is_better)\n if e - val_improve_epoch > epochs_wo_improve:\n break # early-stop if validation-score hasn't strictly improved in `epochs_wo_improve` consecutive epochs\n if time_limit is not None:\n time_elapsed = time.time() - start_fit_time\n time_epoch_average = time_elapsed / (e+1)\n time_left = time_limit - time_elapsed\n if time_left < time_epoch_average:\n logger.log(20, f\"\\tRan out of time, stopping training early. (Stopping on epoch {e})\")\n break\n\n if val_dataset is not None:\n self.model.load_parameters(net_filename) # Revert back to best model\n try:\n os.remove(net_filename)\n except FileNotFoundError:\n pass\n if val_dataset is None:\n logger.log(15, \"Best model found in epoch %d\" % best_val_epoch)\n else: # evaluate one final time:\n final_val_metric = self.score(X=val_dataset, y=y_val, metric=self.stopping_metric)\n if np.isnan(final_val_metric):\n final_val_metric = -np.inf\n logger.log(15, \"Best model found in epoch %d. Val %s: %s\" %\n (best_val_epoch, self.stopping_metric.name, final_val_metric))\n self.params_trained['num_epochs'] = best_val_epoch + 1\n return\n\n def _predict_proba(self, X, **kwargs):\n \"\"\" To align predict with abstract_model API.\n Preprocess here only refers to feature processing steps done by all AbstractModel objects,\n not tabularNN-specific preprocessing steps.\n If X is not DataFrame but instead TabularNNDataset object, we can still produce predictions,\n but cannot use preprocess in this case (needs to be already processed).\n \"\"\"\n from .tabular_nn_dataset import TabularNNDataset\n if isinstance(X, TabularNNDataset):\n return self._predict_tabular_data(new_data=X, process=False, predict_proba=True)\n elif isinstance(X, pd.DataFrame):\n X = self.preprocess(X, **kwargs)\n return self._predict_tabular_data(new_data=X, process=True, predict_proba=True)\n else:\n raise ValueError(\"X must be of type pd.DataFrame or TabularNNDataset, not type: %s\" % type(X))\n\n def _predict_tabular_data(self, new_data, process=True, predict_proba=True): # TODO ensure API lines up with tabular.Model class.\n \"\"\" Specific TabularNN method to produce predictions on new (unprocessed) data.\n Returns 1D numpy array unless predict_proba=True and task is multi-class classification (not binary).\n Args:\n new_data (pd.Dataframe or TabularNNDataset): new data to make predictions on.\n If you want to make prediction for just a single row of new_data, pass in: new_data.iloc[[row_index]]\n process (bool): should new data be processed (if False, new_data must be TabularNNDataset)\n predict_proba (bool): should we output class-probabilities (not used for regression)\n \"\"\"\n from .tabular_nn_dataset import TabularNNDataset\n import mxnet as mx\n if process:\n new_data = self.process_test_data(new_data, batch_size=self.batch_size, 
num_dataloading_workers=self.num_dataloading_workers_inference, labels=None)\n if not isinstance(new_data, TabularNNDataset):\n raise ValueError(\"new_data must of of type TabularNNDataset if process=False\")\n if self.problem_type == REGRESSION or not predict_proba:\n preds = mx.nd.zeros((new_data.num_examples,1))\n else:\n preds = mx.nd.zeros((new_data.num_examples, self.num_net_outputs))\n i = 0\n for batch_idx, data_batch in enumerate(new_data.dataloader):\n data_batch = new_data.format_batch_data(data_batch, self.ctx)\n preds_batch = self.model(data_batch)\n batch_size = len(preds_batch)\n if self.problem_type != REGRESSION:\n if not predict_proba: # need to take argmax\n preds_batch = mx.nd.argmax(preds_batch, axis=1, keepdims=True)\n else: # need to take softmax\n preds_batch = mx.nd.softmax(preds_batch, axis=1)\n preds[i:(i+batch_size)] = preds_batch\n i = i+batch_size\n if self.problem_type == REGRESSION or not predict_proba:\n return preds.asnumpy().flatten() # return 1D numpy array\n elif self.problem_type == BINARY and predict_proba:\n return preds[:,1].asnumpy() # for binary problems, only return P(Y==+1)\n\n return preds.asnumpy() # return 2D numpy array\n\n def generate_datasets(self, X, y, params, X_val=None, y_val=None):\n impute_strategy = params['proc.impute_strategy']\n max_category_levels = params['proc.max_category_levels']\n skew_threshold = params['proc.skew_threshold']\n embed_min_categories = params['proc.embed_min_categories']\n use_ngram_features = params['use_ngram_features']\n\n from .tabular_nn_dataset import TabularNNDataset\n if isinstance(X, TabularNNDataset):\n train_dataset = X\n else:\n X = self.preprocess(X)\n train_dataset = self.process_train_data(\n df=X, labels=y, batch_size=self.batch_size, num_dataloading_workers=self.num_dataloading_workers,\n impute_strategy=impute_strategy, max_category_levels=max_category_levels, skew_threshold=skew_threshold, embed_min_categories=embed_min_categories, use_ngram_features=use_ngram_features,\n )\n if X_val is not None:\n if isinstance(X_val, TabularNNDataset):\n val_dataset = X_val\n else:\n X_val = self.preprocess(X_val)\n val_dataset = self.process_test_data(df=X_val, labels=y_val, batch_size=self.batch_size, num_dataloading_workers=self.num_dataloading_workers_inference)\n else:\n val_dataset = None\n return train_dataset, val_dataset\n\n def process_test_data(self, df, batch_size, num_dataloading_workers, labels=None):\n \"\"\" Process train or test DataFrame into a form fit for neural network models.\n Args:\n df (pd.DataFrame): Data to be processed (X)\n labels (pd.Series): labels to be processed (y)\n test (bool): Is this test data where each datapoint should be processed separately using predetermined preprocessing steps.\n Otherwise preprocessor uses all data to determine propreties like best scaling factors, number of categories, etc.\n Returns:\n Dataset object\n \"\"\"\n from .tabular_nn_dataset import TabularNNDataset\n warnings.filterwarnings(\"ignore\", module='sklearn.preprocessing') # sklearn processing n_quantiles warning\n if labels is not None and len(labels) != len(df):\n raise ValueError(\"Number of examples in Dataframe does not match number of labels\")\n if (self.processor is None or self._types_of_features is None\n or self.feature_arraycol_map is None or self.feature_type_map is None):\n raise ValueError(\"Need to process training data before test data\")\n if self.features_to_drop:\n drop_cols = [col for col in df.columns if col in self.features_to_drop]\n if drop_cols:\n df = 
df.drop(columns=drop_cols)\n\n df = self.processor.transform(df) # 2D numpy array. self.feature_arraycol_map, self.feature_type_map have been previously set while processing training data.\n return TabularNNDataset(df, self.feature_arraycol_map, self.feature_type_map,\n batch_size=batch_size, num_dataloading_workers=num_dataloading_workers,\n problem_type=self.problem_type, labels=labels, is_test=True)\n\n def process_train_data(self, df, batch_size, num_dataloading_workers, impute_strategy, max_category_levels, skew_threshold, embed_min_categories, use_ngram_features, labels):\n \"\"\" Preprocess training data and create self.processor object that can be used to process future data.\n This method should only be used once per TabularNeuralNetModel object, otherwise will produce Warning.\n\n # TODO no label processing for now\n # TODO: language features are ignored for now\n # TODO: add time/ngram features\n # TODO: no filtering of data-frame columns based on statistics, e.g. categorical columns with all unique variables or zero-variance features.\n This should be done in default_learner class for all models not just TabularNeuralNetModel...\n \"\"\"\n from .tabular_nn_dataset import TabularNNDataset\n warnings.filterwarnings(\"ignore\", module='sklearn.preprocessing') # sklearn processing n_quantiles warning\n if labels is None:\n raise ValueError(\"Attempting process training data without labels\")\n if len(labels) != len(df):\n raise ValueError(\"Number of examples in Dataframe does not match number of labels\")\n\n self._types_of_features, df = self._get_types_of_features(df, skew_threshold=skew_threshold, embed_min_categories=embed_min_categories, use_ngram_features=use_ngram_features) # dict with keys: : 'continuous', 'skewed', 'onehot', 'embed', 'language', values = column-names of df\n logger.log(15, \"AutoGluon Neural Network infers features are of the following types:\")\n logger.log(15, json.dumps(self._types_of_features, indent=4))\n logger.log(15, \"\\n\")\n self.processor = self._create_preprocessor(impute_strategy=impute_strategy, max_category_levels=max_category_levels)\n df = self.processor.fit_transform(df) # 2D numpy array\n self.feature_arraycol_map = self._get_feature_arraycol_map(max_category_levels=max_category_levels) # OrderedDict of feature-name -> list of column-indices in df corresponding to this feature\n num_array_cols = np.sum([len(self.feature_arraycol_map[key]) for key in self.feature_arraycol_map]) # should match number of columns in processed array\n if num_array_cols != df.shape[1]:\n raise ValueError(\"Error during one-hot encoding data processing for neural network. 
Number of columns in df array does not match feature_arraycol_map.\")\n\n self.feature_type_map = self._get_feature_type_map() # OrderedDict of feature-name -> feature_type string (options: 'vector', 'embed', 'language')\n return TabularNNDataset(df, self.feature_arraycol_map, self.feature_type_map,\n batch_size=batch_size, num_dataloading_workers=num_dataloading_workers,\n problem_type=self.problem_type, labels=labels, is_test=False)\n\n def setup_trainer(self, params, train_dataset=None):\n \"\"\" Set up optimizer needed for training.\n Network must first be initialized before this.\n \"\"\"\n import mxnet as mx\n optimizer_opts = {'learning_rate': params['learning_rate'], 'wd': params['weight_decay'], 'clip_gradient': params['clip_gradient']}\n if 'lr_scheduler' in params and params['lr_scheduler'] is not None:\n if train_dataset is None:\n raise ValueError(\"train_dataset cannot be None when lr_scheduler is specified.\")\n base_lr = params.get('base_lr', 1e-6)\n target_lr = params.get('target_lr', 1.0)\n warmup_epochs = params.get('warmup_epochs', 10)\n lr_decay = params.get('lr_decay', 0.1)\n lr_mode = params['lr_scheduler']\n num_batches = train_dataset.num_examples // params['batch_size']\n lr_decay_epoch = [max(warmup_epochs, int(params['num_epochs']/3)), max(warmup_epochs+1, int(params['num_epochs']/2)),\n max(warmup_epochs+2, int(2*params['num_epochs']/3))]\n from .utils.lr_scheduler import LRSequential, LRScheduler\n lr_scheduler = LRSequential([\n LRScheduler('linear', base_lr=base_lr, target_lr=target_lr, nepochs=warmup_epochs, iters_per_epoch=num_batches),\n LRScheduler(lr_mode, base_lr=target_lr, target_lr=base_lr, nepochs=params['num_epochs'] - warmup_epochs,\n iters_per_epoch=num_batches, step_epoch=lr_decay_epoch, step_factor=lr_decay, power=2)\n ])\n optimizer_opts['lr_scheduler'] = lr_scheduler\n if params['optimizer'] == 'sgd':\n if 'momentum' in params:\n optimizer_opts['momentum'] = params['momentum']\n optimizer = mx.gluon.Trainer(self.model.collect_params(), 'sgd', optimizer_opts)\n elif params['optimizer'] == 'adam': # TODO: Can we try AdamW?\n optimizer = mx.gluon.Trainer(self.model.collect_params(), 'adam', optimizer_opts)\n else:\n raise ValueError(\"Unknown optimizer specified: %s\" % params['optimizer'])\n return optimizer\n\n def _get_feature_arraycol_map(self, max_category_levels):\n \"\"\" Returns OrderedDict of feature-name -> list of column-indices in processed data array corresponding to this feature \"\"\"\n feature_preserving_transforms = set(['continuous','skewed', 'ordinal', 'language']) # these transforms do not alter dimensionality of feature\n feature_arraycol_map = {} # unordered version\n current_colindex = 0\n for transformer in self.processor.transformers_:\n transformer_name = transformer[0]\n transformed_features = transformer[2]\n if transformer_name in feature_preserving_transforms:\n for feature in transformed_features:\n if feature in feature_arraycol_map:\n raise ValueError(\"same feature is processed by two different column transformers: %s\" % feature)\n feature_arraycol_map[feature] = [current_colindex]\n current_colindex += 1\n elif transformer_name == 'onehot':\n oh_encoder = [step for (name, step) in transformer[1].steps if name == 'onehot'][0]\n for i in range(len(transformed_features)):\n feature = transformed_features[i]\n if feature in feature_arraycol_map:\n raise ValueError(\"same feature is processed by two different column transformers: %s\" % feature)\n oh_dimensionality = min(len(oh_encoder.categories_[i]), 
max_category_levels+1)\n feature_arraycol_map[feature] = list(range(current_colindex, current_colindex+oh_dimensionality))\n current_colindex += oh_dimensionality\n else:\n raise ValueError(\"unknown transformer encountered: %s\" % transformer_name)\n return OrderedDict([(key, feature_arraycol_map[key]) for key in feature_arraycol_map])\n\n def _get_feature_type_map(self):\n \"\"\" Returns OrderedDict of feature-name -> feature_type string (options: 'vector', 'embed', 'language') \"\"\"\n if self.feature_arraycol_map is None:\n raise ValueError(\"must first call _get_feature_arraycol_map() before _get_feature_type_map()\")\n vector_features = self._types_of_features['continuous'] + self._types_of_features['skewed'] + self._types_of_features['onehot']\n feature_type_map = OrderedDict()\n for feature_name in self.feature_arraycol_map:\n if feature_name in vector_features:\n feature_type_map[feature_name] = 'vector'\n elif feature_name in self._types_of_features['embed']:\n feature_type_map[feature_name] = 'embed'\n elif feature_name in self._types_of_features['language']:\n feature_type_map[feature_name] = 'language'\n else:\n raise ValueError(\"unknown feature type encountered\")\n return feature_type_map\n\n def _create_preprocessor(self, impute_strategy, max_category_levels):\n \"\"\" Defines data encoders used to preprocess different data types and creates instance variable which is sklearn ColumnTransformer object \"\"\"\n if self.processor is not None:\n Warning(\"Attempting to process training data for TabularNeuralNetModel, but previously already did this.\")\n continuous_features = self._types_of_features['continuous']\n skewed_features = self._types_of_features['skewed']\n onehot_features = self._types_of_features['onehot']\n embed_features = self._types_of_features['embed']\n language_features = self._types_of_features['language']\n transformers = [] # order of various column transformers in this list is important!\n if continuous_features:\n continuous_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy=impute_strategy)),\n ('scaler', StandardScaler())])\n transformers.append( ('continuous', continuous_transformer, continuous_features) )\n if skewed_features:\n power_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy=impute_strategy)),\n ('quantile', QuantileTransformer(output_distribution='normal')) ]) # Or output_distribution = 'uniform'\n transformers.append( ('skewed', power_transformer, skewed_features) )\n if onehot_features:\n onehot_transformer = Pipeline(steps=[\n # TODO: Consider avoiding converting to string for improved memory efficiency\n ('to_str', FunctionTransformer(convert_df_dtype_to_str)),\n ('imputer', SimpleImputer(strategy='constant', fill_value=self.unique_category_str)),\n ('onehot', OneHotMergeRaresHandleUnknownEncoder(max_levels=max_category_levels, sparse=False))]) # test-time unknown values will be encoded as all zeros vector\n transformers.append( ('onehot', onehot_transformer, onehot_features) )\n if embed_features: # Ordinal transformer applied to convert to-be-embedded categorical features to integer levels\n ordinal_transformer = Pipeline(steps=[\n ('to_str', FunctionTransformer(convert_df_dtype_to_str)),\n ('imputer', SimpleImputer(strategy='constant', fill_value=self.unique_category_str)),\n ('ordinal', OrdinalMergeRaresHandleUnknownEncoder(max_levels=max_category_levels))]) # returns 0-n when max_category_levels = n-1. 
category n is reserved for unknown test-time categories.\n transformers.append( ('ordinal', ordinal_transformer, embed_features) )\n if language_features:\n raise NotImplementedError(\"language_features cannot be used at the moment\")\n return ColumnTransformer(transformers=transformers) # numeric features are processed in the same order as in numeric_features vector, so feature-names remain the same.\n\n def save(self, path: str = None, verbose=True) -> str:\n if self.model is not None:\n self._architecture_desc = self.model.architecture_desc\n temp_model = self.model\n temp_sw = self.summary_writer\n self.model = None\n self.summary_writer = None\n path_final = super().save(path=path, verbose=verbose)\n self.model = temp_model\n self.summary_writer = temp_sw\n self._architecture_desc = None\n\n # Export model\n if self.model is not None:\n params_filepath = path_final + self.params_file_name\n # TODO: Don't use os.makedirs here, have save_parameters function in tabular_nn_model that checks if local path or S3 path\n os.makedirs(os.path.dirname(path_final), exist_ok=True)\n self.model.save_parameters(params_filepath)\n return path_final\n\n @classmethod\n def load(cls, path: str, reset_paths=True, verbose=True):\n model: TabularNeuralNetModel = super().load(path=path, reset_paths=reset_paths, verbose=verbose)\n if model._architecture_desc is not None:\n from .embednet import EmbedNet\n model.model = EmbedNet(architecture_desc=model._architecture_desc, ctx=model.ctx) # recreate network from architecture description\n model._architecture_desc = None\n # TODO: maybe need to initialize/hybridize?\n model.model.load_parameters(model.path + model.params_file_name, ctx=model.ctx)\n model.summary_writer = None\n return model\n\n def get_info(self):\n info = super().get_info()\n info['hyperparameters_post_fit'] = self.params_post_fit\n return info\n\n def reduce_memory_size(self, remove_fit=True, requires_save=True, **kwargs):\n super().reduce_memory_size(remove_fit=remove_fit, requires_save=requires_save, **kwargs)\n if remove_fit and requires_save:\n self.optimizer = None\n\n def _get_default_stopping_metric(self):\n return self.eval_metric\n\n def _estimate_memory_usage(self, X, **kwargs):\n return 10 * get_approximate_df_mem_usage(X).sum()\n\n\ndef convert_df_dtype_to_str(df):\n return df.astype(str)\n\n\n\"\"\" General TODOs:\n\n- Automatically decrease batch-size if memory issue arises\n\n- Retrain final NN on full dataset (train+val). How to ensure stability here?\n- OrdinalEncoder class in sklearn currently cannot handle rare categories or unknown ones at test-time, so we have created our own Encoder in category_encoders.py\nThere is open PR in sklearn to address this: https://github.com/scikit-learn/scikit-learn/pull/13833/files\nCurrently, our code uses category_encoders package (BSD license) instead: https://github.com/scikit-learn-contrib/categorical-encoding\nOnce PR is merged into sklearn, may want to switch: category_encoders.Ordinal -> sklearn.preprocessing.OrdinalEncoder in preprocess_train_data()\n\n- Save preprocessed data so that we can do HPO of neural net hyperparameters more efficiently, while also doing HPO of preprocessing hyperparameters?\n Naive full HPO method requires redoing preprocessing in each trial even if we did not change preprocessing hyperparameters.\n Alternative is we save each proprocessed dataset & corresponding TabularNeuralNetModel object with its unique param names in the file. 
Then when we try a new HP-config, we first try loading from file if one exists.\n\n\"\"\"\n"
] |
[
[
"numpy.random.uniform"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"numpy.isnan",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.nanmean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.random.seed",
"sklearn.preprocessing.FunctionTransformer",
"numpy.isnan",
"sklearn.preprocessing.QuantileTransformer",
"sklearn.impute.SimpleImputer",
"numpy.std",
"numpy.log10",
"sklearn.preprocessing.StandardScaler",
"sklearn.compose.ColumnTransformer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Jacob-Zhou/spw-parser
|
[
"5f746a54d9a1da0591fc34f024eac2639bc3f407",
"5f746a54d9a1da0591fc34f024eac2639bc3f407"
] |
[
"parser/model.py",
"parser/cmds/train.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom parser.modules import CHAR_LSTM, MLP, BertEmbedding, Biaffine, BiLSTM\nfrom parser.modules.dropout import IndependentDropout, SharedDropout\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import (pack_padded_sequence, pad_packed_sequence,\n pad_sequence)\n\n\nclass Model(nn.Module):\n\n def __init__(self, args):\n super(Model, self).__init__()\n\n self.args = args\n self.pretrained = False\n # the embedding layer\n self.char_embed = nn.Embedding(num_embeddings=args.n_chars,\n embedding_dim=args.n_embed)\n n_lstm_input = args.n_embed\n if args.feat == 'bert':\n self.feat_embed = BertEmbedding(model=args.bert_model,\n n_layers=args.n_bert_layers,\n n_out=args.n_feat_embed)\n n_lstm_input += args.n_feat_embed\n if self.args.feat in {'bigram', 'trigram'}:\n self.bigram_embed = nn.Embedding(num_embeddings=args.n_bigrams,\n embedding_dim=args.n_embed)\n n_lstm_input += args.n_embed\n if self.args.feat == 'trigram':\n self.trigram_embed = nn.Embedding(num_embeddings=args.n_trigrams,\n embedding_dim=args.n_embed)\n n_lstm_input += args.n_embed\n\n self.embed_dropout = IndependentDropout(p=args.embed_dropout)\n\n # the lstm layer\n self.lstm = BiLSTM(input_size=n_lstm_input,\n hidden_size=args.n_lstm_hidden,\n num_layers=args.n_lstm_layers,\n dropout=args.lstm_dropout)\n self.lstm_dropout = SharedDropout(p=args.lstm_dropout)\n\n # the MLP layers\n self.mlp_span_l = MLP(n_in=args.n_lstm_hidden*2,\n n_out=args.n_mlp_span,\n dropout=args.mlp_dropout)\n self.mlp_span_r = MLP(n_in=args.n_lstm_hidden*2,\n n_out=args.n_mlp_span,\n dropout=args.mlp_dropout)\n self.mlp_label_l = MLP(n_in=args.n_lstm_hidden*2,\n n_out=args.n_mlp_label,\n dropout=args.mlp_dropout)\n self.mlp_label_r = MLP(n_in=args.n_lstm_hidden*2,\n n_out=args.n_mlp_label,\n dropout=args.mlp_dropout)\n\n # the Biaffine layers\n self.span_attn = Biaffine(n_in=args.n_mlp_span,\n bias_x=True,\n bias_y=False)\n self.label_attn = Biaffine(n_in=args.n_mlp_label,\n n_out=args.n_labels,\n bias_x=True,\n bias_y=True)\n self.pad_index = args.pad_index\n self.unk_index = args.unk_index\n\n def load_pretrained(self, embed_dict=None):\n embed = embed_dict['embed'] if isinstance(\n embed_dict, dict) and 'embed' in embed_dict else None\n if embed is not None:\n self.pretrained = True\n self.char_pretrained = nn.Embedding.from_pretrained(embed)\n nn.init.zeros_(self.char_embed.weight)\n if self.args.feat == 'bigram':\n embed = embed_dict['bi_embed']\n self.bi_pretrained = nn.Embedding.from_pretrained(embed)\n nn.init.zeros_(self.bigram_embed.weight)\n elif self.args.feat == 'trigram':\n bi_embed = embed_dict['bi_embed']\n tri_embed = embed_dict['tri_embed']\n self.bi_pretrained = nn.Embedding.from_pretrained(bi_embed)\n self.tri_pretrained = nn.Embedding.from_pretrained(tri_embed)\n nn.init.zeros_(self.bigram_embed.weight)\n nn.init.zeros_(self.trigram_embed.weight)\n return self\n\n def forward(self, feed_dict):\n chars = feed_dict[\"chars\"]\n batch_size, seq_len = chars.shape\n # get the mask and lengths of given batch\n mask = chars.ne(self.pad_index)\n lens = mask.sum(dim=1)\n ext_chars = chars\n # set the indices larger than num_embeddings to unk_index\n if self.pretrained:\n ext_mask = chars.ge(self.char_embed.num_embeddings)\n ext_chars = chars.masked_fill(ext_mask, self.unk_index)\n\n # get outputs from embedding layers\n char_embed = self.char_embed(ext_chars)\n if self.pretrained:\n char_embed += self.char_pretrained(chars)\n\n if self.args.feat == 'bert':\n feats = feed_dict[\"feats\"]\n 
feat_embed = self.feat_embed(*feats)\n char_embed, feat_embed = self.embed_dropout(char_embed, feat_embed)\n embed = torch.cat((char_embed, feat_embed), dim=-1)\n elif self.args.feat == 'bigram':\n bigram = feed_dict[\"bigram\"]\n ext_bigram = bigram\n if self.pretrained:\n ext_mask = bigram.ge(self.bigram_embed.num_embeddings)\n ext_bigram = bigram.masked_fill(ext_mask, self.unk_index)\n bigram_embed = self.bigram_embed(ext_bigram)\n if self.pretrained:\n bigram_embed += self.bi_pretrained(bigram)\n char_embed, bigram_embed = self.embed_dropout(\n char_embed, bigram_embed)\n embed = torch.cat((char_embed, bigram_embed), dim=-1)\n elif self.args.feat == 'trigram':\n bigram = feed_dict[\"bigram\"]\n trigram = feed_dict[\"trigram\"]\n ext_bigram = bigram\n ext_trigram = trigram\n if self.pretrained:\n ext_mask = bigram.ge(self.bigram_embed.num_embeddings)\n ext_bigram = bigram.masked_fill(ext_mask, self.unk_index)\n ext_mask = trigram.ge(self.trigram_embed.num_embeddings)\n ext_trigram = trigram.masked_fill(ext_mask, self.unk_index)\n bigram_embed = self.bigram_embed(ext_bigram)\n trigram_embed = self.trigram_embed(ext_trigram)\n if self.pretrained:\n bigram_embed += self.bi_pretrained(bigram)\n trigram_embed += self.tri_pretrained(trigram)\n char_embed, bigram_embed, trigram_embed = self.embed_dropout(\n char_embed, bigram_embed, trigram_embed)\n embed = torch.cat(\n (char_embed, bigram_embed, trigram_embed), dim=-1)\n else:\n embed = self.embed_dropout(char_embed)[0]\n\n x = pack_padded_sequence(embed, lens, True, False)\n x, _ = self.lstm(x)\n x, _ = pad_packed_sequence(x, True, total_length=seq_len)\n x = self.lstm_dropout(x)\n\n x_f, x_b = x.chunk(2, dim=-1)\n x = torch.cat((x_f[:, :-1], x_b[:, 1:]), -1)\n # apply MLPs to the BiLSTM output states\n span_l = self.mlp_span_l(x)\n span_r = self.mlp_span_r(x)\n label_l = self.mlp_label_l(x)\n label_r = self.mlp_label_r(x)\n\n # [batch_size, seq_len, seq_len]\n s_span = self.span_attn(span_l, span_r)\n # [batch_size, seq_len, seq_len, n_labels]\n s_label = self.label_attn(label_l, label_r).permute(0, 2, 3, 1)\n\n return s_span, s_label\n\n @classmethod\n def load(cls, path):\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n state = torch.load(path, map_location=device)\n model = cls(state['args'])\n model.load_pretrained(state['pretrained'])\n model.load_state_dict(state['state_dict'], False)\n model.to(device)\n\n return model\n\n def save(self, path):\n state_dict, pretrained = self.state_dict(), None\n if self.pretrained:\n pretrained = {'embed': state_dict.pop('char_pretrained.weight')}\n if hasattr(self, 'bi_pretrained'):\n pretrained.update(\n {'bi_embed': state_dict.pop('bi_pretrained.weight')})\n if hasattr(self, 'tri_pretrained'):\n pretrained.update(\n {'tri_embed': state_dict.pop('tri_pretrained.weight')})\n state = {\n 'args': self.args,\n 'state_dict': state_dict,\n 'pretrained': pretrained\n }\n torch.save(state, path)\n",
"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime, timedelta\nfrom parser import Model\nfrom parser.cmds.cmd import CMD\nfrom parser.utils.corpus import Corpus\nfrom parser.utils.data import TextDataset, batchify\nfrom parser.utils.metric import Metric\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import ExponentialLR\n\n\nclass Train(CMD):\n\n def add_subparser(self, name, parser):\n subparser = parser.add_parser(\n name, help='Train a model.'\n )\n subparser.add_argument('--ftrain', default='data/ctb51/train.pid',\n help='path to train file')\n subparser.add_argument('--fdev', default='data/ctb51/dev.pid',\n help='path to dev file')\n subparser.add_argument('--ftest', default='data/ctb51/test.pid',\n help='path to test file')\n subparser.add_argument('--embed', action='store_true',\n help='whether to use pretrained embeddings')\n subparser.add_argument('--unk', default=None,\n help='unk token in pretrained embeddings')\n subparser.add_argument('--dict-file', default=None,\n help='path for dictionary')\n return subparser\n\n def __call__(self, args):\n super(Train, self).__call__(args)\n\n train = Corpus.load(args.ftrain, self.fields)\n dev = Corpus.load(args.fdev, self.fields)\n test = Corpus.load(args.ftest, self.fields)\n\n train = TextDataset(\n train, self.fields, args.buckets)\n dev = TextDataset(\n dev, self.fields, args.buckets)\n test = TextDataset(\n test, self.fields, args.buckets)\n # set the data loaders\n train.loader = batchify(train, args.batch_size, True)\n dev.loader = batchify(dev, args.batch_size)\n test.loader = batchify(test, args.batch_size)\n print(f\"{'train:':6} {len(train):5} sentences, \"\n f\"{len(train.loader):3} batches, \"\n f\"{len(train.buckets)} buckets\")\n print(f\"{'dev:':6} {len(dev):5} sentences, \"\n f\"{len(dev.loader):3} batches, \"\n f\"{len(train.buckets)} buckets\")\n print(f\"{'test:':6} {len(test):5} sentences, \"\n f\"{len(test.loader):3} batches, \"\n f\"{len(train.buckets)} buckets\")\n\n print(\"Create the model\")\n embed = {'embed': self.CHAR.embed}\n if hasattr(self, 'BIGRAM'):\n embed.update({\n 'bi_embed': self.BIGRAM.embed,\n })\n if hasattr(self, 'TRIGRAM'):\n embed.update({\n 'tri_embed': self.TRIGRAM.embed,\n })\n self.model = Model(args).load_pretrained(embed)\n print(f\"{self.model}\\n\")\n self.model = self.model.to(args.device)\n if torch.cuda.device_count() > 1:\n self.model = nn.DataParallel(self.model)\n self.optimizer = Adam(self.model.parameters(),\n args.lr,\n (args.mu, args.nu),\n args.epsilon)\n decay_steps = args.decay_epochs * len(train.loader)\n self.scheduler = ExponentialLR(self.optimizer,\n args.decay**(1/decay_steps))\n\n total_time = timedelta()\n best_e, best_metric = 1, Metric()\n\n for epoch in range(1, args.epochs + 1):\n start = datetime.now()\n self.train(train.loader)\n\n print(f\"Epoch {epoch} / {args.epochs}:\")\n loss, dev_metric = self.evaluate(dev.loader)\n print(f\"{'dev:':6} Loss: {loss:.4f} {dev_metric}\")\n loss, test_metric = self.evaluate(test.loader)\n print(f\"{'test:':6} Loss: {loss:.4f} {test_metric}\")\n\n t = datetime.now() - start\n # save the model if it is the best so far\n if dev_metric > best_metric and epoch > args.patience//10:\n best_e, best_metric = epoch, dev_metric\n if hasattr(self.model, 'module'):\n self.model.module.save(args.model)\n else:\n self.model.save(args.model)\n print(f\"{t}s elapsed (saved)\\n\")\n else:\n print(f\"{t}s elapsed\\n\")\n total_time += t\n if epoch - best_e >= args.patience and epoch 
>= args.min_training_epoch:\n break\n self.model = Model.load(args.model)\n loss, metric = self.evaluate(test.loader)\n\n print(f\"max score of dev is {best_metric.score:.2%} at epoch {best_e}\")\n print(f\"the score of test at epoch {best_e} is {metric.score:.2%}\")\n print(f\"average time of each epoch is {total_time / epoch}s\")\n print(f\"{total_time}s elapsed\")\n"
] |
[
[
"torch.load",
"torch.cat",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Embedding",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.Embedding.from_pretrained",
"torch.cuda.is_available",
"torch.nn.init.zeros_",
"torch.save"
],
[
"torch.cuda.device_count",
"torch.nn.DataParallel",
"torch.optim.lr_scheduler.ExponentialLR"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mieskolainen/icenet
|
[
"030e2ab658ebc1d83f20cb24dca2bb46b8ac44ca",
"030e2ab658ebc1d83f20cb24dca2bb46b8ac44ca"
] |
[
"iceplot/iceplot.py",
"analysis/trg_train.py"
] |
[
"# Advanced histogramming & automated plotting functions\n# \n# (c) 2021 Mikael Mieskolainen\n# Licensed under the MIT License <http://opensource.org/licenses/MIT>.\n\n\nimport pathlib\nimport matplotlib\nmatplotlib.use('Agg') # Important for multithreaded applications\nfrom matplotlib import pyplot as plt\n\nimport numpy as np\nimport math\nimport copy\n\n\ndef chi2_cost(h_mc, h_data):\n \"\"\"\n Chi2 cost function between two histograms\n \"\"\"\n counts_mc = h_mc.counts * h_mc.binscale\n err_mc = h_mc.errs * h_mc.binscale\n\n counts_data = h_data.counts * h_data.binscale\n err_data = h_data.errs * h_data.binscale\n\n return np.sum((counts_mc - counts_data)**2 / (err_mc**2 + err_data**2))\n\n\ndef set_global_style(dpi=120, figsize=(4,3.75), font='serif', font_size=8, legend_fontsize=7, legend_handlelength=1):\n \"\"\" Set global plot style.\n \"\"\"\n plt.rcParams['legend.fontsize'] = legend_fontsize\n plt.rcParams['legend.handlelength'] = legend_handlelength\n\n plt.rcParams['figure.dpi'] = dpi\n plt.rcParams['figure.figsize'] = figsize\n\n plt.rcParams['font.family'] = font\n plt.rcParams['font.size'] = font_size\n\n\n# Colors\nimperial_dark_blue = (0, 0.24, 0.45)\nimperial_light_blue = (0, 0.43, 0.69)\nimperial_dark_red = (0.75, 0.10, 0.0)\nimperial_green = (0.0, 0.54, 0.23)\n\n\ndef colors(i, power=0.34):\n\n c = [imperial_dark_red, imperial_dark_blue, imperial_green, imperial_light_blue]\n\n if i < len(c):\n return c[i]\n else:\n return c[i%len(c)] * (1.0/power)\n\n\n\"\"\" Global marker styles\n\nzorder : approximate plotting order\nlw : linewidth\nls : linestyle\n\"\"\"\nerrorbar_style = {'zorder': 3, 'ls': ' ', 'lw': 1, 'marker': 'o', 'markersize': 2.5}\nplot_style = {'zorder': 2, 'ls': '-', 'lw': 1}\nhist_style_step = {'zorder': 0, 'ls': '-', 'lw': 1, 'histtype': 'step'}\nhist_style_fill = {'zorder': 0, 'ls': '-', 'lw': 1, 'histtype': 'stepfilled'}\nhist_style_bar = {'zorder': 0, 'ls': '-', 'lw': 1, 'histtype': 'bar'}\n\n\nclass hobj:\n \"\"\" Minimal histogram data object.\n \"\"\"\n def __init__(self, counts = 0, errs = 0, bins = 0, cbins = 0, binscale=1.0):\n self.counts = counts\n self.errs = errs\n self.bins = bins\n self.cbins = cbins\n self.binscale = binscale\n\n if (np.sum(counts) == 0):\n self.is_empty = True\n else:\n self.is_empty = False\n\n # + operator\n def __add__(self, other):\n\n if (self.is_empty == True): # Take the rhs\n return other\n\n if ((self.bins == other.bins).all() == False):\n raise(__name__ + ' + operator: cannot operate on different sized histograms')\n\n # Harmonic sum\n binscale = 1/(1/self.binscale + 1/other.binscale)\n\n counts = self.counts + other.counts\n errs = np.sqrt(self.errs**2 + other.errs**2)\n\n return hobj(counts, errs, bins, cbins, binscale)\n \n # += operator\n def __iadd__(self, other):\n\n if (self.is_empty == True): # Still empty\n return other\n\n if ((self.bins == other.bins).all() == False):\n raise(__name__ + ' += operator: cannot operate on different sized histograms')\n\n self.counts = self.counts + other.counts\n self.errs = np.sqrt(self.errs**2 + other.errs**2)\n \n # Harmonic sum\n self.binscale = 1/(1/self.binscale + 1/other.binscale)\n\n return self\n\n\ndef stepspace(start, stop, step):\n \"\"\" Linear binning edges between [start, stop]\n \"\"\"\n return np.arange(start, stop + step, step)\n\n\ndef plot_horizontal_line(ax, color=(0.5,0.5,0.5), linewidth=0.9):\n \"\"\" For the ratio plot\n \"\"\"\n xlim = ax.get_xlim()\n ax.plot(np.linspace(xlim[0], xlim[1], 2), np.array([1,1]), color=color, 
linewidth=linewidth)\n\n\ndef tick_calc(lim, step, N=6):\n \"\"\" Tick spacing calculator.\n \"\"\"\n return [np.round(lim[0] + i*step, N) for i in range(1 + math.floor((lim[1] - lim[0])/step))]\n\n\ndef set_axis_ticks(ax, ticks, dim='x'):\n \"\"\" Set ticks of the axis.\n \"\"\"\n if (dim == 'x'):\n ax.set_xticks(ticks)\n ax.set_xticklabels(list(map(str, ticks)))\n elif (dim == 'y'):\n ax.set_yticks(ticks)\n ax.set_yticklabels(list(map(str, ticks)))\n\n\ndef tick_creator(ax, xtick_step=None, ytick_step=None, ylim_ratio=(0.7, 1.3),\n ratio_plot=True, minorticks_on=True, ytick_ratio_step=0.15, labelsize=9,\n labelsize_ratio=8, **kwargs) :\n \"\"\" Axis tick constructor.\n \"\"\"\n\n # Get limits\n xlim = ax[0].get_xlim()\n ylim = ax[0].get_ylim()\n\n # X-axis\n if (xtick_step is not None):\n ticks = tick_calc(lim=xlim, step=xtick_step)\n set_axis_ticks(ax[-1], ticks, 'x')\n\n # Y-axis\n if (ytick_step is not None): \n ticks = tick_calc(lim=ylim, step=ytick_step)\n set_axis_ticks(ax[0], ticks, 'y')\n\n # Y-ratio-axis\n if ratio_plot:\n ax[0].tick_params(labelbottom=False)\n ax[1].tick_params(axis='y', labelsize=labelsize_ratio)\n\n ticks = tick_calc(lim=ylim_ratio, step=ytick_ratio_step)\n ticks = ticks[1:-1] # Remove the first and the last\n set_axis_ticks(ax[1], ticks, 'y')\n\n # Tick settings\n for a in ax:\n if minorticks_on: a.minorticks_on()\n a.tick_params(top=True, bottom=True, right=True, left=True, which='both', direction='in', labelsize=labelsize)\n\n return ax\n\ndef create_axes(xlabel='$x$', ylabel=r'Counts', ylabel_ratio='Ratio',\n xlim=(0,1), ylim=None, ylim_ratio=(0.7, 1.3),\n ratio_plot=True, figsize=(5,4), fontsize=9, units={'x': '', 'y': ''}, **kwargs):\n \"\"\" Axes creator.\n \"\"\"\n \n # Create subplots\n N = 2 if ratio_plot else 1\n gridspec_kw = {'height_ratios': (3.333, 1) if ratio_plot else (1,), 'hspace': 0.0}\n fig, ax = plt.subplots(N, figsize=figsize, gridspec_kw=gridspec_kw)\n ax = [ax] if (N == 1) else ax\n\n # Axes limits\n for a in ax:\n if xlim is not None:\n a.set_xlim(*xlim)\n\n if ylim is not None:\n ax[0].set_ylim(*ylim)\n\n # Axes labels\n if kwargs['density']:\n ylabel = f'$1/N$ {ylabel} / [{units[\"x\"]}]'\n else:\n ylabel = f'{ylabel} [{units[\"y\"]} / {units[\"x\"]}]'\n xlabel = f'{xlabel} [{units[\"x\"]}]'\n \n ax[0].set_ylabel(ylabel, fontsize=fontsize)\n ax[-1].set_xlabel(xlabel, fontsize=fontsize)\n\n # Ratio plot\n if ratio_plot:\n ax[1].set_ylabel(ylabel_ratio, fontsize=fontsize)\n ax[1].set_ylim(*ylim_ratio)\n\n # Setup ticks\n ax = tick_creator(ax=ax, ratio_plot=ratio_plot, **kwargs)\n\n return fig, ax\n\n\ndef ordered_legend(ax=None, order=None, frameon=False, unique=False, **kwargs):\n \"\"\" Ordered legends.\n \"\"\"\n\n def unique_everseen(seq, key=None):\n seen = set()\n seen_add = seen.add\n return [x for x,k in zip(seq,key) if not (k in seen or seen_add(k))]\n\n if ax is None: ax=plt.gca()\n handles, labels = ax.get_legend_handles_labels()\n\n # Sort both labels and handles by labels\n labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))\n \n # Sort according to a given list, which may be incomplete\n if order is not None: \n keys=dict(zip(order,range(len(order))))\n labels, handles = zip(*sorted(zip(labels, handles), key=lambda t, keys=keys: keys.get(t[0],np.inf)))\n\n # Keep only the first of each handle\n if unique: labels, handles= zip(*unique_everseen(zip(labels,handles), key = labels)) \n ax.legend(handles, labels, frameon=frameon, **kwargs)\n\n return (handles, labels)\n\n\ndef binwidth(bins):\n \"\"\" 
Return binwidth from a linear array \"\"\"\n return (bins[1:] - bins[0:-1])\n\n\ndef edge2centerbins(bins) :\n \"\"\" Get centerbins from edgebins.\n \"\"\"\n return (bins[1:] + bins[0:-1])/2\n\n\ndef ratioerr(A, B, sigma_A, sigma_B, sigma_AB = 0, EPS = 1E-15):\n \"\"\" Ratio f(A,B) = A/B error, by Taylor expansion of f.\n \"\"\"\n A[np.abs(A) < EPS] = EPS\n B[np.abs(B) < EPS] = EPS\n return np.abs(A/B) * np.sqrt((sigma_A/A)**2 + (sigma_B/B)**2 - 2*sigma_AB/(A*B))\n\n\ndef hist_to_density(counts, errs, bins):\n \"\"\" Normalize to unit integral density function over the visible histogram range \"\"\"\n\n norm = binwidth(bins) * counts.sum()\n return counts/norm, errs/norm\n\n\ndef hist_to_density_fullspace(counts, errs, bins, totalweight):\n \"\"\" Normalize histogram to a unit integral density function\n over total sum of event weights (not just the visible histogram range mass)\n \"\"\"\n norm = binwidth(bins) * totalweight\n return counts/norm, errs/norm\n\n\ndef hist(x, bins=30, density=False, weights=None):\n \"\"\" Calculate a histogram.\n \"\"\"\n x = np.asarray(x, dtype=np.float64)\n\n # Calculate histogram\n if weights is None:\n weights = np.ones(x.shape)\n\n weights = np.array(weights)\n\n counts, bins = np.histogram(x, bins=bins, weights=weights)\n cbins = edge2centerbins(bins)\n\n # Input data to histogram bins\n # Weighted error on bin counts given by (square root of) sum of squared weights\n inds = np.digitize(x, bins)\n errs = np.asarray([np.linalg.norm(weights[inds==k],2) for k in range(1, len(bins))])\n\n # Density integral 1 over the histogram bins range\n if density:\n counts, errs = hist_to_density(counts=counts, errs=errs, bins=bins)\n\n return counts, errs, bins, cbins\n\n\ndef hist_obj(x, bins=30, weights=None):\n \"\"\" A wrapper to return a histogram object.\n \"\"\"\n counts, errs, bins, cbins = hist(x, bins=bins, weights=weights)\n return hobj(counts, errs, bins, cbins)\n\n\ndef generate_colormap():\n \"\"\" Default colormap.\n \"\"\"\n # Take colors\n color = plt.cm.Set1(np.linspace(0,1,10))\n\n # Add black \n black = np.ones((1,4))\n black[:,0:3] = 0.0\n color = np.concatenate((black, color)) \n\n return color\n\n\ndef hist_filled_error(ax, bins, cbins, y, err, color, **kwargs):\n \"\"\" Stephist style error.\n \"\"\"\n new_args = kwargs.copy()\n new_args['lw'] = 0\n new_args.pop('histtype', None) # Remove\n\n ax.fill_between(bins[0:-1], y-err, y+err, step='post', alpha=0.3, color=color, **new_args)\n\n # The last bin\n ax.fill_between(bins[-2:], (y-err)[-2:], (y+err)[-2:], step='pre', alpha=0.3, color=color, **new_args)\n\n\ndef superplot(data, observable=None, ratio_plot=True, yscale='linear', ratio_error_plot=True, \\\n legend_counts=False, color=None, legend_properties={'fontsize': 9}, bottom_PRC=5, EPS=1E-12):\n \"\"\" Superposition (overlaid) plotting\n \"\"\"\n if observable == None:\n observable = data[0]['obs']\n \n fig, ax = create_axes(**observable, ratio_plot=ratio_plot)\n\n if color == None:\n color = generate_colormap()\n \n legend_labels = []\n\n # y-axis limit\n bottom_count = 1e32\n ceiling_count = 0\n\n # Plot histograms\n for i in range(len(data)):\n\n if data[i]['hdata'].is_empty:\n print(__name__ + f'.superplot: Skipping empty histogram for entry {i}')\n continue\n\n c = data[i]['color']\n if c is None: c = color[i]\n\n counts = data[i]['hdata'].counts * data[i]['hdata'].binscale\n errs = data[i]['hdata'].errs * data[i]['hdata'].binscale\n bins = data[i]['hdata'].bins\n cbins = data[i]['hdata'].cbins\n\n # 
-----------------------------------------------\n # ** For visualization autolimits **\n # Use percentile for the bottom (~ handle noisy small bins)\n bottom_count = np.min([bottom_count, np.percentile(counts[counts > EPS], bottom_PRC)])\n ceiling_count = np.max([ceiling_count, np.max(counts[counts > 0])])\n # -----------------------------------------------\n \n label = data[i]['label']\n if legend_counts == True:\n label += f' $N={np.sum(data[i][\"hdata\"].counts):.1f}$'\n\n legend_labels.append(label)\n\n if data[i]['hfunc'] == 'hist' :\n ax[0].hist(x=cbins, bins=bins, weights=counts, color=c, label=label, **data[i]['style'])\n hist_filled_error(ax=ax[0], bins=bins, cbins=cbins, y=counts, err=errs, color=c, **data[i]['style'])\n\n elif data[i]['hfunc'] == 'errorbar' :\n ax[0].errorbar(x=cbins, y=counts, yerr=errs, color=c, label=label, **data[i]['style'])\n\n elif data[i]['hfunc'] == 'plot' :\n ax[0].plot(cbins, counts, color=c, label=label, **data[i]['style'])\n \n new_args = data[i]['style'].copy()\n new_args['lw'] = 0\n ax[0].fill_between(cbins, counts-errs, counts+errs, alpha=0.3, color=c, **new_args)\n\n # Plot ratiohistograms\n if ratio_plot:\n\n plot_horizontal_line(ax[1])\n \n for i in range(len(data)):\n\n if data[i]['hdata'].is_empty:\n print(__name__ + f'.superplot: Skipping empty histogram for entry {i} (ratioplot)')\n continue\n\n c = data[i]['color']\n if c is None: c = color[i]\n\n A = data[i]['hdata'].counts * data[i]['hdata'].binscale\n B = data[0]['hdata'].counts * data[0]['hdata'].binscale\n sigma_A = data[i]['hdata'].errs * data[i]['hdata'].binscale\n sigma_B = data[0]['hdata'].errs * data[0]['hdata'].binscale\n sigma_AB = 0\n ratio_errs = ratioerr(A=A, B=B, sigma_A=sigma_A, sigma_B=sigma_B, sigma_AB=sigma_AB)\n\n EPS = 1E-30\n ratio = A / (B + EPS)\n bins = data[i]['hdata'].bins\n cbins = data[i]['hdata'].cbins\n\n # If no errors turned on\n if ratio_error_plot == False:\n ratio_errs = np.zeros(ratio_errs.shape)\n\n if data[i]['hfunc'] == 'hist':\n ax[1].hist(x=cbins, bins=bins, weights=ratio, color=c, **data[i]['style']) \n hist_filled_error(ax=ax[1], bins=bins, cbins=cbins, y=ratio, err=ratio_errs, color=c, **data[i]['style'])\n\n elif data[i]['hfunc'] == 'errorbar':\n ax[1].errorbar(x=cbins, y=ratio, yerr=ratio_errs, color=c, **data[i]['style'])\n\n elif data[i]['hfunc'] == 'plot':\n ax[1].plot(cbins, ratio, color=c, **data[i]['style'])\n\n new_args = data[i]['style'].copy()\n new_args['lw'] = 0\n ax[1].fill_between(cbins, ratio-ratio_errs, ratio+ratio_errs, alpha=0.3, color=c, **new_args)\n \n # Legend\n if legend_labels != []:\n ordered_legend(ax = ax[0], order=legend_labels, **legend_properties)\n\n # --------------------------------------------------------------------\n # Upper figure\n\n # Log y-scale\n ax[0].set_yscale(yscale)\n\n # y-limits\n if observable['ylim'] is None:\n ylim_now = ax[0].get_ylim()\n if yscale == 'log':\n ax[0].set_ylim([bottom_count / 4, ceiling_count * 10])\n else:\n ax[0].set_ylim([0, ceiling_count * 1.5])\n else:\n ax[0].set_ylim(observables.ylim)\n # -------------------------------------------------------------------- \n\n return fig, ax\n\n\ndef change2density_labels(all_obs):\n \"\"\" Change to density ~ 1/N dN/dx [1/xdim] type label to y-axis \"\"\"\n\n for key in all_obs.keys():\n xlabel = all_obs[key]['xlabel'].replace('$', '')\n all_obs[key]['ylabel'] = '$\\\\frac{1}{N} \\\\; ' + f'dN/d{xlabel}$'\n all_obs[key]['units']['y'] = '1'\n\n return all_obs\n\n\ndef histmc(mcdata, all_obs, density=False, scale=None, color=(0,0,1), 
label='none', style=hist_style_step):\n \"\"\" Over all observables of an MC sample \"\"\"\n\n obj = {}\n\n for OBS in all_obs.keys():\n\n # Histogram it\n counts, errs, bins, cbins = hist(x=mcdata['data'][OBS], bins=all_obs[OBS]['bins'], weights=mcdata['weights'])\n\n # Compute differential cross section within histogram range\n # Note that division by sum(weights) handles the histogram range integral (overflow) properly\n binscale = mcdata['xsection_pb'] / binwidth(bins) / np.sum(mcdata['weights']) \n\n # Additional scale factor\n if scale is not None:\n binscale *= scale\n \n # Density integral 1 over the histogram bins\n if density:\n counts,errs = hist_to_density(counts=counts, errs=errs, bins=bins)\n binscale = 1.0\n \n obj[OBS] = {'hdata': hobj(counts, errs, bins, cbins, binscale), 'hfunc' : 'hist', 'color': color, 'label': label, 'style' : style}\n\n return obj\n\n\ndef histhepdata(hepdata, all_obs, scale=None, density=False, MC_XS_SCALE=1E12, label='Data', style=hist_style_step):\n\n # Over all observables\n obj = {}\n\n for OBS in all_obs.keys():\n\n # Over all DATA files (now fixed to one)\n data_obj = []\n\n y = hepdata[OBS]['y'] \n yerr = hepdata[OBS]['y_err']\n bins = hepdata[OBS]['bins']\n cbins = hepdata[OBS]['x']\n\n binscale = hepdata[OBS]['scale'] * MC_XS_SCALE\n\n # Additional scale factor\n if scale is not None:\n binscale *= scale\n \n # Density integral 1 over the histogram bins\n if density:\n norm = hepdata[OBS]['binwidth'] * y.sum()\n y /= norm\n yerr /= norm\n binscale = 1.0\n\n obj[OBS] = {'hdata': hobj(y, yerr, bins, cbins, binscale), 'hfunc' : 'hist', 'color': (0,0,0), 'label': label, 'style' : style}\n \n return obj\n\n\ndef fuse_histograms(hist_list):\n \"\"\"\n Fuse a list of count histogram objects\n \"\"\"\n hsum = copy.deepcopy(hist_list[0])\n for c in range(1, len(hist_list)):\n for OBS in hist_list[0].keys():\n hsum[OBS]['hdata'] += hist_list[c][OBS]['hdata']\n \n return hsum\n\n\ndef test_iceplot():\n \"\"\" Visual unit tests \"\"\"\n \n import pytest\n import pathlib\n\n pathlib.Path(\"./testfigs\").mkdir(parents=True, exist_ok=True)\n\n\n # ------------------------------------------------------------------------\n set_global_style()\n\n\n # Synthetic input data\n r1 = np.random.randn(25000) * 0.8\n r2 = np.random.randn(25000) * 1\n r3 = np.random.randn(25000) * 1.2\n r4 = np.random.randn(25000) * 1.5\n\n\n # ------------------------------------------------------------------------\n # Mathematical definitions\n\n # Momentum squared\n def pt2(x):\n return np.power(x,2);\n\n\n # ------------------------------------------------------------------------\n # Observables containers\n\n obs_pt2 = {\n\n # Axis limits\n 'xlim' : (0, 1.5),\n 'ylim' : None,\n 'xlabel' : r'$p_t^2$',\n 'ylabel' : r'Counts',\n 'units' : {'x': r'GeV$^2$', 'y' : r'counts'},\n 'label' : r'Transverse momentum squared',\n 'figsize' : (4, 3.75),\n\n # Ratio\n 'ylim_ratio' : (0.7, 1.3),\n\n # Histogramming\n 'bins' : np.linspace(0, 1.5, 60),\n 'density' : False,\n \n # Function to calculate\n 'func' : pt2\n }\n\n\n # ------------------------------------------------------------------------\n # ** Example **\n\n fig1, ax1 = create_axes(**obs_pt2, ratio_plot=False)\n counts, errs, bins, cbins = hist(obs_pt2['func'](r1), bins=obs_pt2['bins'], density=obs_pt2['density'])\n ax1[0].errorbar(x=cbins, y=counts, yerr=errs, color=(0,0,0), label='Data $\\\\alpha$', **errorbar_style)\n ax1[0].legend(frameon=False)\n fig1.savefig('./testfigs/testplot_1.pdf', bbox_inches='tight')\n\n\n # 
------------------------------------------------------------------------\n # ** Example **\n\n fig2, ax2 = create_axes(**obs_pt2, ratio_plot=False)\n counts, errs, bins, cbins = hist(obs_pt2['func'](r1), bins=obs_pt2['bins'], density=obs_pt2['density'])\n ax2[0].hist(x=cbins, bins=bins, weights=counts, color=(0.5, 0.2, 0.1), label='Data $\\\\alpha$', **hist_style_step)\n ax2[0].legend(frameon=False)\n fig2.savefig('./testfigs/testplot_2.pdf', bbox_inches='tight')\n\n\n # ------------------------------------------------------------------------\n # ** Example **\n\n fig3, ax3 = create_axes(**obs_pt2, ratio_plot=True)\n\n counts1, errs, bins, cbins = hist(obs_pt2['func'](r1), bins=obs_pt2['bins'], density=obs_pt2['density'])\n ax3[0].hist(x=cbins, bins=bins, weights=counts1, color=(0,0,0), label='Data 1', **hist_style_step)\n\n counts2, errs, bins, cbins = hist(obs_pt2['func'](r2), bins=obs_pt2['bins'], density=obs_pt2['density'])\n ax3[0].hist(x=cbins, bins=bins, weights=counts2, color=(1,0,0), alpha=0.5, label='Data 2', **hist_style_step)\n\n ordered_legend(ax = ax3[0], order=['Data 1', 'Data 2'])\n\n # Ratio\n plot_horizontal_line(ax3[1])\n ax3[1].hist(x=cbins, bins=bins, weights=counts2 / (counts1 + 1E-30), color=(1,0,0), alpha=0.5, label='Data $\\\\beta$', **hist_style_step)\n\n fig3.savefig('./testfigs/testplot_3.pdf', bbox_inches='tight')\n\n\n # ------------------------------------------------------------------------\n # ** Example **\n\n data_template = {\n 'data' : None,\n 'weights': None,\n 'label' : 'Data',\n 'hfunc' : 'errorbar',\n 'style' : errorbar_style,\n 'obs' : obs_pt2,\n 'hdata' : None,\n 'color' : None\n }\n\n # Data source <-> Observable collections\n data1 = data_template.copy() # Deep copies\n data2 = data_template.copy()\n data3 = data_template.copy()\n data4 = data_template.copy()\n\n\n data1.update({\n 'data' : r1,\n 'label' : 'Data $\\\\alpha$',\n 'hfunc' : 'errorbar',\n 'style' : errorbar_style,\n })\n data2.update({\n 'data' : r2,\n 'label' : 'Data $\\\\beta$',\n 'hfunc' : 'hist',\n 'style' : hist_style_step,\n })\n data3.update({\n 'data' : r3,\n 'label' : 'Data $\\\\gamma$',\n 'hfunc' : 'hist',\n 'style' : hist_style_step,\n })\n data4.update({\n 'data' : r4,\n 'label' : 'Data $\\\\delta$',\n 'hfunc' : 'plot',\n 'style' : plot_style,\n })\n\n data = [data1, data2, data3, data4]\n\n\n # Calculate histograms\n for i in range(len(data)):\n data[i]['hdata'] = hist_obj(data[i]['obs']['func'](data[i]['data']), bins=data[i]['obs']['bins'])\n\n # Plot it\n fig4, ax4 = superplot(data, ratio_plot=True, yscale='log')\n fig5, ax5 = superplot(data, ratio_plot=True, yscale='linear', ratio_error_plot=False)\n\n fig4.savefig('./testfigs/testplot_4.pdf', bbox_inches='tight')\n fig5.savefig('./testfigs/testplot_5.pdf', bbox_inches='tight')\n\n",
"# Electron HLT trigger [TRAINING] steering code\n#\n# Mikael Mieskolainen, 2021\n# [email protected]\n\n# icenet system paths\nimport sys\nsys.path.append(\".\")\n\nimport math\nimport numpy as np\nimport torch\nimport os\nimport pickle\nimport sys\nfrom termcolor import cprint\n\n# matplotlib\nfrom matplotlib import pyplot as plt\n\n# icenet\nfrom icenet.tools import io\nfrom icenet.tools import aux\nfrom icenet.tools import reweight\n\nfrom icenet.tools import plots\nfrom icenet.tools import prints\nfrom icenet.tools import process\n\n\n# icetrg\nfrom icetrg import common\n\n\n# Main function\n#\ndef main() :\n\n ### Get input\n data, args, features = common.init()\n\n ### Print ranges\n #prints.print_variables(X=data.trn.x, ids=data.ids)\n \n ### Compute reweighting weights\n trn_weights = reweight.compute_ND_reweights(x=data.trn.x, y=data.trn.y, ids=data.ids, args=args['reweight_param'])\n\n\n ### Plot some kinematic variables\n targetdir = f'./figs/trg/{args[\"config\"]}/reweight/1D_kinematic/'\n os.makedirs(targetdir, exist_ok = True)\n for k in ['x_hlt_pt', 'x_hlt_eta']:\n plots.plotvar(x = data.trn.x[:, data.ids.index(k)], y = data.trn.y, weights = trn_weights, var = k, NBINS = 70,\n targetdir = targetdir, title = f\"training re-weight reference class: {args['reweight_param']['reference_class']}\")\n\n ### Plot variables\n if args['plot_param']['basic_on'] == True:\n print(__name__ + f': plotting basic histograms ...')\n targetdir = f'./figs/trg/{args[\"config\"]}/train/1D_all/'; os.makedirs(targetdir, exist_ok = True)\n plots.plotvars(X = data.trn.x, y = data.trn.y, NBINS = 70, ids = data.ids,\n weights = trn_weights, targetdir = targetdir, title = f'training reweight reference: {args[\"reweight_param\"][\"mode\"]}')\n\n ### Split and factor data\n data, data_kin = common.splitfactor(data=data, args=args)\n\n ### Print scalar variables\n fig,ax = plots.plot_correlations(data.trn.x, data.ids)\n targetdir = f'./figs/trg/{args[\"config\"]}/train/'; os.makedirs(targetdir, exist_ok = True)\n plt.savefig(fname = targetdir + 'correlations.pdf', pad_inches = 0.2, bbox_inches='tight')\n \n print(__name__ + ': Active variables:')\n prints.print_variables(X=data.trn.x, ids=data.ids)\n \n # Add args['modeldir']\n args[\"modeldir\"] = f'./checkpoint/trg/{args[\"config\"]}/'; os.makedirs(args[\"modeldir\"], exist_ok = True)\n \n ### Execute training\n process.train_models(data = data, data_kin = data_kin, trn_weights = trn_weights, args = args)\n \n print(__name__ + ' [done]')\n\n\nif __name__ == '__main__' :\n main()\n"
] |
[
[
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.concatenate",
"numpy.round",
"numpy.max",
"numpy.random.randn",
"numpy.digitize",
"numpy.histogram",
"matplotlib.pyplot.gca",
"numpy.arange",
"numpy.zeros",
"numpy.power",
"numpy.array",
"numpy.sum",
"numpy.abs",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"numpy.linalg.norm",
"numpy.ones",
"numpy.percentile"
],
[
"matplotlib.pyplot.savefig"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dmishin/fft-image-experiments
|
[
"3caf46f06de65d06072f5863907cd7998ba005a9"
] |
[
"dragon.py"
] |
[
"import numpy as np\r\n\r\ndef turn(n):\r\n \"\"\"Formula from WIkipedia.\r\n n could be numpy array of integers\r\n \"\"\"\r\n return (((n & -n) << 1) & n) != 0 \r\n\r\ndef dragon(N):\r\n \"\"\"Generate dragon curve\r\n Returns a pair of integer arrays, (x,y), each 2^N elements long\r\n \"\"\"\r\n t = turn(np.linspace(0, 2**N-1, 2**N, dtype=np.int32))\r\n\r\n a = np.remainder(np.cumsum(t*2-1), 4)\r\n\r\n # 1 | 0\r\n # --+-- \r\n # 2 | 3\r\n dx = np.array([1, -1, -1, 1], dtype=np.int32)\r\n dy = np.array([1, 1, -1, -1], dtype=np.int32)\r\n \r\n \r\n x = np.cumsum(dx[a])\r\n y = np.cumsum(dy[a])\r\n\r\n return x-((dx-1)//2)[a],y-((dy-1)//2)[a]\r\n\r\ndef dragon_binary_diagram(N):\r\n \"\"\"Draw dragon curve on a bitmap\r\n Returned bitmap size is 2^N x 2^N\r\n \"\"\"\r\n #Prepare canvas to draw curve\r\n D = np.zeros((2**N,2**N), dtype=np.float32)\r\n \r\n #Get curve. Scale is 2x.\r\n dx, dy = dragon(2*N-1)\r\n\r\n dx *= 2\r\n dy *= 2\r\n\r\n #Center the curve.\r\n cx, cy = (int(dx.mean()), int(dy.mean()))\r\n x0 = cx - D.shape[0]//2\r\n y0 = cy - D.shape[1]//2\r\n dx -= x0\r\n dy -= y0\r\n\r\n #Given array of coordinates, writes 1 at theese coordinates, when they are inside canvas.\r\n def putOnesAt(dx,dy):\r\n inside = (dx >= 0) & (dx < D.shape[0]) & (dy>=0) & (dy<D.shape[0])\r\n #Take part of x,y coordinates that are inside the image, and write repeated pattern by them\r\n #\r\n D[dx[inside],dy[inside]] = 1\r\n\r\n #Draw corners\r\n putOnesAt(dx,dy)\r\n\r\n #Draw midpoints between corners\r\n dx1 = (dx[0:-1]+dx[1:])//2\r\n dy1 = (dy[0:-1]+dy[1:])//2\r\n putOnesAt(dx1,dy1)\r\n return D\r\n \r\n\r\ndef showdragon(N):\r\n pp.plot(*(dragon(N)+()))\r\n pp.show()\r\n\r\nif __name__==\"__main__\":\r\n from matplotlib import pyplot as pp\r\n order = 16\r\n print(\"Showing dragon curve of order {}\".format(order))\r\n showdragon(order)\r\n"
] |
[
[
"numpy.linspace",
"numpy.cumsum",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kumayu0108/model-zoo
|
[
"4285779f6ff51fa1efb0625d67b428e90c343c0c",
"4285779f6ff51fa1efb0625d67b428e90c343c0c"
] |
[
"super_resolution/VDSR_PyTorch/model.py",
"multimodal_models/WaveGAN_TensorFlow/wgan_gp.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom math import sqrt\n\nclass VDSR(nn.Module):\n def __init__(self):\n super(VDSR, self).__init__()\n self.layer = self.make_layer(18)\n self.conv1 = nn.Conv2d(1, 64, kernel_size=3,stride=1, padding=1, bias=False)\n self.conv2 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1, bias=False)\n self.relu = nn.ReLU(inplace=True)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, sqrt(2./n))\n\n def make_layer(self, num_layers):\n layers = []\n for _ in range(num_layers):\n layers.append(nn.Conv2d(64, 64, 3, 1, 1, bias=False))\n layers.append(nn.ReLU(inplace=True))\n return nn.Sequential(*layers)\n\n def forward(self, input):\n residual = input\n out = self.relu(self.conv1(input))\n out = self.layer(out)\n out = self.conv2(out)\n out = torch.add(out, residual)\n return out\n",
"import tensorflow as tf\n\nclass WGAN(tf.keras.Model):\n def __init__(\n self,\n discriminator,\n generator,\n latent_dim,\n discriminator_extra_steps=5,\n gp_weight=10.0,\n ):\n super(WGAN, self).__init__()\n self.discriminator = discriminator\n self.generator = generator\n self.latent_dim = latent_dim\n self.d_steps = discriminator_extra_steps\n self.gp_weight = gp_weight\n\n def compile(self, d_optimizer, g_optimizer, d_loss_fn, g_loss_fn):\n super(WGAN, self).compile()\n self.d_optimizer = d_optimizer\n self.g_optimizer = g_optimizer\n self.d_loss_fn = d_loss_fn\n self.g_loss_fn = g_loss_fn\n\n def gradient_penalty(self, batch_size, real_output, fake_output):\n # get the interplated image\n alpha = tf.random.normal([batch_size, 1, 1, 1], 0.0, 1.0)\n diff = fake_output - real_output\n interpolated = real_output + alpha * diff\n\n with tf.GradientTape() as gp_tape:\n gp_tape.watch(interpolated)\n # 1. Get the discriminator output for this interpolated image.\n pred = self.discriminator(interpolated, training=True)\n\n # 2. Calculate the gradients w.r.t to this interpolated image.\n grads = gp_tape.gradient(pred, [interpolated])[0]\n # 3. Calcuate the norm of the gradients\n norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))\n gp = tf.reduce_mean((norm - 1.0) ** 2)\n return gp\n\n def train_step(self, real_output):\n if isinstance(real_output, tuple):\n real_output = real_output[0]\n\n # Get the batch size\n batch_size = tf.shape(real_output)[0]\n for i in range(int(self.d_steps)):\n # Get the latent vector\n random_latent_vectors = tf.random.normal(\n shape=(batch_size, self.latent_dim)\n )\n with tf.GradientTape() as tape:\n # Generate fake output from the latent vector\n fake_output = self.generator(random_latent_vectors, training=True)\n # Get the logits for the fake output\n fake_logits = self.discriminator(fake_output, training=True)\n # Get the logits for real output\n real_logits = self.discriminator(real_output, training=True)\n\n # Calculate discriminator loss using fake and real logits\n d_cost = self.d_loss_fn(real_img=real_logits, fake_img=fake_logits)\n # Calculate the gradient penalty\n gp = self.gradient_penalty(batch_size, real_output, fake_output)\n # Add the gradient penalty to the original discriminator loss\n d_loss = d_cost + gp * self.gp_weight\n\n # Get the gradients w.r.t the discriminator loss\n d_gradient = tape.gradient(d_loss, self.discriminator.trainable_variables)\n # Update the weights of the discriminator using the discriminator optimizer\n self.d_optimizer.apply_gradients(\n zip(d_gradient, self.discriminator.trainable_variables)\n )\n\n # Train the generator now.\n # Get the latent vector\n random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))\n with tf.GradientTape() as tape:\n # Generate fake output using the generator\n generated_output = self.generator(random_latent_vectors, training=True)\n # Get the discriminator logits for fake output\n gen_img_logits = self.discriminator(generated_output, training=True)\n # Calculate the generator loss\n g_loss = self.g_loss_fn(gen_img_logits)\n\n # Get the gradients w.r.t the generator loss\n gen_gradient = tape.gradient(g_loss, self.generator.trainable_variables)\n # Update the weights of the generator using the generator optimizer\n self.g_optimizer.apply_gradients(\n zip(gen_gradient, self.generator.trainable_variables)\n )\n \n return {\"d_loss\": d_loss, \"g_loss\": g_loss}\n\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.add"
],
[
"tensorflow.shape",
"tensorflow.reduce_mean",
"tensorflow.square",
"tensorflow.random.normal",
"tensorflow.GradientTape"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
ssalonen/pandas
|
[
"1929563fdb5358a41420d103a388aa2bd494d543",
"f9e0b7df8ca8a92133d3cea0a26181140f991e2d",
"1929563fdb5358a41420d103a388aa2bd494d543"
] |
[
"pandas/tools/tests/test_merge.py",
"pandas/core/config_init.py",
"pandas/computation/expr.py"
] |
[
"# pylint: disable=E1103\n\nimport nose\nimport unittest\n\nfrom datetime import datetime\nfrom numpy.random import randn\nfrom numpy import nan\nimport numpy as np\nimport random\n\nfrom pandas.compat import range, lrange, lzip, zip\nfrom pandas import compat\nfrom pandas.tseries.index import DatetimeIndex\nfrom pandas.tools.merge import merge, concat, ordered_merge, MergeError\nfrom pandas.util.testing import (assert_frame_equal, assert_series_equal,\n assert_almost_equal, rands,\n makeCustomDataframe as mkdf)\nfrom pandas import isnull, DataFrame, Index, MultiIndex, Panel, Series, date_range\nimport pandas.algos as algos\nimport pandas.util.testing as tm\n\na_ = np.array\n\nN = 50\nNGROUPS = 8\nJOIN_TYPES = ['inner', 'outer', 'left', 'right']\n\n\ndef get_test_data(ngroups=NGROUPS, n=N):\n unique_groups = lrange(ngroups)\n arr = np.asarray(np.tile(unique_groups, n // ngroups))\n\n if len(arr) < n:\n arr = np.asarray(list(arr) + unique_groups[:n - len(arr)])\n\n random.shuffle(arr)\n return arr\n\n\nclass TestMerge(unittest.TestCase):\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n # aggregate multiple columns\n self.df = DataFrame({'key1': get_test_data(),\n 'key2': get_test_data(),\n 'data1': np.random.randn(N),\n 'data2': np.random.randn(N)})\n\n # exclude a couple keys for fun\n self.df = self.df[self.df['key2'] > 1]\n\n self.df2 = DataFrame({'key1': get_test_data(n=N // 5),\n 'key2': get_test_data(ngroups=NGROUPS // 2,\n n=N // 5),\n 'value': np.random.randn(N // 5)})\n\n index, data = tm.getMixedTypeDict()\n self.target = DataFrame(data, index=index)\n\n # Join on string value\n self.source = DataFrame({'MergedA': data['A'], 'MergedD': data['D']},\n index=data['C'])\n\n self.left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],\n 'v1': np.random.randn(7)})\n self.right = DataFrame({'v2': np.random.randn(4)},\n index=['d', 'b', 'c', 'a'])\n\n def test_cython_left_outer_join(self):\n left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)\n right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)\n max_group = 5\n\n ls, rs = algos.left_outer_join(left, right, max_group)\n\n exp_ls = left.argsort(kind='mergesort')\n exp_rs = right.argsort(kind='mergesort')\n\n exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,\n 6, 6, 7, 7, 8, 8, 9, 10])\n exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,\n 4, 5, 4, 5, 4, 5, -1, -1])\n\n exp_ls = exp_ls.take(exp_li)\n exp_ls[exp_li == -1] = -1\n\n exp_rs = exp_rs.take(exp_ri)\n exp_rs[exp_ri == -1] = -1\n\n self.assert_(np.array_equal(ls, exp_ls))\n self.assert_(np.array_equal(rs, exp_rs))\n\n def test_cython_right_outer_join(self):\n left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)\n right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)\n max_group = 5\n\n rs, ls = algos.left_outer_join(right, left, max_group)\n\n exp_ls = left.argsort(kind='mergesort')\n exp_rs = right.argsort(kind='mergesort')\n\n # 0 1 1 1\n exp_li = a_([0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5,\n # 2 2 4\n 6, 7, 8, 6, 7, 8, -1])\n exp_ri = a_([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,\n 4, 4, 4, 5, 5, 5, 6])\n\n exp_ls = exp_ls.take(exp_li)\n exp_ls[exp_li == -1] = -1\n\n exp_rs = exp_rs.take(exp_ri)\n exp_rs[exp_ri == -1] = -1\n\n self.assert_(np.array_equal(ls, exp_ls))\n self.assert_(np.array_equal(rs, exp_rs))\n\n def test_cython_inner_join(self):\n left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)\n right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64)\n max_group = 5\n\n ls, rs = algos.inner_join(left, right, max_group)\n\n exp_ls = 
left.argsort(kind='mergesort')\n exp_rs = right.argsort(kind='mergesort')\n\n exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,\n 6, 6, 7, 7, 8, 8])\n exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,\n 4, 5, 4, 5, 4, 5])\n\n exp_ls = exp_ls.take(exp_li)\n exp_ls[exp_li == -1] = -1\n\n exp_rs = exp_rs.take(exp_ri)\n exp_rs[exp_ri == -1] = -1\n\n self.assert_(np.array_equal(ls, exp_ls))\n self.assert_(np.array_equal(rs, exp_rs))\n\n def test_left_outer_join(self):\n joined_key2 = merge(self.df, self.df2, on='key2')\n _check_join(self.df, self.df2, joined_key2, ['key2'], how='left')\n\n joined_both = merge(self.df, self.df2)\n _check_join(self.df, self.df2, joined_both, ['key1', 'key2'],\n how='left')\n\n def test_right_outer_join(self):\n joined_key2 = merge(self.df, self.df2, on='key2', how='right')\n _check_join(self.df, self.df2, joined_key2, ['key2'], how='right')\n\n joined_both = merge(self.df, self.df2, how='right')\n _check_join(self.df, self.df2, joined_both, ['key1', 'key2'],\n how='right')\n\n def test_full_outer_join(self):\n joined_key2 = merge(self.df, self.df2, on='key2', how='outer')\n _check_join(self.df, self.df2, joined_key2, ['key2'], how='outer')\n\n joined_both = merge(self.df, self.df2, how='outer')\n _check_join(self.df, self.df2, joined_both, ['key1', 'key2'],\n how='outer')\n\n def test_inner_join(self):\n joined_key2 = merge(self.df, self.df2, on='key2', how='inner')\n _check_join(self.df, self.df2, joined_key2, ['key2'], how='inner')\n\n joined_both = merge(self.df, self.df2, how='inner')\n _check_join(self.df, self.df2, joined_both, ['key1', 'key2'],\n how='inner')\n\n def test_handle_overlap(self):\n joined = merge(self.df, self.df2, on='key2',\n suffixes=['.foo', '.bar'])\n\n self.assert_('key1.foo' in joined)\n self.assert_('key1.bar' in joined)\n\n def test_handle_overlap_arbitrary_key(self):\n joined = merge(self.df, self.df2,\n left_on='key2', right_on='key1',\n suffixes=['.foo', '.bar'])\n self.assert_('key1.foo' in joined)\n self.assert_('key2.bar' in joined)\n\n def test_merge_common(self):\n joined = merge(self.df, self.df2)\n exp = merge(self.df, self.df2, on=['key1', 'key2'])\n tm.assert_frame_equal(joined, exp)\n\n def test_join_on(self):\n target = self.target\n source = self.source\n\n merged = target.join(source, on='C')\n self.assert_(np.array_equal(merged['MergedA'], target['A']))\n self.assert_(np.array_equal(merged['MergedD'], target['D']))\n\n # join with duplicates (fix regression from DataFrame/Matrix merge)\n df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})\n df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])\n joined = df.join(df2, on='key')\n expected = DataFrame({'key': ['a', 'a', 'b', 'b', 'c'],\n 'value': [0, 0, 1, 1, 2]})\n assert_frame_equal(joined, expected)\n\n # Test when some are missing\n df_a = DataFrame([[1], [2], [3]], index=['a', 'b', 'c'],\n columns=['one'])\n df_b = DataFrame([['foo'], ['bar']], index=[1, 2],\n columns=['two'])\n df_c = DataFrame([[1], [2]], index=[1, 2],\n columns=['three'])\n joined = df_a.join(df_b, on='one')\n joined = joined.join(df_c, on='one')\n self.assert_(np.isnan(joined['two']['c']))\n self.assert_(np.isnan(joined['three']['c']))\n\n # merge column not p resent\n self.assertRaises(Exception, target.join, source, on='E')\n\n # overlap\n source_copy = source.copy()\n source_copy['A'] = 0\n self.assertRaises(Exception, target.join, source_copy, on='A')\n\n def test_join_on_pass_vector(self):\n expected = self.target.join(self.source, on='C')\n del expected['C']\n\n join_col = 
self.target.pop('C')\n result = self.target.join(self.source, on=join_col)\n assert_frame_equal(result, expected)\n\n def test_join_with_len0(self):\n # nothing to merge\n merged = self.target.join(self.source.reindex([]), on='C')\n for col in self.source:\n self.assert_(col in merged)\n self.assert_(merged[col].isnull().all())\n\n merged2 = self.target.join(self.source.reindex([]), on='C',\n how='inner')\n self.assert_(merged2.columns.equals(merged.columns))\n self.assertEqual(len(merged2), 0)\n\n def test_join_on_inner(self):\n df = DataFrame({'key': ['a', 'a', 'd', 'b', 'b', 'c']})\n df2 = DataFrame({'value': [0, 1]}, index=['a', 'b'])\n\n joined = df.join(df2, on='key', how='inner')\n\n expected = df.join(df2, on='key')\n expected = expected[expected['value'].notnull()]\n self.assert_(np.array_equal(joined['key'], expected['key']))\n self.assert_(np.array_equal(joined['value'], expected['value']))\n self.assert_(joined.index.equals(expected.index))\n\n def test_join_on_singlekey_list(self):\n df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})\n df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])\n\n # corner cases\n joined = df.join(df2, on=['key'])\n expected = df.join(df2, on='key')\n\n assert_frame_equal(joined, expected)\n\n def test_join_on_series(self):\n result = self.target.join(self.source['MergedA'], on='C')\n expected = self.target.join(self.source[['MergedA']], on='C')\n assert_frame_equal(result, expected)\n\n def test_join_on_series_buglet(self):\n # GH #638\n df = DataFrame({'a': [1, 1]})\n ds = Series([2], index=[1], name='b')\n result = df.join(ds, on='a')\n expected = DataFrame({'a': [1, 1],\n 'b': [2, 2]}, index=df.index)\n tm.assert_frame_equal(result, expected)\n\n def test_join_index_mixed(self):\n df1 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},\n index=np.arange(10),\n columns=['A', 'B', 'C', 'D'])\n self.assert_(df1['B'].dtype == np.int64)\n self.assert_(df1['D'].dtype == np.bool_)\n\n df2 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},\n index=np.arange(0, 10, 2),\n columns=['A', 'B', 'C', 'D'])\n\n # overlap\n joined = df1.join(df2, lsuffix='_one', rsuffix='_two')\n expected_columns = ['A_one', 'B_one', 'C_one', 'D_one',\n 'A_two', 'B_two', 'C_two', 'D_two']\n df1.columns = expected_columns[:4]\n df2.columns = expected_columns[4:]\n expected = _join_by_hand(df1, df2)\n assert_frame_equal(joined, expected)\n\n # no overlapping blocks\n df1 = DataFrame(index=np.arange(10))\n df1['bool'] = True\n df1['string'] = 'foo'\n\n df2 = DataFrame(index=np.arange(5, 15))\n df2['int'] = 1\n df2['float'] = 1.\n\n for kind in JOIN_TYPES:\n\n joined = df1.join(df2, how=kind)\n expected = _join_by_hand(df1, df2, how=kind)\n assert_frame_equal(joined, expected)\n\n joined = df2.join(df1, how=kind)\n expected = _join_by_hand(df2, df1, how=kind)\n assert_frame_equal(joined, expected)\n\n def test_join_empty_bug(self):\n # generated an exception in 0.4.3\n x = DataFrame()\n x.join(DataFrame([3], index=[0], columns=['A']), how='outer')\n\n def test_join_unconsolidated(self):\n # GH #331\n a = DataFrame(randn(30, 2), columns=['a', 'b'])\n c = Series(randn(30))\n a['c'] = c\n d = DataFrame(randn(30, 1), columns=['q'])\n\n # it works!\n a.join(d)\n d.join(a)\n\n def test_join_multiindex(self):\n index1 = MultiIndex.from_arrays([['a', 'a', 'a', 'b', 'b', 'b'],\n [1, 2, 3, 1, 2, 3]],\n names=['first', 'second'])\n\n index2 = MultiIndex.from_arrays([['b', 'b', 'b', 'c', 'c', 'c'],\n [1, 2, 3, 1, 2, 3]],\n names=['first', 'second'])\n\n df1 = 
DataFrame(data=np.random.randn(6), index=index1,\n columns=['var X'])\n df2 = DataFrame(data=np.random.randn(6), index=index2,\n columns=['var Y'])\n\n df1 = df1.sortlevel(0)\n df2 = df2.sortlevel(0)\n\n joined = df1.join(df2, how='outer')\n ex_index = index1._tuple_index + index2._tuple_index\n expected = df1.reindex(ex_index).join(df2.reindex(ex_index))\n expected.index.names = index1.names\n assert_frame_equal(joined, expected)\n self.assertEqual(joined.index.names, index1.names)\n\n df1 = df1.sortlevel(1)\n df2 = df2.sortlevel(1)\n\n joined = df1.join(df2, how='outer').sortlevel(0)\n ex_index = index1._tuple_index + index2._tuple_index\n expected = df1.reindex(ex_index).join(df2.reindex(ex_index))\n expected.index.names = index1.names\n\n assert_frame_equal(joined, expected)\n self.assertEqual(joined.index.names, index1.names)\n\n def test_join_inner_multiindex(self):\n key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',\n 'qux', 'snap']\n key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',\n 'three', 'one']\n\n data = np.random.randn(len(key1))\n data = DataFrame({'key1': key1, 'key2': key2,\n 'data': data})\n\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n to_join = DataFrame(np.random.randn(10, 3), index=index,\n columns=['j_one', 'j_two', 'j_three'])\n\n joined = data.join(to_join, on=['key1', 'key2'], how='inner')\n expected = merge(data, to_join.reset_index(),\n left_on=['key1', 'key2'],\n right_on=['first', 'second'], how='inner',\n sort=False)\n\n expected2 = merge(to_join, data,\n right_on=['key1', 'key2'], left_index=True,\n how='inner', sort=False)\n assert_frame_equal(joined, expected2.reindex_like(joined))\n\n expected2 = merge(to_join, data, right_on=['key1', 'key2'],\n left_index=True, how='inner', sort=False)\n\n expected = expected.drop(['first', 'second'], axis=1)\n expected.index = joined.index\n\n self.assert_(joined.index.is_monotonic)\n assert_frame_equal(joined, expected)\n\n # _assert_same_contents(expected, expected2.ix[:, expected.columns])\n\n def test_join_hierarchical_mixed(self):\n df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=['a', 'b', 'c'])\n new_df = df.groupby(['a']).agg({'b': [np.mean, np.sum]})\n other_df = DataFrame(\n [(1, 2, 3), (7, 10, 6)], columns=['a', 'b', 'd'])\n other_df.set_index('a', inplace=True)\n\n result = merge(new_df, other_df, left_index=True, right_index=True)\n self.assertTrue(('b', 'mean') in result)\n self.assertTrue('b' in result)\n\n def test_join_float64_float32(self):\n\n a = DataFrame(randn(10, 2), columns=['a', 'b'], dtype = np.float64)\n b = DataFrame(randn(10, 1), columns=['c'], dtype = np.float32)\n joined = a.join(b)\n self.assert_(joined.dtypes['a'] == 'float64')\n self.assert_(joined.dtypes['b'] == 'float64')\n self.assert_(joined.dtypes['c'] == 'float32')\n\n a = np.random.randint(0, 5, 100).astype('int64')\n b = np.random.random(100).astype('float64')\n c = np.random.random(100).astype('float32')\n df = DataFrame({'a': a, 'b': b, 'c': c})\n xpdf = DataFrame({'a': a, 'b': b, 'c': c })\n s = DataFrame(np.random.random(5).astype('float32'), columns=['md'])\n rs = df.merge(s, left_on='a', right_index=True)\n self.assert_(rs.dtypes['a'] == 'int64')\n self.assert_(rs.dtypes['b'] == 'float64')\n self.assert_(rs.dtypes['c'] == 'float32')\n self.assert_(rs.dtypes['md'] == 'float32')\n\n xp = xpdf.merge(s, left_on='a', right_index=True)\n 
assert_frame_equal(rs, xp)\n\n def test_join_many_non_unique_index(self):\n df1 = DataFrame({\"a\": [1, 1], \"b\": [1, 1], \"c\": [10, 20]})\n df2 = DataFrame({\"a\": [1, 1], \"b\": [1, 2], \"d\": [100, 200]})\n df3 = DataFrame({\"a\": [1, 1], \"b\": [1, 2], \"e\": [1000, 2000]})\n idf1 = df1.set_index([\"a\", \"b\"])\n idf2 = df2.set_index([\"a\", \"b\"])\n idf3 = df3.set_index([\"a\", \"b\"])\n\n result = idf1.join([idf2, idf3], how='outer')\n\n df_partially_merged = merge(df1, df2, on=['a', 'b'], how='outer')\n expected = merge(df_partially_merged, df3, on=['a', 'b'], how='outer')\n\n result = result.reset_index()\n\n result['a'] = result['a'].astype(np.float64)\n result['b'] = result['b'].astype(np.float64)\n\n assert_frame_equal(result, expected.ix[:, result.columns])\n\n df1 = DataFrame({\"a\": [1, 1, 1], \"b\": [1, 1, 1], \"c\": [10, 20, 30]})\n df2 = DataFrame({\"a\": [1, 1, 1], \"b\": [1, 1, 2], \"d\": [100, 200, 300]})\n df3 = DataFrame(\n {\"a\": [1, 1, 1], \"b\": [1, 1, 2], \"e\": [1000, 2000, 3000]})\n idf1 = df1.set_index([\"a\", \"b\"])\n idf2 = df2.set_index([\"a\", \"b\"])\n idf3 = df3.set_index([\"a\", \"b\"])\n result = idf1.join([idf2, idf3], how='inner')\n\n df_partially_merged = merge(df1, df2, on=['a', 'b'], how='inner')\n expected = merge(df_partially_merged, df3, on=['a', 'b'], how='inner')\n\n result = result.reset_index()\n\n assert_frame_equal(result, expected.ix[:, result.columns])\n\n def test_merge_index_singlekey_right_vs_left(self):\n left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],\n 'v1': np.random.randn(7)})\n right = DataFrame({'v2': np.random.randn(4)},\n index=['d', 'b', 'c', 'a'])\n\n merged1 = merge(left, right, left_on='key',\n right_index=True, how='left', sort=False)\n merged2 = merge(right, left, right_on='key',\n left_index=True, how='right', sort=False)\n assert_frame_equal(merged1, merged2.ix[:, merged1.columns])\n\n merged1 = merge(left, right, left_on='key',\n right_index=True, how='left', sort=True)\n merged2 = merge(right, left, right_on='key',\n left_index=True, how='right', sort=True)\n assert_frame_equal(merged1, merged2.ix[:, merged1.columns])\n\n def test_merge_index_singlekey_inner(self):\n left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],\n 'v1': np.random.randn(7)})\n right = DataFrame({'v2': np.random.randn(4)},\n index=['d', 'b', 'c', 'a'])\n\n # inner join\n result = merge(left, right, left_on='key', right_index=True,\n how='inner')\n expected = left.join(right, on='key').ix[result.index]\n assert_frame_equal(result, expected)\n\n result = merge(right, left, right_on='key', left_index=True,\n how='inner')\n expected = left.join(right, on='key').ix[result.index]\n assert_frame_equal(result, expected.ix[:, result.columns])\n\n def test_merge_misspecified(self):\n self.assertRaises(Exception, merge, self.left, self.right,\n left_index=True)\n self.assertRaises(Exception, merge, self.left, self.right,\n right_index=True)\n\n self.assertRaises(Exception, merge, self.left, self.left,\n left_on='key', on='key')\n\n self.assertRaises(Exception, merge, self.df, self.df2,\n left_on=['key1'], right_on=['key1', 'key2'])\n\n def test_merge_overlap(self):\n merged = merge(self.left, self.left, on='key')\n exp_len = (self.left['key'].value_counts() ** 2).sum()\n self.assertEqual(len(merged), exp_len)\n self.assert_('v1_x' in merged)\n self.assert_('v1_y' in merged)\n\n def test_merge_different_column_key_names(self):\n left = DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n 'value': [1, 2, 3, 4]})\n right = 
DataFrame({'rkey': ['foo', 'bar', 'qux', 'foo'],\n 'value': [5, 6, 7, 8]})\n\n merged = left.merge(right, left_on='lkey', right_on='rkey',\n how='outer', sort=True)\n\n assert_almost_equal(merged['lkey'],\n ['bar', 'baz', 'foo', 'foo', 'foo', 'foo', np.nan])\n assert_almost_equal(merged['rkey'],\n ['bar', np.nan, 'foo', 'foo', 'foo', 'foo', 'qux'])\n assert_almost_equal(merged['value_x'], [2, 3, 1, 1, 4, 4, np.nan])\n assert_almost_equal(merged['value_y'], [6, np.nan, 5, 8, 5, 8, 7])\n\n def test_merge_nocopy(self):\n left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))\n right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))\n\n merged = merge(left, right, left_index=True,\n right_index=True, copy=False)\n\n merged['a'] = 6\n self.assert_((left['a'] == 6).all())\n\n merged['d'] = 'peekaboo'\n self.assert_((right['d'] == 'peekaboo').all())\n\n def test_join_sort(self):\n left = DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],\n 'value': [1, 2, 3, 4]})\n right = DataFrame({'value2': ['a', 'b', 'c']},\n index=['bar', 'baz', 'foo'])\n\n joined = left.join(right, on='key', sort=True)\n expected = DataFrame({'key': ['bar', 'baz', 'foo', 'foo'],\n 'value': [2, 3, 1, 4],\n 'value2': ['a', 'b', 'c', 'c']},\n index=[1, 2, 0, 3])\n assert_frame_equal(joined, expected)\n\n # smoke test\n joined = left.join(right, on='key', sort=False)\n self.assert_(np.array_equal(joined.index, lrange(4)))\n\n def test_intelligently_handle_join_key(self):\n # #733, be a bit more 1337 about not returning unconsolidated DataFrame\n\n left = DataFrame({'key': [1, 1, 2, 2, 3],\n 'value': lrange(5)}, columns=['value', 'key'])\n right = DataFrame({'key': [1, 1, 2, 3, 4, 5],\n 'rvalue': lrange(6)})\n\n joined = merge(left, right, on='key', how='outer')\n expected = DataFrame({'key': [1, 1, 1, 1, 2, 2, 3, 4, 5.],\n 'value': np.array([0, 0, 1, 1, 2, 3, 4,\n np.nan, np.nan]),\n 'rvalue': np.array([0, 1, 0, 1, 2, 2, 3, 4, 5])},\n columns=['value', 'key', 'rvalue'])\n assert_frame_equal(joined, expected, check_dtype=False)\n\n self.assert_(joined._data.is_consolidated())\n\n def test_handle_join_key_pass_array(self):\n left = DataFrame({'key': [1, 1, 2, 2, 3],\n 'value': lrange(5)}, columns=['value', 'key'])\n right = DataFrame({'rvalue': lrange(6)})\n key = np.array([1, 1, 2, 3, 4, 5])\n\n merged = merge(left, right, left_on='key', right_on=key, how='outer')\n merged2 = merge(right, left, left_on=key, right_on='key', how='outer')\n\n assert_series_equal(merged['key'], merged2['key'])\n self.assert_(merged['key'].notnull().all())\n self.assert_(merged2['key'].notnull().all())\n\n left = DataFrame({'value': lrange(5)}, columns=['value'])\n right = DataFrame({'rvalue': lrange(6)})\n lkey = np.array([1, 1, 2, 2, 3])\n rkey = np.array([1, 1, 2, 3, 4, 5])\n\n merged = merge(left, right, left_on=lkey, right_on=rkey, how='outer')\n self.assert_(np.array_equal(merged['key_0'],\n np.array([1, 1, 1, 1, 2, 2, 3, 4, 5])))\n\n left = DataFrame({'value': lrange(3)})\n right = DataFrame({'rvalue': lrange(6)})\n\n key = np.array([0, 1, 1, 2, 2, 3])\n merged = merge(left, right, left_index=True, right_on=key, how='outer')\n self.assert_(np.array_equal(merged['key_0'], key))\n\n def test_mixed_type_join_with_suffix(self):\n # GH #916\n df = DataFrame(np.random.randn(20, 6),\n columns=['a', 'b', 'c', 'd', 'e', 'f'])\n df.insert(0, 'id', 0)\n df.insert(5, 'dt', 'foo')\n\n grouped = df.groupby('id')\n mn = grouped.mean()\n cn = grouped.count()\n\n # it works!\n mn.join(cn, rsuffix='_right')\n\n def 
test_no_overlap_more_informative_error(self):\n dt = datetime.now()\n df1 = DataFrame({'x': ['a']}, index=[dt])\n\n df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])\n self.assertRaises(MergeError, merge, df1, df2)\n\n def test_merge_non_unique_indexes(self):\n\n dt = datetime(2012, 5, 1)\n dt2 = datetime(2012, 5, 2)\n dt3 = datetime(2012, 5, 3)\n dt4 = datetime(2012, 5, 4)\n\n df1 = DataFrame({'x': ['a']}, index=[dt])\n df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])\n _check_merge(df1, df2)\n\n # Not monotonic\n df1 = DataFrame({'x': ['a', 'b', 'q']}, index=[dt2, dt, dt4])\n df2 = DataFrame({'y': ['c', 'd', 'e', 'f', 'g', 'h']},\n index=[dt3, dt3, dt2, dt2, dt, dt])\n _check_merge(df1, df2)\n\n df1 = DataFrame({'x': ['a', 'b']}, index=[dt, dt])\n df2 = DataFrame({'y': ['c', 'd']}, index=[dt, dt])\n _check_merge(df1, df2)\n\n def test_merge_non_unique_index_many_to_many(self):\n dt = datetime(2012, 5, 1)\n dt2 = datetime(2012, 5, 2)\n dt3 = datetime(2012, 5, 3)\n df1 = DataFrame({'x': ['a', 'b', 'c', 'd']},\n index=[dt2, dt2, dt, dt])\n df2 = DataFrame({'y': ['e', 'f', 'g', ' h', 'i']},\n index=[dt2, dt2, dt3, dt, dt])\n _check_merge(df1, df2)\n\n def test_left_merge_empty_dataframe(self):\n left = DataFrame({'key': [1], 'value': [2]})\n right = DataFrame({'key': []})\n\n result = merge(left, right, on='key', how='left')\n assert_frame_equal(result, left)\n\n result = merge(right, left, on='key', how='right')\n assert_frame_equal(result, left)\n\n def test_merge_nosort(self):\n # #2098, anything to do?\n\n from datetime import datetime\n\n d = {\"var1\": np.random.randint(0, 10, size=10),\n \"var2\": np.random.randint(0, 10, size=10),\n \"var3\": [datetime(2012, 1, 12), datetime(2011, 2, 4),\n datetime(\n 2010, 2, 3), datetime(2012, 1, 12),\n datetime(\n 2011, 2, 4), datetime(2012, 4, 3),\n datetime(\n 2012, 3, 4), datetime(2008, 5, 1),\n datetime(2010, 2, 3), datetime(2012, 2, 3)]}\n df = DataFrame.from_dict(d)\n var3 = df.var3.unique()\n var3.sort()\n new = DataFrame.from_dict({\"var3\": var3,\n \"var8\": np.random.random(7)})\n\n result = df.merge(new, on=\"var3\", sort=False)\n exp = merge(df, new, on='var3', sort=False)\n assert_frame_equal(result, exp)\n\n self.assert_((df.var3.unique() == result.var3.unique()).all())\n\n def test_merge_nan_right(self):\n df1 = DataFrame({\"i1\" : [0, 1], \"i2\" : [0, 1]})\n df2 = DataFrame({\"i1\" : [0], \"i3\" : [0]})\n result = df1.join(df2, on=\"i1\", rsuffix=\"_\")\n expected = DataFrame({'i1': {0: 0.0, 1: 1}, 'i2': {0: 0, 1: 1},\n 'i1_': {0: 0, 1: np.nan}, 'i3': {0: 0.0, 1: np.nan},\n None: {0: 0, 1: 0}}).set_index(None).reset_index()[['i1', 'i2', 'i1_', 'i3']]\n assert_frame_equal(result, expected, check_dtype=False)\n\n df1 = DataFrame({\"i1\" : [0, 1], \"i2\" : [0.5, 1.5]})\n df2 = DataFrame({\"i1\" : [0], \"i3\" : [0.7]})\n result = df1.join(df2, rsuffix=\"_\", on='i1')\n expected = DataFrame({'i1': {0: 0, 1: 1}, 'i1_': {0: 0.0, 1: nan},\n 'i2': {0: 0.5, 1: 1.5}, 'i3': {0: 0.69999999999999996,\n 1: nan}})[['i1', 'i2', 'i1_', 'i3']]\n assert_frame_equal(result, expected)\n\n\n def test_overlapping_columns_error_message(self):\n # #2649\n df = DataFrame({'key': [1, 2, 3],\n 'v1': [4, 5, 6],\n 'v2': [7, 8, 9]})\n df2 = DataFrame({'key': [1, 2, 3],\n 'v1': [4, 5, 6],\n 'v2': [7, 8, 9]})\n\n df.columns = ['key', 'foo', 'foo']\n df2.columns = ['key', 'bar', 'bar']\n\n self.assertRaises(Exception, merge, df, df2)\n\ndef _check_merge(x, y):\n for how in ['inner', 'left', 'outer']:\n result = x.join(y, how=how)\n\n expected = 
merge(x.reset_index(), y.reset_index(), how=how,\n sort=True)\n expected = expected.set_index('index')\n\n assert_frame_equal(result, expected, check_names=False) # TODO check_names on merge?\n\n\nclass TestMergeMulti(unittest.TestCase):\n\n def setUp(self):\n self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n self.to_join = DataFrame(np.random.randn(10, 3), index=self.index,\n columns=['j_one', 'j_two', 'j_three'])\n\n # a little relevant example with NAs\n key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',\n 'qux', 'snap']\n key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',\n 'three', 'one']\n\n data = np.random.randn(len(key1))\n self.data = DataFrame({'key1': key1, 'key2': key2,\n 'data': data})\n\n def test_merge_on_multikey(self):\n joined = self.data.join(self.to_join, on=['key1', 'key2'])\n\n join_key = Index(lzip(self.data['key1'], self.data['key2']))\n indexer = self.to_join.index.get_indexer(join_key)\n ex_values = self.to_join.values.take(indexer, axis=0)\n ex_values[indexer == -1] = np.nan\n expected = self.data.join(DataFrame(ex_values,\n columns=self.to_join.columns))\n\n # TODO: columns aren't in the same order yet\n assert_frame_equal(joined, expected.ix[:, joined.columns])\n\n def test_merge_right_vs_left(self):\n # compare left vs right merge with multikey\n merged1 = self.data.merge(self.to_join, left_on=['key1', 'key2'],\n right_index=True, how='left')\n merged2 = self.to_join.merge(self.data, right_on=['key1', 'key2'],\n left_index=True, how='right')\n merged2 = merged2.ix[:, merged1.columns]\n assert_frame_equal(merged1, merged2)\n\n def test_compress_group_combinations(self):\n\n # ~ 40000000 possible unique groups\n key1 = np.array([rands(10) for _ in range(10000)], dtype='O')\n key1 = np.tile(key1, 2)\n key2 = key1[::-1]\n\n df = DataFrame({'key1': key1, 'key2': key2,\n 'value1': np.random.randn(20000)})\n\n df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2],\n 'value2': np.random.randn(10000)})\n\n # just to hit the label compression code path\n merged = merge(df, df2, how='outer')\n\n def test_left_join_index_preserve_order(self):\n\n left = DataFrame({'k1': [0, 1, 2] * 8,\n 'k2': ['foo', 'bar'] * 12,\n 'v': np.array(np.arange(24),dtype=np.int64) })\n\n index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])\n right = DataFrame({'v2': [5, 7]}, index=index)\n\n result = left.join(right, on=['k1', 'k2'])\n\n expected = left.copy()\n expected['v2'] = np.nan\n expected['v2'][(expected.k1 == 2) & (expected.k2 == 'bar')] = 5\n expected['v2'][(expected.k1 == 1) & (expected.k2 == 'foo')] = 7\n\n tm.assert_frame_equal(result, expected)\n\n # test join with multi dtypes blocks\n left = DataFrame({'k1': [0, 1, 2] * 8,\n 'k2': ['foo', 'bar'] * 12,\n 'k3' : np.array([0, 1, 2]*8, dtype=np.float32),\n 'v': np.array(np.arange(24),dtype=np.int32) })\n\n index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])\n right = DataFrame({'v2': [5, 7]}, index=index)\n\n result = left.join(right, on=['k1', 'k2'])\n\n expected = left.copy()\n expected['v2'] = np.nan\n expected['v2'][(expected.k1 == 2) & (expected.k2 == 'bar')] = 5\n expected['v2'][(expected.k1 == 1) & (expected.k2 == 'foo')] = 7\n\n tm.assert_frame_equal(result, expected)\n\n # do a right join for an extra test\n joined = merge(right, left, left_index=True,\n right_on=['k1', 'k2'], how='right')\n tm.assert_frame_equal(joined.ix[:, 
expected.columns], expected)\n\n def test_join_multi_dtypes(self):\n\n # test with multi dtypes in the join index\n def _test(dtype1,dtype2):\n left = DataFrame({'k1': np.array([0, 1, 2] * 8, dtype=dtype1),\n 'k2': ['foo', 'bar'] * 12,\n 'v': np.array(np.arange(24),dtype=np.int64) })\n\n index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])\n right = DataFrame({'v2': np.array([5, 7], dtype=dtype2)}, index=index)\n\n result = left.join(right, on=['k1', 'k2'])\n\n expected = left.copy()\n\n if dtype2.kind == 'i':\n dtype2 = np.dtype('float64')\n expected['v2'] = np.array(np.nan,dtype=dtype2)\n expected['v2'][(expected.k1 == 2) & (expected.k2 == 'bar')] = 5\n expected['v2'][(expected.k1 == 1) & (expected.k2 == 'foo')] = 7\n\n tm.assert_frame_equal(result, expected)\n\n for d1 in [np.int64,np.int32,np.int16,np.int8,np.uint8]:\n for d2 in [np.int64,np.float64,np.float32,np.float16]:\n _test(np.dtype(d1),np.dtype(d2))\n\n def test_left_merge_na_buglet(self):\n left = DataFrame({'id': list('abcde'), 'v1': randn(5),\n 'v2': randn(5), 'dummy': list('abcde'),\n 'v3': randn(5)},\n columns=['id', 'v1', 'v2', 'dummy', 'v3'])\n right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan],\n 'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]})\n\n merged = merge(left, right, on='id', how='left')\n\n rdf = right.drop(['id'], axis=1)\n expected = left.join(rdf)\n tm.assert_frame_equal(merged, expected)\n\n def test_merge_na_keys(self):\n data = [[1950, \"A\", 1.5],\n [1950, \"B\", 1.5],\n [1955, \"B\", 1.5],\n [1960, \"B\", np.nan],\n [1970, \"B\", 4.],\n [1950, \"C\", 4.],\n [1960, \"C\", np.nan],\n [1965, \"C\", 3.],\n [1970, \"C\", 4.]]\n\n frame = DataFrame(data, columns=[\"year\", \"panel\", \"data\"])\n\n other_data = [[1960, 'A', np.nan],\n [1970, 'A', np.nan],\n [1955, 'A', np.nan],\n [1965, 'A', np.nan],\n [1965, 'B', np.nan],\n [1955, 'C', np.nan]]\n other = DataFrame(other_data, columns=['year', 'panel', 'data'])\n\n result = frame.merge(other, how='outer')\n\n expected = frame.fillna(-999).merge(other.fillna(-999), how='outer')\n expected = expected.replace(-999, np.nan)\n\n tm.assert_frame_equal(result, expected)\n\n def test_int64_overflow_issues(self):\n # #2690, combinatorial explosion\n df1 = DataFrame(np.random.randn(1000, 7),\n columns=list('ABCDEF') + ['G1'])\n df2 = DataFrame(np.random.randn(1000, 7),\n columns=list('ABCDEF') + ['G2'])\n\n # it works!\n result = merge(df1, df2, how='outer')\n self.assertTrue(len(result) == 2000)\n\ndef _check_join(left, right, result, join_col, how='left',\n lsuffix='_x', rsuffix='_y'):\n\n # some smoke tests\n for c in join_col:\n assert(result[c].notnull().all())\n\n left_grouped = left.groupby(join_col)\n right_grouped = right.groupby(join_col)\n\n for group_key, group in result.groupby(join_col):\n l_joined = _restrict_to_columns(group, left.columns, lsuffix)\n r_joined = _restrict_to_columns(group, right.columns, rsuffix)\n\n try:\n lgroup = left_grouped.get_group(group_key)\n except KeyError:\n if how in ('left', 'inner'):\n raise AssertionError('key %s should not have been in the join'\n % str(group_key))\n\n _assert_all_na(l_joined, left.columns, join_col)\n else:\n _assert_same_contents(l_joined, lgroup)\n\n try:\n rgroup = right_grouped.get_group(group_key)\n except KeyError:\n if how in ('right', 'inner'):\n raise AssertionError('key %s should not have been in the join'\n % str(group_key))\n\n _assert_all_na(r_joined, right.columns, join_col)\n else:\n _assert_same_contents(r_joined, rgroup)\n\n\ndef _restrict_to_columns(group, columns, 
suffix):\n found = [c for c in group.columns\n if c in columns or c.replace(suffix, '') in columns]\n\n # filter\n group = group.ix[:, found]\n\n # get rid of suffixes, if any\n group = group.rename(columns=lambda x: x.replace(suffix, ''))\n\n # put in the right order...\n group = group.ix[:, columns]\n\n return group\n\n\ndef _assert_same_contents(join_chunk, source):\n NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...\n\n jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values\n svalues = source.fillna(NA_SENTINEL).drop_duplicates().values\n\n rows = set(tuple(row) for row in jvalues)\n assert(len(rows) == len(source))\n assert(all(tuple(row) in rows for row in svalues))\n\n\ndef _assert_all_na(join_chunk, source_columns, join_col):\n for c in source_columns:\n if c in join_col:\n continue\n assert(join_chunk[c].isnull().all())\n\n\ndef _join_by_hand(a, b, how='left'):\n join_index = a.index.join(b.index, how=how)\n\n a_re = a.reindex(join_index)\n b_re = b.reindex(join_index)\n\n result_columns = a.columns.append(b.columns)\n\n for col, s in compat.iteritems(b_re):\n a_re[col] = s\n return a_re.reindex(columns=result_columns)\n\n\nclass TestConcatenate(unittest.TestCase):\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.frame = DataFrame(tm.getSeriesData())\n self.mixed_frame = self.frame.copy()\n self.mixed_frame['foo'] = 'bar'\n\n def test_append(self):\n begin_index = self.frame.index[:5]\n end_index = self.frame.index[5:]\n\n begin_frame = self.frame.reindex(begin_index)\n end_frame = self.frame.reindex(end_index)\n\n appended = begin_frame.append(end_frame)\n assert_almost_equal(appended['A'], self.frame['A'])\n\n del end_frame['A']\n partial_appended = begin_frame.append(end_frame)\n self.assert_('A' in partial_appended)\n\n partial_appended = end_frame.append(begin_frame)\n self.assert_('A' in partial_appended)\n\n # mixed type handling\n appended = self.mixed_frame[:5].append(self.mixed_frame[5:])\n assert_frame_equal(appended, self.mixed_frame)\n\n # what to test here\n mixed_appended = self.mixed_frame[:5].append(self.frame[5:])\n mixed_appended2 = self.frame[:5].append(self.mixed_frame[5:])\n\n # all equal except 'foo' column\n assert_frame_equal(\n mixed_appended.reindex(columns=['A', 'B', 'C', 'D']),\n mixed_appended2.reindex(columns=['A', 'B', 'C', 'D']))\n\n # append empty\n empty = DataFrame({})\n\n appended = self.frame.append(empty)\n assert_frame_equal(self.frame, appended)\n self.assert_(appended is not self.frame)\n\n appended = empty.append(self.frame)\n assert_frame_equal(self.frame, appended)\n self.assert_(appended is not self.frame)\n\n # overlap\n self.assertRaises(ValueError, self.frame.append, self.frame,\n verify_integrity=True)\n\n def test_append_length0_frame(self):\n df = DataFrame(columns=['A', 'B', 'C'])\n df3 = DataFrame(index=[0, 1], columns=['A', 'B'])\n df5 = df.append(df3)\n\n expected = DataFrame(index=[0, 1], columns=['A', 'B', 'C'])\n assert_frame_equal(df5, expected)\n\n def test_append_records(self):\n arr1 = np.zeros((2,), dtype=('i4,f4,a10'))\n arr1[:] = [(1, 2., 'Hello'), (2, 3., \"World\")]\n\n arr2 = np.zeros((3,), dtype=('i4,f4,a10'))\n arr2[:] = [(3, 4., 'foo'),\n (5, 6., \"bar\"),\n (7., 8., 'baz')]\n\n df1 = DataFrame(arr1)\n df2 = DataFrame(arr2)\n\n result = df1.append(df2, ignore_index=True)\n expected = DataFrame(np.concatenate((arr1, arr2)))\n assert_frame_equal(result, expected)\n\n def test_append_different_columns(self):\n df = DataFrame({'bools': np.random.randn(10) > 0,\n 'ints': 
np.random.randint(0, 10, 10),\n 'floats': np.random.randn(10),\n 'strings': ['foo', 'bar'] * 5})\n\n a = df[:5].ix[:, ['bools', 'ints', 'floats']]\n b = df[5:].ix[:, ['strings', 'ints', 'floats']]\n\n appended = a.append(b)\n self.assert_(isnull(appended['strings'][0:4]).all())\n self.assert_(isnull(appended['bools'][5:]).all())\n\n def test_append_many(self):\n chunks = [self.frame[:5], self.frame[5:10],\n self.frame[10:15], self.frame[15:]]\n\n result = chunks[0].append(chunks[1:])\n tm.assert_frame_equal(result, self.frame)\n\n chunks[-1]['foo'] = 'bar'\n result = chunks[0].append(chunks[1:])\n tm.assert_frame_equal(result.ix[:, self.frame.columns], self.frame)\n self.assert_((result['foo'][15:] == 'bar').all())\n self.assert_(result['foo'][:15].isnull().all())\n\n def test_append_preserve_index_name(self):\n # #980\n df1 = DataFrame(data=None, columns=['A', 'B', 'C'])\n df1 = df1.set_index(['A'])\n df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]],\n columns=['A', 'B', 'C'])\n df2 = df2.set_index(['A'])\n\n result = df1.append(df2)\n self.assert_(result.index.name == 'A')\n\n def test_join_many(self):\n df = DataFrame(np.random.randn(10, 6), columns=list('abcdef'))\n df_list = [df[['a', 'b']], df[['c', 'd']], df[['e', 'f']]]\n\n joined = df_list[0].join(df_list[1:])\n tm.assert_frame_equal(joined, df)\n\n df_list = [df[['a', 'b']][:-2],\n df[['c', 'd']][2:], df[['e', 'f']][1:9]]\n\n def _check_diff_index(df_list, result, exp_index):\n reindexed = [x.reindex(exp_index) for x in df_list]\n expected = reindexed[0].join(reindexed[1:])\n tm.assert_frame_equal(result, expected)\n\n # different join types\n joined = df_list[0].join(df_list[1:], how='outer')\n _check_diff_index(df_list, joined, df.index)\n\n joined = df_list[0].join(df_list[1:])\n _check_diff_index(df_list, joined, df_list[0].index)\n\n joined = df_list[0].join(df_list[1:], how='inner')\n _check_diff_index(df_list, joined, df.index[2:8])\n\n self.assertRaises(ValueError, df_list[0].join, df_list[1:], on='a')\n\n def test_join_many_mixed(self):\n df = DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])\n df['key'] = ['foo', 'bar'] * 4\n df1 = df.ix[:, ['A', 'B']]\n df2 = df.ix[:, ['C', 'D']]\n df3 = df.ix[:, ['key']]\n\n result = df1.join([df2, df3])\n assert_frame_equal(result, df)\n\n def test_append_missing_column_proper_upcast(self):\n df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8')})\n df2 = DataFrame({'B': np.array([True, False, True, False],\n dtype=bool)})\n\n appended = df1.append(df2, ignore_index=True)\n self.assert_(appended['A'].dtype == 'f8')\n self.assert_(appended['B'].dtype == 'O')\n\n def test_concat_with_group_keys(self):\n df = DataFrame(np.random.randn(4, 3))\n df2 = DataFrame(np.random.randn(4, 4))\n\n # axis=0\n df = DataFrame(np.random.randn(3, 4))\n df2 = DataFrame(np.random.randn(4, 4))\n\n result = concat([df, df2], keys=[0, 1])\n exp_index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1, 1],\n [0, 1, 2, 0, 1, 2, 3]])\n expected = DataFrame(np.r_[df.values, df2.values],\n index=exp_index)\n tm.assert_frame_equal(result, expected)\n\n result = concat([df, df], keys=[0, 1])\n exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],\n [0, 1, 2, 0, 1, 2]])\n expected = DataFrame(np.r_[df.values, df.values],\n index=exp_index2)\n tm.assert_frame_equal(result, expected)\n\n # axis=1\n df = DataFrame(np.random.randn(4, 3))\n df2 = DataFrame(np.random.randn(4, 4))\n\n result = concat([df, df2], keys=[0, 1], axis=1)\n expected = DataFrame(np.c_[df.values, df2.values],\n 
columns=exp_index)\n tm.assert_frame_equal(result, expected)\n\n result = concat([df, df], keys=[0, 1], axis=1)\n expected = DataFrame(np.c_[df.values, df.values],\n columns=exp_index2)\n tm.assert_frame_equal(result, expected)\n\n def test_concat_keys_specific_levels(self):\n df = DataFrame(np.random.randn(10, 4))\n pieces = [df.ix[:, [0, 1]], df.ix[:, [2]], df.ix[:, [3]]]\n level = ['three', 'two', 'one', 'zero']\n result = concat(pieces, axis=1, keys=['one', 'two', 'three'],\n levels=[level],\n names=['group_key'])\n\n self.assert_(np.array_equal(result.columns.levels[0], level))\n self.assertEqual(result.columns.names[0], 'group_key')\n\n def test_concat_dataframe_keys_bug(self):\n t1 = DataFrame({'value': Series([1, 2, 3],\n index=Index(['a', 'b', 'c'], name='id'))})\n t2 = DataFrame({'value': Series([7, 8],\n index=Index(['a', 'b'], name='id'))})\n\n # it works\n result = concat([t1, t2], axis=1, keys=['t1', 't2'])\n self.assertEqual(list(result.columns), [('t1', 'value'),\n ('t2', 'value')])\n\n def test_concat_dict(self):\n frames = {'foo': DataFrame(np.random.randn(4, 3)),\n 'bar': DataFrame(np.random.randn(4, 3)),\n 'baz': DataFrame(np.random.randn(4, 3)),\n 'qux': DataFrame(np.random.randn(4, 3))}\n\n sorted_keys = sorted(frames)\n\n result = concat(frames)\n expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)\n tm.assert_frame_equal(result, expected)\n\n result = concat(frames, axis=1)\n expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys,\n axis=1)\n tm.assert_frame_equal(result, expected)\n\n keys = ['baz', 'foo', 'bar']\n result = concat(frames, keys=keys)\n expected = concat([frames[k] for k in keys], keys=keys)\n tm.assert_frame_equal(result, expected)\n\n def test_concat_ignore_index(self):\n frame1 = DataFrame({\"test1\": [\"a\", \"b\", \"c\"],\n \"test2\": [1, 2, 3],\n \"test3\": [4.5, 3.2, 1.2]})\n frame2 = DataFrame({\"test3\": [5.2, 2.2, 4.3]})\n frame1.index = Index([\"x\", \"y\", \"z\"])\n frame2.index = Index([\"x\", \"y\", \"q\"])\n\n v1 = concat([frame1, frame2], axis=1, ignore_index=True)\n\n nan = np.nan\n expected = DataFrame([[nan, nan, nan, 4.3],\n ['a', 1, 4.5, 5.2],\n ['b', 2, 3.2, 2.2],\n ['c', 3, 1.2, nan]],\n index=Index([\"q\", \"x\", \"y\", \"z\"]))\n\n tm.assert_frame_equal(v1, expected)\n\n def test_concat_multiindex_with_keys(self):\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n frame = DataFrame(np.random.randn(10, 3), index=index,\n columns=Index(['A', 'B', 'C'], name='exp'))\n result = concat([frame, frame], keys=[0, 1], names=['iteration'])\n\n self.assertEqual(result.index.names, ('iteration',) + index.names)\n tm.assert_frame_equal(result.ix[0], frame)\n tm.assert_frame_equal(result.ix[1], frame)\n self.assertEqual(result.index.nlevels, 3)\n\n def test_concat_keys_and_levels(self):\n df = DataFrame(np.random.randn(1, 3))\n df2 = DataFrame(np.random.randn(1, 4))\n\n levels = [['foo', 'baz'], ['one', 'two']]\n names = ['first', 'second']\n result = concat([df, df2, df, df2],\n keys=[('foo', 'one'), ('foo', 'two'),\n ('baz', 'one'), ('baz', 'two')],\n levels=levels,\n names=names)\n expected = concat([df, df2, df, df2])\n exp_index = MultiIndex(levels=levels + [[0]],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1],\n [0, 0, 0, 0]],\n names=names + [None])\n expected.index = exp_index\n\n assert_frame_equal(result, expected)\n\n # no names\n\n result = concat([df, df2, df, 
df2],\n keys=[('foo', 'one'), ('foo', 'two'),\n ('baz', 'one'), ('baz', 'two')],\n levels=levels)\n self.assertEqual(result.index.names, (None,) * 3)\n\n # no levels\n result = concat([df, df2, df, df2],\n keys=[('foo', 'one'), ('foo', 'two'),\n ('baz', 'one'), ('baz', 'two')],\n names=['first', 'second'])\n self.assertEqual(result.index.names, ('first', 'second') + (None,))\n self.assert_(np.array_equal(result.index.levels[0], ['baz', 'foo']))\n\n def test_concat_keys_levels_no_overlap(self):\n # GH #1406\n df = DataFrame(np.random.randn(1, 3), index=['a'])\n df2 = DataFrame(np.random.randn(1, 4), index=['b'])\n\n self.assertRaises(ValueError, concat, [df, df],\n keys=['one', 'two'], levels=[['foo', 'bar', 'baz']])\n\n self.assertRaises(ValueError, concat, [df, df2],\n keys=['one', 'two'], levels=[['foo', 'bar', 'baz']])\n\n def test_concat_rename_index(self):\n a = DataFrame(np.random.rand(3, 3),\n columns=list('ABC'),\n index=Index(list('abc'), name='index_a'))\n b = DataFrame(np.random.rand(3, 3),\n columns=list('ABC'),\n index=Index(list('abc'), name='index_b'))\n\n result = concat([a, b], keys=['key0', 'key1'],\n names=['lvl0', 'lvl1'])\n\n exp = concat([a, b], keys=['key0', 'key1'], names=['lvl0'])\n names = list(exp.index.names)\n names[1] = 'lvl1'\n exp.index.set_names(names, inplace=True)\n\n tm.assert_frame_equal(result, exp)\n self.assertEqual(result.index.names, exp.index.names)\n\n def test_crossed_dtypes_weird_corner(self):\n columns = ['A', 'B', 'C', 'D']\n df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='f8'),\n 'B': np.array([1, 2, 3, 4], dtype='i8'),\n 'C': np.array([1, 2, 3, 4], dtype='f8'),\n 'D': np.array([1, 2, 3, 4], dtype='i8')},\n columns=columns)\n\n df2 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8'),\n 'B': np.array([1, 2, 3, 4], dtype='f8'),\n 'C': np.array([1, 2, 3, 4], dtype='i8'),\n 'D': np.array([1, 2, 3, 4], dtype='f8')},\n columns=columns)\n\n appended = df1.append(df2, ignore_index=True)\n expected = DataFrame(np.concatenate([df1.values, df2.values], axis=0),\n columns=columns)\n tm.assert_frame_equal(appended, expected)\n\n df = DataFrame(np.random.randn(1, 3), index=['a'])\n df2 = DataFrame(np.random.randn(1, 4), index=['b'])\n result = concat(\n [df, df2], keys=['one', 'two'], names=['first', 'second'])\n self.assertEqual(result.index.names, ('first', 'second'))\n\n def test_dups_index(self):\n # GH 4771\n\n # single dtypes\n df = DataFrame(np.random.randint(0,10,size=40).reshape(10,4),columns=['A','A','C','C'])\n\n result = concat([df,df],axis=1)\n assert_frame_equal(result.iloc[:,:4],df)\n assert_frame_equal(result.iloc[:,4:],df)\n\n result = concat([df,df],axis=0)\n assert_frame_equal(result.iloc[:10],df)\n assert_frame_equal(result.iloc[10:],df)\n\n # multi dtypes\n df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),\n DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],\n axis=1)\n\n result = concat([df,df],axis=1)\n assert_frame_equal(result.iloc[:,:6],df)\n assert_frame_equal(result.iloc[:,6:],df)\n\n result = concat([df,df],axis=0)\n assert_frame_equal(result.iloc[:10],df)\n assert_frame_equal(result.iloc[10:],df)\n\n # append\n result = df.iloc[0:8,:].append(df.iloc[8:])\n assert_frame_equal(result, df)\n\n result = df.iloc[0:8,:].append(df.iloc[8:9]).append(df.iloc[9:10])\n assert_frame_equal(result, df)\n\n expected = concat([df,df],axis=0)\n result = df.append(df)\n assert_frame_equal(result, expected)\n\n def test_join_dups(self):\n df = 
concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),\n DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],\n axis=1)\n\n expected = concat([df,df],axis=1)\n result = df.join(df,rsuffix='_2')\n result.columns = expected.columns\n assert_frame_equal(result, expected)\n\n def test_handle_empty_objects(self):\n df = DataFrame(np.random.randn(10, 4), columns=list('abcd'))\n\n baz = df[:5]\n baz['foo'] = 'bar'\n empty = df[5:5]\n\n frames = [baz, empty, empty, df[5:]]\n concatted = concat(frames, axis=0)\n\n expected = df.ix[:, ['a', 'b', 'c', 'd', 'foo']]\n expected['foo'] = expected['foo'].astype('O')\n expected['foo'][:5] = 'bar'\n\n tm.assert_frame_equal(concatted, expected)\n\n def test_panel_join(self):\n panel = tm.makePanel()\n tm.add_nans(panel)\n\n p1 = panel.ix[:2, :10, :3]\n p2 = panel.ix[2:, 5:, 2:]\n\n # left join\n result = p1.join(p2)\n expected = p1.copy()\n expected['ItemC'] = p2['ItemC']\n tm.assert_panel_equal(result, expected)\n\n # right join\n result = p1.join(p2, how='right')\n expected = p2.copy()\n expected['ItemA'] = p1['ItemA']\n expected['ItemB'] = p1['ItemB']\n expected = expected.reindex(items=['ItemA', 'ItemB', 'ItemC'])\n tm.assert_panel_equal(result, expected)\n\n # inner join\n result = p1.join(p2, how='inner')\n expected = panel.ix[:, 5:10, 2:3]\n tm.assert_panel_equal(result, expected)\n\n # outer join\n result = p1.join(p2, how='outer')\n expected = p1.reindex(major=panel.major_axis,\n minor=panel.minor_axis)\n expected = expected.join(p2.reindex(major=panel.major_axis,\n minor=panel.minor_axis))\n tm.assert_panel_equal(result, expected)\n\n def test_panel_join_overlap(self):\n panel = tm.makePanel()\n tm.add_nans(panel)\n\n p1 = panel.ix[['ItemA', 'ItemB', 'ItemC']]\n p2 = panel.ix[['ItemB', 'ItemC']]\n\n joined = p1.join(p2, lsuffix='_p1', rsuffix='_p2')\n p1_suf = p1.ix[['ItemB', 'ItemC']].add_suffix('_p1')\n p2_suf = p2.ix[['ItemB', 'ItemC']].add_suffix('_p2')\n no_overlap = panel.ix[['ItemA']]\n expected = p1_suf.join(p2_suf).join(no_overlap)\n tm.assert_panel_equal(joined, expected)\n\n def test_panel_join_many(self):\n tm.K = 10\n panel = tm.makePanel()\n tm.K = 4\n\n panels = [panel.ix[:2], panel.ix[2:6], panel.ix[6:]]\n\n joined = panels[0].join(panels[1:])\n tm.assert_panel_equal(joined, panel)\n\n panels = [panel.ix[:2, :-5], panel.ix[2:6, 2:], panel.ix[6:, 5:-7]]\n\n data_dict = {}\n for p in panels:\n data_dict.update(compat.iteritems(p))\n\n joined = panels[0].join(panels[1:], how='inner')\n expected = Panel.from_dict(data_dict, intersect=True)\n tm.assert_panel_equal(joined, expected)\n\n joined = panels[0].join(panels[1:], how='outer')\n expected = Panel.from_dict(data_dict, intersect=False)\n tm.assert_panel_equal(joined, expected)\n\n # edge cases\n self.assertRaises(ValueError, panels[0].join, panels[1:],\n how='outer', lsuffix='foo', rsuffix='bar')\n self.assertRaises(ValueError, panels[0].join, panels[1:],\n how='right')\n\n def test_panel_concat_other_axes(self):\n panel = tm.makePanel()\n\n p1 = panel.ix[:, :5, :]\n p2 = panel.ix[:, 5:, :]\n\n result = concat([p1, p2], axis=1)\n tm.assert_panel_equal(result, panel)\n\n p1 = panel.ix[:, :, :2]\n p2 = panel.ix[:, :, 2:]\n\n result = concat([p1, p2], axis=2)\n tm.assert_panel_equal(result, panel)\n\n # if things are a bit misbehaved\n p1 = panel.ix[:2, :, :2]\n p2 = panel.ix[:, :, 2:]\n p1['ItemC'] = 'baz'\n\n result = concat([p1, p2], axis=2)\n\n expected = panel.copy()\n expected['ItemC'] = expected['ItemC'].astype('O')\n expected.ix['ItemC', :, 
:2] = 'baz'\n tm.assert_panel_equal(result, expected)\n\n def test_panel_concat_buglet(self):\n # #2257\n def make_panel():\n index = 5\n cols = 3\n\n def df():\n return DataFrame(np.random.randn(index, cols),\n index=[\"I%s\" % i for i in range(index)],\n columns=[\"C%s\" % i for i in range(cols)])\n return Panel(dict([(\"Item%s\" % x, df()) for x in ['A', 'B', 'C']]))\n\n panel1 = make_panel()\n panel2 = make_panel()\n\n panel2 = panel2.rename_axis(dict([(x, \"%s_1\" % x)\n for x in panel2.major_axis]),\n axis=1)\n\n panel3 = panel2.rename_axis(lambda x: '%s_1' % x, axis=1)\n panel3 = panel3.rename_axis(lambda x: '%s_1' % x, axis=2)\n\n # it works!\n concat([panel1, panel3], axis=1, verify_integrity=True)\n\n def test_panel4d_concat(self):\n p4d = tm.makePanel4D()\n\n p1 = p4d.ix[:, :, :5, :]\n p2 = p4d.ix[:, :, 5:, :]\n\n result = concat([p1, p2], axis=2)\n tm.assert_panel4d_equal(result, p4d)\n\n p1 = p4d.ix[:, :, :, :2]\n p2 = p4d.ix[:, :, :, 2:]\n\n result = concat([p1, p2], axis=3)\n tm.assert_panel4d_equal(result, p4d)\n\n def test_panel4d_concat_mixed_type(self):\n p4d = tm.makePanel4D()\n\n # if things are a bit misbehaved\n p1 = p4d.ix[:, :2, :, :2]\n p2 = p4d.ix[:, :, :, 2:]\n p1['L5'] = 'baz'\n\n result = concat([p1, p2], axis=3)\n\n p2['L5'] = np.nan\n expected = concat([p1, p2], axis=3)\n expected = expected.ix[result.labels]\n\n tm.assert_panel4d_equal(result, expected)\n\n def test_concat_series(self):\n ts = tm.makeTimeSeries()\n ts.name = 'foo'\n\n pieces = [ts[:5], ts[5:15], ts[15:]]\n\n result = concat(pieces)\n tm.assert_series_equal(result, ts)\n self.assertEqual(result.name, ts.name)\n\n result = concat(pieces, keys=[0, 1, 2])\n expected = ts.copy()\n\n ts.index = DatetimeIndex(np.array(ts.index.values, dtype='M8[ns]'))\n\n exp_labels = [np.repeat([0, 1, 2], [len(x) for x in pieces]),\n np.arange(len(ts))]\n exp_index = MultiIndex(levels=[[0, 1, 2], ts.index],\n labels=exp_labels)\n expected.index = exp_index\n tm.assert_series_equal(result, expected)\n\n def test_concat_series_axis1(self):\n ts = tm.makeTimeSeries()\n\n pieces = [ts[:-2], ts[2:], ts[2:-2]]\n\n result = concat(pieces, axis=1)\n expected = DataFrame(pieces).T\n assert_frame_equal(result, expected)\n\n result = concat(pieces, keys=['A', 'B', 'C'], axis=1)\n expected = DataFrame(pieces, index=['A', 'B', 'C']).T\n assert_frame_equal(result, expected)\n\n # preserve series names, #2489\n s = Series(randn(5), name='A')\n s2 = Series(randn(5), name='B')\n\n result = concat([s, s2], axis=1)\n expected = DataFrame({'A': s, 'B': s2})\n assert_frame_equal(result, expected)\n\n s2.name = None\n result = concat([s, s2], axis=1)\n self.assertTrue(np.array_equal(result.columns, lrange(2)))\n\n # must reindex, #2603\n s = Series(randn(3), index=['c', 'a', 'b'], name='A')\n s2 = Series(randn(4), index=['d', 'a', 'b', 'c'], name='B')\n result = concat([s, s2], axis=1)\n expected = DataFrame({'A': s, 'B': s2})\n assert_frame_equal(result, expected)\n\n def test_concat_single_with_key(self):\n df = DataFrame(np.random.randn(10, 4))\n\n result = concat([df], keys=['foo'])\n expected = concat([df, df], keys=['foo', 'bar'])\n tm.assert_frame_equal(result, expected[:10])\n\n def test_concat_exclude_none(self):\n df = DataFrame(np.random.randn(10, 4))\n\n pieces = [df[:5], None, None, df[5:]]\n result = concat(pieces)\n tm.assert_frame_equal(result, df)\n self.assertRaises(Exception, concat, [None, None])\n\n def test_concat_datetime64_block(self):\n from pandas.tseries.index import date_range\n\n rng = 
date_range('1/1/2000', periods=10)\n\n df = DataFrame({'time': rng})\n\n result = concat([df, df])\n self.assert_((result[:10]['time'] == rng).all())\n\n def test_concat_keys_with_none(self):\n # #1649\n df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])\n\n result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))\n expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))\n tm.assert_frame_equal(result, expected)\n\n result = concat([None, df0, df0[:2], df0[:1], df0],\n keys=['a', 'b', 'c', 'd', 'e'])\n expected = concat([df0, df0[:2], df0[:1], df0],\n keys=['b', 'c', 'd', 'e'])\n tm.assert_frame_equal(result, expected)\n\n def test_concat_bug_1719(self):\n ts1 = tm.makeTimeSeries()\n ts2 = tm.makeTimeSeries()[::2]\n\n ## to join with union\n ## these two are of different length!\n left = concat([ts1, ts2], join='outer', axis=1)\n right = concat([ts2, ts1], join='outer', axis=1)\n\n self.assertEqual(len(left), len(right))\n\n def test_concat_bug_2972(self):\n ts0 = Series(np.zeros(5))\n ts1 = Series(np.ones(5))\n ts0.name = ts1.name = 'same name'\n result = concat([ts0, ts1], axis=1)\n\n expected = DataFrame({0: ts0, 1: ts1})\n expected.columns=['same name', 'same name']\n assert_frame_equal(result, expected)\n\n def test_concat_bug_3602(self):\n\n # GH 3602, duplicate columns\n df1 = DataFrame({'firmNo' : [0,0,0,0], 'stringvar' : ['rrr', 'rrr', 'rrr', 'rrr'], 'prc' : [6,6,6,6] })\n df2 = DataFrame({'misc' : [1,2,3,4], 'prc' : [6,6,6,6], 'C' : [9,10,11,12]})\n expected = DataFrame([[0,6,'rrr',9,1,6],\n [0,6,'rrr',10,2,6],\n [0,6,'rrr',11,3,6],\n [0,6,'rrr',12,4,6]])\n expected.columns = ['firmNo','prc','stringvar','C','misc','prc']\n\n result = concat([df1,df2],axis=1)\n assert_frame_equal(result,expected)\n\n def test_concat_series_axis1_same_names_ignore_index(self):\n dates = date_range('01-Jan-2013', '01-Jan-2014', freq='MS')[0:-1]\n s1 = Series(randn(len(dates)), index=dates, name='value')\n s2 = Series(randn(len(dates)), index=dates, name='value')\n\n result = concat([s1, s2], axis=1, ignore_index=True)\n self.assertTrue(np.array_equal(result.columns, [0, 1]))\n\n def test_concat_invalid_first_argument(self):\n df1 = mkdf(10, 2)\n df2 = mkdf(10, 2)\n self.assertRaises(AssertionError, concat, df1, df2)\n\n # generator ok though\n concat(DataFrame(np.random.rand(5,5)) for _ in range(3))\n\nclass TestOrderedMerge(unittest.TestCase):\n\n def setUp(self):\n self.left = DataFrame({'key': ['a', 'c', 'e'],\n 'lvalue': [1, 2., 3]})\n\n self.right = DataFrame({'key': ['b', 'c', 'd', 'f'],\n 'rvalue': [1, 2, 3., 4]})\n\n # GH #813\n\n def test_basic(self):\n result = ordered_merge(self.left, self.right, on='key')\n expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],\n 'lvalue': [1, nan, 2, nan, 3, nan],\n 'rvalue': [nan, 1, 2, 3, nan, 4]})\n\n assert_frame_equal(result, expected)\n\n def test_ffill(self):\n result = ordered_merge(\n self.left, self.right, on='key', fill_method='ffill')\n expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],\n 'lvalue': [1., 1, 2, 2, 3, 3.],\n 'rvalue': [nan, 1, 2, 3, 3, 4]})\n assert_frame_equal(result, expected)\n\n def test_multigroup(self):\n left = concat([self.left, self.left], ignore_index=True)\n # right = concat([self.right, self.right], ignore_index=True)\n\n left['group'] = ['a'] * 3 + ['b'] * 3\n # right['group'] = ['a'] * 4 + ['b'] * 4\n\n result = ordered_merge(left, self.right, on='key', left_by='group',\n fill_method='ffill')\n expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'] * 2,\n 'lvalue': [1., 
1, 2, 2, 3, 3.] * 2,\n 'rvalue': [nan, 1, 2, 3, 3, 4] * 2})\n expected['group'] = ['a'] * 6 + ['b'] * 6\n\n assert_frame_equal(result, expected.ix[:, result.columns])\n\n result2 = ordered_merge(self.right, left, on='key', right_by='group',\n fill_method='ffill')\n assert_frame_equal(result, result2.ix[:, result.columns])\n\n result = ordered_merge(left, self.right, on='key', left_by='group')\n self.assert_(result['group'].notnull().all())\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n",
"import pandas.core.config as cf\nfrom pandas.core.config import (is_int, is_bool, is_text, is_float,\n is_instance_factory,is_one_of_factory,get_default_val)\nfrom pandas.core.format import detect_console_encoding\n\n\"\"\"\nThis module is imported from the pandas package __init__.py file\nin order to ensure that the core.config options registered here will\nbe available as soon as the user loads the package. if register_option\nis invoked inside specific modules, they will not be registered until that\nmodule is imported, which may or may not be a problem.\n\nIf you need to make sure options are available even before a certain\nmodule is imported, register them here rather then in the module.\n\n\"\"\"\n\n\n###########################################\n# options from the \"display\" namespace\n\npc_precision_doc = \"\"\"\n: int\n Floating point output precision (number of significant digits). This is\n only a suggestion\n\"\"\"\n\npc_colspace_doc = \"\"\"\n: int\n Default space for DataFrame columns.\n\"\"\"\n\npc_max_rows_doc = \"\"\"\n: int\n This sets the maximum number of rows pandas should output when printing\n out various output. For example, this value determines whether the repr()\n for a dataframe prints out fully or just a summary repr.\n 'None' value means unlimited.\n\"\"\"\n\npc_max_cols_doc = \"\"\"\n: int\n max_rows and max_columns are used in __repr__() methods to decide if\n to_string() or info() is used to render an object to a string. In case\n python/IPython is running in a terminal this can be set to 0 and pandas\n will correctly auto-detect the width the terminal and swap to a smaller\n format in case all columns would not fit vertically. The IPython notebook,\n IPython qtconsole, or IDLE do not run in a terminal and hence it is not\n possible to do correct auto-detection.\n 'None' value means unlimited.\n\"\"\"\n\npc_max_info_cols_doc = \"\"\"\n: int\n max_info_columns is used in DataFrame.info method to decide if\n per column information will be printed.\n\"\"\"\n\npc_nb_repr_h_doc = \"\"\"\n: boolean\n When True, IPython notebook will use html representation for\n pandas objects (if it is available).\n\"\"\"\n\npc_date_dayfirst_doc = \"\"\"\n: boolean\n When True, prints and parses dates with the day first, eg 20/01/2005\n\"\"\"\n\npc_date_yearfirst_doc = \"\"\"\n: boolean\n When True, prints and parses dates with the year first, eg 2005/01/20\n\"\"\"\n\npc_pprint_nest_depth = \"\"\"\n: int\n Controls the number of nested levels to process when pretty-printing\n\"\"\"\n\npc_multi_sparse_doc = \"\"\"\n: boolean\n \"sparsify\" MultiIndex display (don't display repeated\n elements in outer levels within groups)\n\"\"\"\n\npc_encoding_doc = \"\"\"\n: str/unicode\n Defaults to the detected encoding of the console.\n Specifies the encoding to be used for strings returned by to_string,\n these are generally strings meant to be displayed on the console.\n\"\"\"\n\nfloat_format_doc = \"\"\"\n: callable\n The callable should accept a floating point number and return\n a string with the desired format of the number. This is used\n in some places like SeriesFormatter.\n See core.format.EngFormatter for an example.\n\n\"\"\"\n\nmax_colwidth_doc = \"\"\"\n: int\n The maximum width in characters of a column in the repr of\n a pandas data structure. When the column overflows, a \"...\"\n placeholder is embedded in the output.\n\"\"\"\n\ncolheader_justify_doc = \"\"\"\n: 'left'/'right'\n Controls the justification of column headers. 
used by DataFrameFormatter.\n\"\"\"\n\npc_expand_repr_doc = \"\"\"\n: boolean\n Whether to print out the full DataFrame repr for wide DataFrames\n across multiple lines, `max_columns` is still respected, but the output will\n wrap-around across multiple \"pages\" if it's width exceeds `display.width`.\n\"\"\"\n\npc_line_width_doc = \"\"\"\n: int\n Deprecated.\n\"\"\"\n\npc_line_width_deprecation_warning = \"\"\"\\\nline_width has been deprecated, use display.width instead (currently both are identical)\n\"\"\"\n\npc_height_deprecation_warning = \"\"\"\\\nheight has been deprecated.\n\"\"\"\n\npc_width_doc = \"\"\"\n: int\n Width of the display in characters. In case python/IPython is running in\n a terminal this can be set to None and pandas will correctly auto-detect the\n width.\n Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a\n terminal and hence it is not possible to correctly detect the width.\n\"\"\"\n\npc_height_doc = \"\"\"\n: int\n Deprecated.\n\"\"\"\n\npc_chop_threshold_doc = \"\"\"\n: float or None\n if set to a float value, all float values smaller then the given threshold\n will be displayed as exactly 0 by repr and friends.\n\"\"\"\n\npc_max_seq_items = \"\"\"\n: int or None\n\n when pretty-printing a long sequence, no more then `max_seq_items`\n will be printed. If items are ommitted, they will be denoted by the addition\n of \"...\" to the resulting string.\n\n If set to None, the number of items to be printed is unlimited.\n\"\"\"\n\n\npc_max_info_rows_doc = \"\"\"\n: int or None\n max_info_rows is the maximum number of rows for which a frame will\n perform a null check on its columns when repr'ing To a console.\n The default is 1,000,000 rows. So, if a DataFrame has more\n 1,000,000 rows there will be no null check performed on the\n columns and thus the representation will take much less time to\n display in an interactive session. A value of None means always\n perform a null check when repr'ing.\n\"\"\"\n\npc_mpl_style_doc = \"\"\"\n: bool\n\n Setting this to 'default' will modify the rcParams used by matplotlib\n to give plots a more pleasing visual style by default.\n Setting this to None/False restores the values to their initial value.\n\"\"\"\n\nstyle_backup = dict()\ndef mpl_style_cb(key):\n import sys\n from pandas.tools.plotting import mpl_stylesheet\n global style_backup\n\n val = cf.get_option(key)\n\n if 'matplotlib' not in sys.modules.keys():\n if not(val): # starting up, we get reset to None\n return val\n raise Exception(\"matplotlib has not been imported. 
aborting\")\n\n import matplotlib.pyplot as plt\n\n\n if val == 'default':\n style_backup = dict([(k,plt.rcParams[k]) for k in mpl_stylesheet])\n plt.rcParams.update(mpl_stylesheet)\n elif not val:\n if style_backup:\n plt.rcParams.update(style_backup)\n\n return val\n\nwith cf.config_prefix('display'):\n cf.register_option('precision', 7, pc_precision_doc, validator=is_int)\n cf.register_option('float_format', None, float_format_doc)\n cf.register_option('column_space', 12, validator=is_int)\n cf.register_option('max_info_rows', 1690785, pc_max_info_rows_doc,\n validator=is_instance_factory((int, type(None))))\n cf.register_option('max_rows', 60, pc_max_rows_doc,\n validator=is_instance_factory([type(None), int]))\n cf.register_option('max_colwidth', 50, max_colwidth_doc, validator=is_int)\n cf.register_option('max_columns', 20, pc_max_cols_doc,\n validator=is_instance_factory([type(None), int]))\n cf.register_option('max_info_columns', 100, pc_max_info_cols_doc,\n validator=is_int)\n cf.register_option('colheader_justify', 'right', colheader_justify_doc,\n validator=is_text)\n cf.register_option('notebook_repr_html', True, pc_nb_repr_h_doc,\n validator=is_bool)\n cf.register_option('date_dayfirst', False, pc_date_dayfirst_doc,\n validator=is_bool)\n cf.register_option('date_yearfirst', False, pc_date_yearfirst_doc,\n validator=is_bool)\n cf.register_option('pprint_nest_depth', 3, pc_pprint_nest_depth,\n validator=is_int)\n cf.register_option('multi_sparse', True, pc_multi_sparse_doc,\n validator=is_bool)\n cf.register_option('encoding', detect_console_encoding(), pc_encoding_doc,\n validator=is_text)\n cf.register_option('expand_frame_repr', True, pc_expand_repr_doc)\n cf.register_option('chop_threshold', None, pc_chop_threshold_doc)\n cf.register_option('max_seq_items', None, pc_max_seq_items)\n cf.register_option('mpl_style', None, pc_mpl_style_doc,\n validator=is_one_of_factory([None, False, 'default']),\n cb=mpl_style_cb)\n cf.register_option('height', 60, pc_height_doc,\n validator=is_instance_factory([type(None), int]))\n cf.register_option('width',80, pc_width_doc,\n validator=is_instance_factory([type(None), int]))\n # redirected to width, make defval identical\n cf.register_option('line_width', get_default_val('display.width'), pc_line_width_doc)\n\ncf.deprecate_option('display.line_width',\n msg=pc_line_width_deprecation_warning,\n rkey='display.width')\n\ncf.deprecate_option('display.height',\n msg=pc_height_deprecation_warning,\n rkey='display.height')\n\ntc_sim_interactive_doc = \"\"\"\n: boolean\n Whether to simulate interactive mode for purposes of testing\n\"\"\"\nwith cf.config_prefix('mode'):\n cf.register_option('sim_interactive', False, tc_sim_interactive_doc)\n\nuse_inf_as_null_doc = \"\"\"\n: boolean\n True means treat None, NaN, INF, -INF as null (old way),\n False means None and NaN are null, but INF, -INF are not null\n (new way).\n\"\"\"\n\n# We don't want to start importing everything at the global context level\n# or we'll hit circular deps.\n\n\ndef use_inf_as_null_cb(key):\n from pandas.core.common import _use_inf_as_null\n _use_inf_as_null(key)\n\nwith cf.config_prefix('mode'):\n cf.register_option('use_inf_as_null', False, use_inf_as_null_doc,\n cb=use_inf_as_null_cb)\n\n\n# Set up the io.excel specific configuration.\nwriter_engine_doc = \"\"\"\n: string\n The default Excel writer engine for '{ext}' files. 
Available options: '{default}' (the default){others}.\n\"\"\"\n\nwith cf.config_prefix('io.excel'):\n # going forward, will be additional writers\n for ext, options in [('xls', ['xlwt']),\n ('xlsm', ['openpyxl']),\n ('xlsx', ['openpyxl'])]:\n default = options.pop(0)\n if options:\n options = \" \" + \", \".join(options)\n else:\n options = \"\"\n doc = writer_engine_doc.format(ext=ext, default=default,\n others=options)\n cf.register_option(ext + '.writer', default, doc, validator=str)\n",
"\"\"\":func:`~pandas.eval` parsers\n\"\"\"\n\nimport ast\nimport operator\nimport sys\nimport inspect\nimport tokenize\nimport datetime\nimport struct\n\nfrom functools import partial\n\nimport pandas as pd\nfrom pandas import compat\nfrom pandas.compat import StringIO, zip, reduce, string_types\nfrom pandas.core.base import StringMixin\nfrom pandas.core import common as com\nfrom pandas.computation.common import NameResolutionError\nfrom pandas.computation.ops import (_cmp_ops_syms, _bool_ops_syms,\n _arith_ops_syms, _unary_ops_syms, is_term)\nfrom pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG\nfrom pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div\n\n\ndef _ensure_scope(level=2, global_dict=None, local_dict=None, resolvers=None,\n **kwargs):\n \"\"\"Ensure that we are grabbing the correct scope.\"\"\"\n return Scope(gbls=global_dict, lcls=local_dict, level=level,\n resolvers=resolvers)\n\n\ndef _check_disjoint_resolver_names(resolver_keys, local_keys, global_keys):\n \"\"\"Make sure that variables in resolvers don't overlap with locals or\n globals.\n \"\"\"\n res_locals = list(com.intersection(resolver_keys, local_keys))\n if res_locals:\n msg = \"resolvers and locals overlap on names {0}\".format(res_locals)\n raise NameResolutionError(msg)\n\n res_globals = list(com.intersection(resolver_keys, global_keys))\n if res_globals:\n msg = \"resolvers and globals overlap on names {0}\".format(res_globals)\n raise NameResolutionError(msg)\n\n\ndef _replacer(x, pad_size):\n \"\"\"Replace a number with its padded hexadecimal representation. Used to tag\n temporary variables with their calling scope's id.\n \"\"\"\n # get the hex repr of the binary char and remove 0x and pad by pad_size\n # zeros\n try:\n hexin = ord(x)\n except TypeError:\n # bytes literals masquerade as ints when iterating in py3\n hexin = x\n\n return hex(hexin).replace('0x', '').rjust(pad_size, '0')\n\n\ndef _raw_hex_id(obj, pad_size=2):\n \"\"\"Return the padded hexadecimal id of ``obj``.\"\"\"\n # interpret as a pointer since that's what really what id returns\n packed = struct.pack('@P', id(obj))\n\n return ''.join(_replacer(x, pad_size) for x in packed)\n\n\nclass Scope(StringMixin):\n \"\"\"Object to hold scope, with a few bells to deal with some custom syntax\n added by pandas.\n\n Parameters\n ----------\n gbls : dict or None, optional, default None\n lcls : dict or Scope or None, optional, default None\n level : int, optional, default 1\n resolvers : list-like or None, optional, default None\n\n Attributes\n ----------\n globals : dict\n locals : dict\n level : int\n resolvers : tuple\n resolver_keys : frozenset\n \"\"\"\n __slots__ = ('globals', 'locals', 'resolvers', '_global_resolvers',\n 'resolver_keys', '_resolver', 'level', 'ntemps')\n\n def __init__(self, gbls=None, lcls=None, level=1, resolvers=None):\n self.level = level\n self.resolvers = tuple(resolvers or [])\n self.globals = dict()\n self.locals = dict()\n self.ntemps = 1 # number of temporary variables in this scope\n\n if isinstance(lcls, Scope):\n ld, lcls = lcls, dict()\n self.locals.update(ld.locals.copy())\n self.globals.update(ld.globals.copy())\n self.resolvers += ld.resolvers\n self.update(ld.level)\n\n frame = sys._getframe(level)\n try:\n self.globals.update(gbls or frame.f_globals)\n self.locals.update(lcls or frame.f_locals)\n finally:\n del frame\n\n # add some useful defaults\n self.globals['Timestamp'] = pd.lib.Timestamp\n self.globals['datetime'] = datetime\n\n # SUCH a hack\n self.globals['True'] = 
True\n self.globals['False'] = False\n\n res_keys = (list(o.keys()) for o in self.resolvers)\n self.resolver_keys = frozenset(reduce(operator.add, res_keys, []))\n self._global_resolvers = self.resolvers + (self.locals, self.globals)\n self._resolver = None\n\n self.resolver_dict = {}\n for o in self.resolvers:\n self.resolver_dict.update(dict(o))\n\n def __unicode__(self):\n return com.pprint_thing(\"locals: {0}\\nglobals: {0}\\nresolvers: \"\n \"{0}\".format(list(self.locals.keys()),\n list(self.globals.keys()),\n list(self.resolver_keys)))\n\n def __getitem__(self, key):\n return self.resolve(key, globally=False)\n\n def resolve(self, key, globally=False):\n resolvers = self.locals, self.globals\n if globally:\n resolvers = self._global_resolvers\n\n for resolver in resolvers:\n try:\n return resolver[key]\n except KeyError:\n pass\n\n def update(self, level=None):\n \"\"\"Update the current scope by going back `level` levels.\n\n Parameters\n ----------\n level : int or None, optional, default None\n \"\"\"\n # we are always 2 levels below the caller\n # plus the caller may be below the env level\n # in which case we need addtl levels\n sl = 2\n if level is not None:\n sl += level\n\n # add sl frames to the scope starting with the\n # most distant and overwritting with more current\n # makes sure that we can capture variable scope\n frame = inspect.currentframe()\n try:\n frames = []\n while sl >= 0:\n frame = frame.f_back\n sl -= 1\n frames.append(frame)\n for f in frames[::-1]:\n self.locals.update(f.f_locals)\n self.globals.update(f.f_globals)\n finally:\n del frame, frames\n\n def add_tmp(self, value, where='locals'):\n \"\"\"Add a temporary variable to the scope.\n\n Parameters\n ----------\n value : object\n An arbitrary object to be assigned to a temporary variable.\n where : basestring, optional, default 'locals', {'locals', 'globals'}\n What scope to add the value to.\n\n Returns\n -------\n name : basestring\n The name of the temporary variable created.\n \"\"\"\n d = getattr(self, where, None)\n\n if d is None:\n raise AttributeError(\"Cannot add value to non-existent scope \"\n \"{0!r}\".format(where))\n if not isinstance(d, dict):\n raise TypeError(\"Cannot add value to object of type {0!r}, \"\n \"scope must be a dictionary\"\n \"\".format(type(d).__name__))\n name = 'tmp_var_{0}_{1}_{2}'.format(type(value).__name__, self.ntemps,\n _raw_hex_id(self))\n d[name] = value\n\n # only increment if the variable gets put in the scope\n self.ntemps += 1\n return name\n\n def remove_tmp(self, name, where='locals'):\n d = getattr(self, where, None)\n if d is None:\n raise AttributeError(\"Cannot remove value from non-existent scope \"\n \"{0!r}\".format(where))\n if not isinstance(d, dict):\n raise TypeError(\"Cannot remove value from object of type {0!r}, \"\n \"scope must be a dictionary\"\n \"\".format(type(d).__name__))\n del d[name]\n self.ntemps -= 1\n\n\ndef _rewrite_assign(source):\n \"\"\"Rewrite the assignment operator for PyTables expression that want to use\n ``=`` as a substitute for ``==``.\n \"\"\"\n res = []\n g = tokenize.generate_tokens(StringIO(source).readline)\n for toknum, tokval, _, _, _ in g:\n res.append((toknum, '==' if tokval == '=' else tokval))\n return tokenize.untokenize(res)\n\n\ndef _replace_booleans(source):\n \"\"\"Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise\n precedence is changed to boolean precedence.\n \"\"\"\n return source.replace('|', ' or ').replace('&', ' and ')\n\n\ndef _replace_locals(source, local_symbol='@'):\n 
\"\"\"Replace local variables with a syntacticall valid name.\"\"\"\n return source.replace(local_symbol, _LOCAL_TAG)\n\n\ndef _preparse(source):\n \"\"\"Compose assignment and boolean replacement.\"\"\"\n return _replace_booleans(_rewrite_assign(source))\n\n\ndef _is_type(t):\n \"\"\"Factory for a type checking function of type ``t`` or tuple of types.\"\"\"\n return lambda x: isinstance(x.value, t)\n\n\n_is_list = _is_type(list)\n_is_str = _is_type(string_types)\n\n\n# partition all AST nodes\n_all_nodes = frozenset(filter(lambda x: isinstance(x, type) and\n issubclass(x, ast.AST),\n (getattr(ast, node) for node in dir(ast))))\n\n\ndef _filter_nodes(superclass, all_nodes=_all_nodes):\n \"\"\"Filter out AST nodes that are subclasses of ``superclass``.\"\"\"\n node_names = (node.__name__ for node in all_nodes\n if issubclass(node, superclass))\n return frozenset(node_names)\n\n\n_all_node_names = frozenset(map(lambda x: x.__name__, _all_nodes))\n_mod_nodes = _filter_nodes(ast.mod)\n_stmt_nodes = _filter_nodes(ast.stmt)\n_expr_nodes = _filter_nodes(ast.expr)\n_expr_context_nodes = _filter_nodes(ast.expr_context)\n_slice_nodes = _filter_nodes(ast.slice)\n_boolop_nodes = _filter_nodes(ast.boolop)\n_operator_nodes = _filter_nodes(ast.operator)\n_unary_op_nodes = _filter_nodes(ast.unaryop)\n_cmp_op_nodes = _filter_nodes(ast.cmpop)\n_comprehension_nodes = _filter_nodes(ast.comprehension)\n_handler_nodes = _filter_nodes(ast.excepthandler)\n_arguments_nodes = _filter_nodes(ast.arguments)\n_keyword_nodes = _filter_nodes(ast.keyword)\n_alias_nodes = _filter_nodes(ast.alias)\n\n\n# nodes that we don't support directly but are needed for parsing\n_hacked_nodes = frozenset(['Assign', 'Module', 'Expr'])\n\n\n_unsupported_expr_nodes = frozenset(['Yield', 'GeneratorExp', 'IfExp',\n 'DictComp', 'SetComp', 'Repr', 'Lambda',\n 'Set', 'AST', 'Is', 'IsNot'])\n\n# these nodes are low priority or won't ever be supported (e.g., AST)\n_unsupported_nodes = ((_stmt_nodes | _mod_nodes | _handler_nodes |\n _arguments_nodes | _keyword_nodes | _alias_nodes |\n _expr_context_nodes | _unsupported_expr_nodes) -\n _hacked_nodes)\n\n# we're adding a different assignment in some cases to be equality comparison\n# and we don't want `stmt` and friends in their so get only the class whose\n# names are capitalized\n_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes\n_msg = 'cannot both support and not support {0}'.format(_unsupported_nodes &\n _base_supported_nodes)\nassert not _unsupported_nodes & _base_supported_nodes, _msg\n\n\ndef _node_not_implemented(node_name, cls):\n \"\"\"Return a function that raises a NotImplementedError with a passed node\n name.\n \"\"\"\n def f(self, *args, **kwargs):\n raise NotImplementedError(\"{0!r} nodes are not \"\n \"implemented\".format(node_name))\n return f\n\n\ndef disallow(nodes):\n \"\"\"Decorator to disallow certain nodes from parsing. 
Raises a\n NotImplementedError instead.\n\n Returns\n -------\n disallowed : callable\n \"\"\"\n def disallowed(cls):\n cls.unsupported_nodes = ()\n for node in nodes:\n new_method = _node_not_implemented(node, cls)\n name = 'visit_{0}'.format(node)\n cls.unsupported_nodes += (name,)\n setattr(cls, name, new_method)\n return cls\n return disallowed\n\n\ndef _op_maker(op_class, op_symbol):\n \"\"\"Return a function to create an op class with its symbol already passed.\n\n Returns\n -------\n f : callable\n \"\"\"\n def f(self, node, *args, **kwargs):\n \"\"\"Return a partial function with an Op subclass with an operator\n already passed.\n\n Returns\n -------\n f : callable\n \"\"\"\n return partial(op_class, op_symbol, *args, **kwargs)\n return f\n\n\n_op_classes = {'binary': BinOp, 'unary': UnaryOp}\n\n\ndef add_ops(op_classes):\n \"\"\"Decorator to add default implementation of ops.\"\"\"\n def f(cls):\n for op_attr_name, op_class in compat.iteritems(op_classes):\n ops = getattr(cls, '{0}_ops'.format(op_attr_name))\n ops_map = getattr(cls, '{0}_op_nodes_map'.format(op_attr_name))\n for op in ops:\n op_node = ops_map[op]\n if op_node is not None:\n made_op = _op_maker(op_class, op)\n setattr(cls, 'visit_{0}'.format(op_node), made_op)\n return cls\n return f\n\n\n@disallow(_unsupported_nodes)\n@add_ops(_op_classes)\nclass BaseExprVisitor(ast.NodeVisitor):\n \"\"\"Custom ast walker. Parsers of other engines should subclass this class\n if necessary.\n\n Parameters\n ----------\n env : Scope\n engine : str\n parser : str\n preparser : callable\n \"\"\"\n const_type = Constant\n term_type = Term\n\n binary_ops = _cmp_ops_syms + _bool_ops_syms + _arith_ops_syms\n binary_op_nodes = ('Gt', 'Lt', 'GtE', 'LtE', 'Eq', 'NotEq', 'In', 'NotIn',\n 'BitAnd', 'BitOr', 'And', 'Or', 'Add', 'Sub', 'Mult',\n None, 'Pow', 'FloorDiv', 'Mod')\n binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes))\n\n unary_ops = _unary_ops_syms\n unary_op_nodes = 'UAdd', 'USub', 'Invert', 'Not'\n unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))\n\n rewrite_map = {\n ast.Eq: ast.In,\n ast.NotEq: ast.NotIn,\n ast.In: ast.In,\n ast.NotIn: ast.NotIn\n }\n\n def __init__(self, env, engine, parser, preparser=_preparse):\n self.env = env\n self.engine = engine\n self.parser = parser\n self.preparser = preparser\n\n def visit(self, node, **kwargs):\n if isinstance(node, string_types):\n clean = self.preparser(node)\n node = ast.fix_missing_locations(ast.parse(clean))\n elif not isinstance(node, ast.AST):\n raise TypeError(\"Cannot visit objects of type {0!r}\"\n \"\".format(node.__class__.__name__))\n\n method = 'visit_' + node.__class__.__name__\n visitor = getattr(self, method)\n return visitor(node, **kwargs)\n\n def visit_Module(self, node, **kwargs):\n if len(node.body) != 1:\n raise SyntaxError('only a single expression is allowed')\n expr = node.body[0]\n return self.visit(expr, **kwargs)\n\n def visit_Expr(self, node, **kwargs):\n return self.visit(node.value, **kwargs)\n\n def _rewrite_membership_op(self, node, left, right):\n # the kind of the operator (is actually an instance)\n op_instance = node.op\n op_type = type(op_instance)\n\n # must be two terms and the comparison operator must be ==/!=/in/not in\n if is_term(left) and is_term(right) and op_type in self.rewrite_map:\n\n left_list, right_list = map(_is_list, (left, right))\n left_str, right_str = map(_is_str, (left, right))\n\n # if there are any strings or lists in the expression\n if left_list or right_list or left_str or right_str:\n op_instance = 
self.rewrite_map[op_type]()\n\n # pop the string variable out of locals and replace it with a list\n # of one string, kind of a hack\n if right_str:\n self.env.remove_tmp(right.name)\n name = self.env.add_tmp([right.value])\n right = self.term_type(name, self.env)\n\n if left_str:\n self.env.remove_tmp(left.name)\n name = self.env.add_tmp([left.value])\n left = self.term_type(name, self.env)\n\n op = self.visit(op_instance)\n return op, op_instance, left, right\n\n def _possibly_transform_eq_ne(self, node, left=None, right=None):\n if left is None:\n left = self.visit(node.left, side='left')\n if right is None:\n right = self.visit(node.right, side='right')\n op, op_class, left, right = self._rewrite_membership_op(node, left,\n right)\n return op, op_class, left, right\n\n def _possibly_eval(self, binop, eval_in_python):\n # eval `in` and `not in` (for now) in \"partial\" python space\n # things that can be evaluated in \"eval\" space will be turned into\n # temporary variables. for example,\n # [1,2] in a + 2 * b\n # in that case a + 2 * b will be evaluated using numexpr, and the \"in\"\n # call will be evaluated using isin (in python space)\n return binop.evaluate(self.env, self.engine, self.parser,\n self.term_type, eval_in_python)\n\n def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,\n eval_in_python=('in', 'not in'),\n maybe_eval_in_python=('==', '!=')):\n res = op(lhs, rhs)\n\n # \"in\"/\"not in\" ops are always evaluated in python\n if res.op in eval_in_python:\n return self._possibly_eval(res, eval_in_python)\n elif (lhs.return_type == object or rhs.return_type == object and\n self.engine != 'pytables'):\n # evaluate \"==\" and \"!=\" in python if either of our operands has an\n # object return type\n return self._possibly_eval(res, eval_in_python +\n maybe_eval_in_python)\n return res\n\n def visit_BinOp(self, node, **kwargs):\n op, op_class, left, right = self._possibly_transform_eq_ne(node)\n return self._possibly_evaluate_binop(op, op_class, left, right)\n\n def visit_Div(self, node, **kwargs):\n return lambda lhs, rhs: Div(lhs, rhs,\n truediv=self.env.locals['truediv'])\n\n def visit_UnaryOp(self, node, **kwargs):\n op = self.visit(node.op)\n operand = self.visit(node.operand)\n return op(operand)\n\n def visit_Name(self, node, **kwargs):\n return self.term_type(node.id, self.env, **kwargs)\n\n def visit_Num(self, node, **kwargs):\n return self.const_type(node.n, self.env)\n\n def visit_Str(self, node, **kwargs):\n name = self.env.add_tmp(node.s)\n return self.term_type(name, self.env)\n\n def visit_List(self, node, **kwargs):\n name = self.env.add_tmp([self.visit(e).value for e in node.elts])\n return self.term_type(name, self.env)\n\n visit_Tuple = visit_List\n\n def visit_Index(self, node, **kwargs):\n \"\"\" df.index[4] \"\"\"\n return self.visit(node.value)\n\n def visit_Subscript(self, node, **kwargs):\n value = self.visit(node.value)\n slobj = self.visit(node.slice)\n result = pd.eval(slobj, local_dict=self.env, engine=self.engine,\n parser=self.parser)\n try:\n # a Term instance\n v = value.value[result]\n except AttributeError:\n # an Op instance\n lhs = pd.eval(value, local_dict=self.env, engine=self.engine,\n parser=self.parser)\n v = lhs[result]\n name = self.env.add_tmp(v)\n return self.term_type(name, env=self.env)\n\n def visit_Slice(self, node, **kwargs):\n \"\"\" df.index[slice(4,6)] \"\"\"\n lower = node.lower\n if lower is not None:\n lower = self.visit(lower).value\n upper = node.upper\n if upper is not None:\n upper = self.visit(upper).value\n 
step = node.step\n if step is not None:\n step = self.visit(step).value\n\n return slice(lower, upper, step)\n\n def visit_Assign(self, node, **kwargs):\n cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0],\n comparators=[node.value])\n return self.visit(cmpr)\n\n def visit_Attribute(self, node, **kwargs):\n attr = node.attr\n value = node.value\n\n ctx = node.ctx\n if isinstance(ctx, ast.Load):\n # resolve the value\n resolved = self.visit(value).value\n try:\n v = getattr(resolved, attr)\n name = self.env.add_tmp(v)\n return self.term_type(name, self.env)\n except AttributeError:\n # something like datetime.datetime where scope is overriden\n if isinstance(value, ast.Name) and value.id == attr:\n return resolved\n\n raise ValueError(\"Invalid Attribute context {0}\".format(ctx.__name__))\n\n def visit_Call(self, node, **kwargs):\n\n # this can happen with: datetime.datetime\n if isinstance(node.func, ast.Attribute):\n res = self.visit_Attribute(node.func)\n elif not isinstance(node.func, ast.Name):\n raise TypeError(\"Only named functions are supported\")\n else:\n res = self.visit(node.func)\n\n if res is None:\n raise ValueError(\"Invalid function call {0}\".format(node.func.id))\n if hasattr(res, 'value'):\n res = res.value\n\n args = [self.visit(targ).value for targ in node.args]\n if node.starargs is not None:\n args = args + self.visit(node.starargs).value\n\n keywords = {}\n for key in node.keywords:\n if not isinstance(key, ast.keyword):\n raise ValueError(\"keyword error in function call \"\n \"'{0}'\".format(node.func.id))\n keywords[key.arg] = self.visit(key.value).value\n if node.kwargs is not None:\n keywords.update(self.visit(node.kwargs).value)\n\n return self.const_type(res(*args, **keywords), self.env)\n\n def translate_In(self, op):\n return op\n\n def visit_Compare(self, node, **kwargs):\n ops = node.ops\n comps = node.comparators\n\n # base case: we have something like a CMP b\n if len(comps) == 1:\n op = self.translate_In(ops[0])\n binop = ast.BinOp(op=op, left=node.left, right=comps[0])\n return self.visit(binop)\n\n # recursive case: we have a chained comparison, a CMP b CMP c, etc.\n left = node.left\n values = []\n for op, comp in zip(ops, comps):\n new_node = self.visit(ast.Compare(comparators=[comp], left=left,\n ops=[self.translate_In(op)]))\n left = comp\n values.append(new_node)\n return self.visit(ast.BoolOp(op=ast.And(), values=values))\n\n def _try_visit_binop(self, bop):\n if isinstance(bop, (Op, Term)):\n return bop\n return self.visit(bop)\n\n def visit_BoolOp(self, node, **kwargs):\n def visitor(x, y):\n lhs = self._try_visit_binop(x)\n rhs = self._try_visit_binop(y)\n\n op, op_class, lhs, rhs = self._possibly_transform_eq_ne(node, lhs,\n rhs)\n return self._possibly_evaluate_binop(op, node.op, lhs, rhs)\n\n operands = node.values\n return reduce(visitor, operands)\n\n\n_python_not_supported = frozenset(['Assign', 'Dict', 'Call', 'BoolOp',\n 'In', 'NotIn'])\n_numexpr_supported_calls = frozenset(_reductions + _mathops)\n\n\n@disallow((_unsupported_nodes | _python_not_supported) -\n (_boolop_nodes | frozenset(['BoolOp', 'Attribute', 'In', 'NotIn',\n 'Tuple'])))\nclass PandasExprVisitor(BaseExprVisitor):\n def __init__(self, env, engine, parser,\n preparser=lambda x: _replace_locals(_replace_booleans(x))):\n super(PandasExprVisitor, self).__init__(env, engine, parser, preparser)\n\n\n@disallow(_unsupported_nodes | _python_not_supported | frozenset(['Not']))\nclass PythonExprVisitor(BaseExprVisitor):\n def __init__(self, env, engine, parser, 
preparser=lambda x: x):\n super(PythonExprVisitor, self).__init__(env, engine, parser,\n preparser=preparser)\n\n\nclass Expr(StringMixin):\n \"\"\"Object encapsulating an expression.\n\n Parameters\n ----------\n expr : str\n engine : str, optional, default 'numexpr'\n parser : str, optional, default 'pandas'\n env : Scope, optional, default None\n truediv : bool, optional, default True\n level : int, optional, default 2\n \"\"\"\n def __init__(self, expr, engine='numexpr', parser='pandas', env=None,\n truediv=True, level=2):\n self.expr = expr\n self.env = _ensure_scope(level=level, local_dict=env)\n self.engine = engine\n self.parser = parser\n self._visitor = _parsers[parser](self.env, self.engine, self.parser)\n self.terms = self.parse()\n self.truediv = truediv\n\n def __call__(self):\n self.env.locals['truediv'] = self.truediv\n return self.terms(self.env)\n\n def __unicode__(self):\n return com.pprint_thing(self.terms)\n\n def __len__(self):\n return len(self.expr)\n\n def parse(self):\n \"\"\"Parse an expression\"\"\"\n return self._visitor.visit(self.expr)\n\n def align(self):\n \"\"\"align a set of Terms\"\"\"\n return self.terms.align(self.env)\n\n @property\n def names(self):\n \"\"\"Get the names in an expression\"\"\"\n if is_term(self.terms):\n return frozenset([self.terms.name])\n return frozenset(term.name for term in com.flatten(self.terms))\n\n def check_name_clashes(self):\n env = self.env\n names = self.names\n res_keys = frozenset(env.resolver_dict.keys()) & names\n lcl_keys = frozenset(env.locals.keys()) & names\n gbl_keys = frozenset(env.globals.keys()) & names\n _check_disjoint_resolver_names(res_keys, lcl_keys, gbl_keys)\n\n def add_resolvers_to_locals(self):\n \"\"\"Add the extra scope (resolvers) to local scope\n\n Notes\n -----\n This should be done after parsing and pre-evaluation, otherwise\n unnecessary name clashes will occur.\n \"\"\"\n self.env.locals.update(self.env.resolver_dict)\n\n\ndef isexpr(s, check_names=True):\n \"\"\"Strict checking for a valid expression.\"\"\"\n try:\n Expr(s, env=_ensure_scope() if check_names else None)\n except SyntaxError:\n return False\n except NameError:\n return not check_names\n return True\n\n\n_parsers = {'python': PythonExprVisitor, 'pandas': PandasExprVisitor}\n"
] |
[
[
"pandas.Series",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.dtype",
"pandas.util.testing.assert_frame_equal",
"numpy.concatenate",
"pandas.util.testing.makePanel",
"numpy.random.randn",
"pandas.compat.iteritems",
"pandas.compat.lzip",
"numpy.random.randint",
"pandas.util.testing.makePanel4D",
"pandas.util.testing.add_nans",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.Index",
"pandas.util.testing.assert_panel4d_equal",
"pandas.util.testing.assert_panel_equal",
"numpy.zeros",
"pandas.algos.inner_join",
"pandas.MultiIndex",
"numpy.isnan",
"pandas.tools.merge.concat",
"pandas.util.testing.assert_almost_equal",
"pandas.util.testing.getMixedTypeDict",
"pandas.util.testing.getSeriesData",
"pandas.tools.merge.ordered_merge",
"pandas.tools.merge.merge",
"pandas.Panel.from_dict",
"numpy.random.rand",
"pandas.DataFrame.from_dict",
"numpy.array",
"pandas.util.testing.makeCustomDataframe",
"numpy.random.random",
"pandas.util.testing.makeTimeSeries",
"numpy.array_equal",
"pandas.isnull",
"numpy.tile",
"pandas.MultiIndex.from_arrays",
"pandas.tseries.index.date_range",
"numpy.ones",
"pandas.algos.left_outer_join",
"pandas.util.testing.rands",
"pandas.compat.lrange",
"pandas.compat.range"
],
[
"pandas.core.config.register_option",
"pandas.core.config.get_option",
"pandas.core.config.deprecate_option",
"pandas.core.config.config_prefix",
"pandas.core.config.get_default_val",
"pandas.core.common._use_inf_as_null",
"pandas.core.config.is_one_of_factory",
"pandas.core.format.detect_console_encoding",
"matplotlib.pyplot.rcParams.update"
],
[
"pandas.core.common.flatten",
"pandas.eval",
"pandas.core.common.intersection",
"pandas.computation.common.NameResolutionError",
"pandas.compat.reduce",
"pandas.compat.StringIO",
"pandas.computation.ops.is_term",
"pandas.compat.zip",
"pandas.compat.iteritems",
"pandas.core.common.pprint_thing",
"pandas.computation.ops.Div"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.19"
],
"scipy": [],
"tensorflow": []
}
] |
XWilliamY/Dancing2Music
|
[
"2352a99e0dcbd74e621b40a47fc2142089e9b400"
] |
[
"mfcc_rev.py"
] |
[
"import numpy, numpy.fft\nimport math\n\ndef mel(f):\n return 2595. * numpy.log10(1. + f / 700.)\n\ndef melinv(m):\n return 700. * (numpy.power(10., m / 2595.) - 1.)\n\nclass MFCC(object):\n def __init__(self, nfilt=40, ncep=13,\n lowerf=133.3333, upperf=6855.4976, alpha=0.97,\n samprate=22050, frate=15, wlen=0.0667,\n nfft=512):\n # Store parameters\n self.lowerf = lowerf\n self.upperf = upperf\n self.nfft = nfft\n self.ncep = ncep\n self.nfilt = nfilt\n self.frate = frate\n self.samprate = samprate\n self.fshift = float(samprate) / frate\n\n # Build Hamming window\n self.wlen = int(wlen * samprate)\n self.win = numpy.hamming(self.wlen)\n\n # Prior sample for pre-emphasis\n self.prior = 0\n self.alpha = alpha\n\n # Build mel filter matrix\n self.filters = numpy.zeros((nfft//2+1,nfilt), 'd')\n dfreq = float(samprate) / nfft\n if upperf > samprate/2:\n raise(Exception,\n \"Upper frequency %f exceeds Nyquist %f\" % (upperf, samprate/2))\n melmax = mel(upperf)\n melmin = mel(lowerf)\n dmelbw = (melmax - melmin) / (nfilt + 1)\n # Filter edges, in Hz\n filt_edge = melinv(melmin + dmelbw * numpy.arange(nfilt + 2, dtype='d'))\n\n for whichfilt in range(0, nfilt):\n # Filter triangles, in DFT points\n leftfr = round(filt_edge[whichfilt] / dfreq)\n centerfr = round(filt_edge[whichfilt + 1] / dfreq)\n rightfr = round(filt_edge[whichfilt + 2] / dfreq)\n # For some reason this is calculated in Hz, though I think\n # it doesn't really matter\n fwidth = (rightfr - leftfr) * dfreq\n height = 2. / fwidth\n\n if centerfr != leftfr:\n leftslope = height / (centerfr - leftfr)\n else:\n leftslope = 0\n freq = int(leftfr + 1)\n while freq < centerfr:\n self.filters[freq,whichfilt] = (freq - leftfr) * leftslope\n freq = freq + 1\n if freq == centerfr: # This is always true\n self.filters[freq,whichfilt] = height\n freq = freq + 1\n if centerfr != rightfr:\n rightslope = height / (centerfr - rightfr)\n while freq < rightfr:\n self.filters[freq,whichfilt] = (freq - rightfr) * rightslope\n freq = freq + 1\n# print(\"Filter %d: left %d=%f center %d=%f right %d=%f width %d\" %\n# (whichfilt,\n# leftfr, leftfr*dfreq,\n# centerfr, centerfr*dfreq,\n# rightfr, rightfr*dfreq,\n# freq - leftfr))\n# print self.filters[leftfr:rightfr,whichfilt]\n\n # Build DCT matrix\n self.s2dct = s2dctmat(nfilt, ncep, 1./nfilt)\n self.dct = dctmat(nfilt, ncep, numpy.pi/nfilt)\n\n def sig2s2mfc(self, sig):\n nfr = int(len(sig) / self.fshift + 1)\n mfcc = numpy.zeros((nfr, self.ncep), 'd')\n fr = 0\n while fr < nfr:\n start = round(fr * self.fshift)\n end = min(len(sig), start + self.wlen)\n frame = sig[start:end]\n if len(frame) < self.wlen:\n frame = numpy.resize(frame,self.wlen)\n frame[self.wlen:] = 0\n mfcc[fr] = self.frame2s2mfc(frame)\n fr = fr + 1\n return mfcc\n\n def sig2s2mfc_energy(self, sig):\n nfr = int(len(sig) / self.fshift + 1)\n mfcc = numpy.zeros((nfr, self.ncep + 2), 'd')\n fr = 0\n while fr < nfr:\n start = int(round(fr * self.fshift))\n end = min(len(sig), start + self.wlen)\n frame = sig[start:end]\n if len(frame) < self.wlen:\n frame = numpy.resize(frame,self.wlen)\n frame[self.wlen:] = 0\n mfcc[fr,:-2] = self.frame2s2mfc(frame)\n mfcc[fr, -2] = math.log(1 + numpy.mean(numpy.power(frame.astype(float), 2)))\n mid = 0.5 * (start + end - 1)\n mfcc[fr, -1] = mid / self.samprate\n \n fr = fr + 1\n return mfcc\n\n\n def sig2logspec(self, sig):\n nfr = int(len(sig) / self.fshift + 1)\n mfcc = numpy.zeros((nfr, self.nfilt), 'd')\n fr = 0\n while fr < nfr:\n start = round(fr * self.fshift)\n end = min(len(sig), start + 
self.wlen)\n frame = sig[start:end]\n if len(frame) < self.wlen:\n frame = numpy.resize(frame,self.wlen)\n frame[self.wlen:] = 0\n mfcc[fr] = self.frame2logspec(frame)\n fr = fr + 1\n return mfcc\n\n def pre_emphasis(self, frame):\n # FIXME: Do this with matrix multiplication\n outfr = numpy.empty(len(frame), 'd')\n outfr[0] = frame[0] - self.alpha * self.prior\n for i in range(1,len(frame)):\n outfr[i] = frame[i] - self.alpha * frame[i-1]\n self.prior = frame[-1]\n return outfr\n \n def frame2logspec(self, frame):\n frame = self.pre_emphasis(frame) * self.win\n fft = numpy.fft.rfft(frame, self.nfft)\n # Square of absolute value\n power = fft.real * fft.real + fft.imag * fft.imag\n return numpy.log(numpy.dot(power, self.filters).clip(1e-5,numpy.inf))\n\n def frame2s2mfc(self, frame):\n logspec = self.frame2logspec(frame)\n return numpy.dot(logspec, self.s2dct.T) / self.nfilt\n\ndef s2dctmat(nfilt,ncep,freqstep):\n \"\"\"Return the 'legacy' not-quite-DCT matrix used by Sphinx\"\"\"\n melcos = numpy.empty((ncep, nfilt), 'double')\n for i in range(0,ncep):\n freq = numpy.pi * float(i) / nfilt\n melcos[i] = numpy.cos(freq * numpy.arange(0.5, float(nfilt)+0.5, 1.0, 'double'))\n melcos[:,0] = melcos[:,0] * 0.5\n return melcos\n\ndef logspec2s2mfc(logspec, ncep=13):\n \"\"\"Convert log-power-spectrum bins to MFCC using the 'legacy'\n Sphinx transform\"\"\"\n nframes, nfilt = logspec.shape\n melcos = s2dctmat(nfilt, ncep, 1./nfilt)\n return numpy.dot(logspec, melcos.T) / nfilt\n\ndef dctmat(N,K,freqstep,orthogonalize=True):\n \"\"\"Return the orthogonal DCT-II/DCT-III matrix of size NxK.\n For computing or inverting MFCCs, N is the number of\n log-power-spectrum bins while K is the number of cepstra.\"\"\"\n cosmat = numpy.zeros((N, K), 'double')\n for n in range(0,N):\n for k in range(0, K):\n cosmat[n,k] = numpy.cos(freqstep * (n + 0.5) * k)\n if orthogonalize:\n cosmat[:,0] = cosmat[:,0] * 1./numpy.sqrt(2)\n return cosmat\n\ndef dct(input, K=13):\n \"\"\"Convert log-power-spectrum to MFCC using the orthogonal DCT-II\"\"\"\n nframes, N = input.shape\n freqstep = numpy.pi / N\n cosmat = dctmat(N,K,freqstep)\n return numpy.dot(input, cosmat) * numpy.sqrt(2.0 / N)\n\ndef dct2(input, K=13):\n \"\"\"Convert log-power-spectrum to MFCC using the normalized DCT-II\"\"\"\n nframes, N = input.shape\n freqstep = numpy.pi / N\n cosmat = dctmat(N,K,freqstep,False)\n return numpy.dot(input, cosmat) * (2.0 / N)\n\ndef idct(input, K=40):\n \"\"\"Convert MFCC to log-power-spectrum using the orthogonal DCT-III\"\"\"\n nframes, N = input.shape\n freqstep = numpy.pi / K\n cosmat = dctmat(K,N,freqstep).T\n return numpy.dot(input, cosmat) * numpy.sqrt(2.0 / K)\n\ndef dct3(input, K=40):\n \"\"\"Convert MFCC to log-power-spectrum using the unnormalized DCT-III\"\"\"\n nframes, N = input.shape\n freqstep = numpy.pi / K\n cosmat = dctmat(K,N,freqstep,False)\n cosmat[:,0] = cosmat[:,0] * 0.5\n return numpy.dot(input, cosmat.T)\n"
] |
[
[
"numpy.dot",
"numpy.resize",
"numpy.sqrt",
"numpy.fft.rfft",
"numpy.power",
"numpy.arange",
"numpy.cos",
"numpy.log10",
"numpy.hamming",
"numpy.zeros",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
telmuunwj/frankmocap1
|
[
"86b87833fe4597433e3be4e457c79d6edb6bdc43"
] |
[
"mocap_utils/compose_image.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport os, sys, shutil\nimport os.path as osp\nimport cv2\nimport numpy as np\nfrom . import general_utils as gnu\n\n\ndef main():\n in_dir = \"./sample_data/images/single_person\"\n out_dir = \"./sample_data/images/multi_person\"\n gnu.renew_dir(out_dir)\n\n all_imgs = gnu.get_all_files(in_dir, (\".jpg\", \".png\", \".jpeg\"), \"full\")\n num_img = len(all_imgs)\n\n for i in range(num_img):\n for j in range(num_img):\n img1 = cv2.imread(all_imgs[i])\n img2 = cv2.imread(all_imgs[j])\n img2 = cv2.resize(img2, img1.shape[:2][::-1])\n res_img = np.concatenate((img1, img2), axis=1)\n res_img_path = osp.join(out_dir, f\"{i:02d}_{j:02d}.jpg\")\n cv2.imwrite(res_img_path, res_img)\n\n\nif __name__ == '__main__':\n main()"
] |
[
[
"numpy.concatenate"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chandar-lab/RLHive
|
[
"c5b1b77a3daf87aeb877e10c4f65bcd7e2bc1ff8"
] |
[
"hive/envs/marlgrid/ma_envs/checkers.py"
] |
[
"import numpy as np\nfrom marlgrid.base import MultiGrid\nfrom marlgrid.objects import Goal, GridAgent\n\nfrom hive.envs.marlgrid.ma_envs.base import MultiGridEnvHive\n\n\nclass CheckersMultiGrid(MultiGridEnvHive):\n \"\"\"\n Checkers environment based on sunehag et al. 2017\n\n \"... The map contains apples and lemons. The first player is very sensitive and scores 10 for\n the team for an apple (green square) and −10 for a lemon (orange square).\n The second, less sensitive player scores 1 for the team for an apple and −1 for a lemon.\n There is a wall of lemons between the players and the apples.\n Apples and lemons disappear when collected.\n The environment resets when all apples are eaten or maximum number of steps is reached.\n \"\"\"\n\n def _gen_grid(self, width, height):\n self.num_rows = 3\n self.grid = MultiGrid((width, height))\n self.grid.wall_rect(0, 0, width, height)\n apple = Goal(color=\"green\", reward=10)\n orange = Goal(color=\"red\", reward=-10)\n self.num_remained_apples = 0\n for j in range(self.num_rows):\n oranges_loc = [2 * i + 1 + j % 2 for i in range(width // 2 - 1)]\n apples_loc = [2 * i + 1 + (j + 1) % 2 for i in range(width // 2 - 1)]\n for orange_loc in oranges_loc:\n self.put_obj(orange, orange_loc, j + 1)\n\n for apple_loc in apples_loc:\n self.put_obj(apple, apple_loc, j + 1)\n self.num_remained_apples += 1\n\n self.agent_spawn_kwargs = {}\n self.ghost_mode = False\n\n def reset(self, **kwargs):\n for agent in self.agents:\n agent.agents = []\n agent.reset(new_episode=True)\n\n self._gen_grid(self.width, self.height)\n\n for agent in self.agents:\n if agent.spawn_delay == 0:\n self.place_obj(\n agent,\n top=(0, self.num_rows + 1),\n size=(self.width, self.height - self.num_rows - 1),\n **self.agent_spawn_kwargs,\n )\n agent.activate()\n\n self.step_count = 0\n obs = self.gen_obs()\n for ag_idx, _ in enumerate(obs):\n obs[ag_idx] = np.array(obs[ag_idx], dtype=np.uint8)\n return obs\n\n def step(self, actions):\n # Spawn agents if it's time.\n for agent in self.agents:\n if (\n not agent.active\n and not agent.done\n and self.step_count >= agent.spawn_delay\n ):\n self.place_obj(agent, **self.agent_spawn_kwargs)\n agent.activate()\n\n assert len(actions) == len(self.agents)\n\n step_rewards = np.zeros((len(self.agents)), dtype=np.float)\n\n self.step_count += 1\n\n iter_agents = list(enumerate(zip(self.agents, actions)))\n iter_order = np.arange(len(iter_agents))\n self.np_random.shuffle(iter_order)\n for shuffled_ix in iter_order:\n agent_no, (agent, action) = iter_agents[shuffled_ix]\n agent.step_reward = 0\n\n if agent.active:\n\n cur_pos = agent.pos[:]\n cur_cell = self.grid.get(*cur_pos)\n fwd_pos = agent.front_pos[:]\n fwd_cell = self.grid.get(*fwd_pos)\n agent_moved = False\n\n # Rotate left\n if action == agent.actions.left:\n agent.dir = (agent.dir - 1) % 4\n\n # Rotate right\n elif action == agent.actions.right:\n agent.dir = (agent.dir + 1) % 4\n\n # Move forward\n elif action == agent.actions.forward:\n # Under the follow conditions, the agent can move forward.\n can_move = fwd_cell is None or fwd_cell.can_overlap()\n if self.ghost_mode is False and isinstance(fwd_cell, GridAgent):\n can_move = False\n\n if can_move:\n agent_moved = True\n # Add agent to new cell\n if fwd_cell is None or isinstance(fwd_cell, Goal):\n self.grid.set(*fwd_pos, agent)\n agent.pos = fwd_pos\n else:\n fwd_cell.agents.append(agent)\n agent.pos = fwd_pos\n\n # Remove agent from old cell\n if cur_cell == agent:\n self.grid.set(*cur_pos, None)\n else:\n assert 
cur_cell.can_overlap()\n cur_cell.agents.remove(agent)\n\n # Add agent's agents to old cell\n for left_behind in agent.agents:\n cur_obj = self.grid.get(*cur_pos)\n if cur_obj is None:\n self.grid.set(*cur_pos, left_behind)\n elif cur_obj.can_overlap():\n cur_obj.agents.append(left_behind)\n else:\n raise ValueError(\n \"How was agent there in the first place?\"\n )\n\n # After moving, the agent shouldn't contain any other agents.\n agent.agents = []\n\n # Rewards can be got iff. fwd_cell has a \"get_reward\" method\n if hasattr(fwd_cell, \"get_reward\"):\n rwd = fwd_cell.get_reward(agent)\n\n # Modify the reward for less sensitive agent\n if agent_no == 0:\n rwd /= 10\n if bool(self.reward_decay):\n rwd *= 1.0 - 0.9 * (self.step_count / self.max_steps)\n step_rewards[agent_no] += rwd\n agent.reward(rwd)\n if rwd > 0:\n self.num_remained_apples -= 1\n\n # Pick up an object\n elif action == agent.actions.pickup:\n if fwd_cell and fwd_cell.can_pickup() and agent.carrying is None:\n agent.carrying = fwd_cell\n agent.carrying.cur_pos = np.array([-1, -1])\n self.grid.set(*fwd_pos, None)\n\n # Drop an object\n elif action == agent.actions.drop:\n if not fwd_cell and agent.carrying:\n self.grid.set(*fwd_pos, agent.carrying)\n agent.carrying.cur_pos = fwd_pos\n agent.carrying = None\n\n # Toggle/activate an object\n elif action == agent.actions.toggle:\n if fwd_cell:\n wasted = bool(fwd_cell.toggle(agent, fwd_pos))\n\n # Done action (not used by default)\n elif action == agent.actions.done:\n pass\n\n else:\n raise ValueError(f\"Environment can't handle action {action}.\")\n\n agent.on_step(fwd_cell if agent_moved else None)\n\n # If any of the agents individually are \"done\" (hit lava or in some cases a goal)\n # but the env requires respawning, then respawn those agents.\n for agent in self.agents:\n if agent.done:\n if self.respawn:\n resting_place_obj = self.grid.get(*agent.pos)\n if resting_place_obj == agent:\n if agent.agents:\n self.grid.set(*agent.pos, agent.agents[0])\n agent.agents[0].agents += agent.agents[1:]\n else:\n self.grid.set(*agent.pos, None)\n else:\n resting_place_obj.agents.remove(agent)\n resting_place_obj.agents += agent.agents[:]\n agent.agents = []\n\n agent.reset(new_episode=False)\n self.place_obj(agent, **self.agent_spawn_kwargs)\n agent.activate()\n else: # if the agent shouldn't be respawned, then deactivate it.\n agent.deactivate()\n\n # The episode overall is done if all the agents are done,\n # or if it exceeds the step limit or all the apples are collected.\n done = (\n (self.step_count >= self.max_steps)\n or all([agent.done for agent in self.agents])\n or self.num_remained_apples == 0\n )\n\n obs = [\n np.asarray(self.gen_agent_obs(agent), dtype=np.uint8)\n for agent in self.agents\n ]\n\n # Team reward\n step_rewards = np.array([np.sum(step_rewards) for _ in self.agents])\n\n return obs, step_rewards, done, {}\n"
] |
[
[
"numpy.array",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
caochenrui/USTC-ncov-AutoReport
|
[
"bb7042519d0c285d7612d38f483a64d4ac704397"
] |
[
"ustclogin.py"
] |
[
"from bs4 import BeautifulSoup\nimport requests\nfrom io import BytesIO\nimport pytesseract\nfrom PIL import Image\nimport numpy as np\nimport cv2\nfrom urllib.parse import unquote\nheaders={'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'}\nclass Login:\n def __init__(self, stuid, password, service):\n self.stuid=stuid\n self.password=password\n self.service=service\n \n def get_LT(self):\n text=self.session.get('https://passport.ustc.edu.cn/validatecode.jsp?type=login',stream=True).content\n image=Image.open(BytesIO(text))\n image=cv2.cvtColor(np.asarray(image),cv2.COLOR_RGB2BGR)\n kernel = np.ones((3,3),np.uint8)\n image = cv2.dilate(image,kernel,iterations = 1)\n image = cv2.erode(image,kernel,iterations = 1)\n return pytesseract.image_to_string(Image.fromarray(image))[:4]\n \n def passport(self):\n data=self.session.get('https://passport.ustc.edu.cn/login?service='+self.service,headers=headers)\n data=data.text\n data = data.encode('ascii','ignore').decode('utf-8','ignore')\n soup = BeautifulSoup(data, 'html.parser')\n CAS_LT = soup.find(\"input\", {\"name\": \"CAS_LT\"})['value']\n LT=self.get_LT()\n data = {\n 'model': 'uplogin.jsp',\n 'service': unquote(self.service),\n 'warn': '',\n 'showCode': '1',\n 'username': self.stuid,\n 'password': str(self.password),\n 'button': '',\n 'CAS_LT':CAS_LT,\n 'LT':LT\n }\n self.result=self.session.post('https://passport.ustc.edu.cn/login', data=data,headers=headers)\n \n def login(self):\n self.session=requests.Session()\n loginsuccess = False\n retrycount = 5\n while (not loginsuccess) and retrycount:\n self.passport()\n self.cookies = self.session.cookies\n retrycount = retrycount - 1\n if self.result.url=='https://passport.ustc.edu.cn/login':\n print(\"Login Failed! Retry...\")\n else:\n print(\"Login Successful!\")\n loginsuccess = True\n return loginsuccess\n"
] |
[
[
"numpy.asarray",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZhiningLiu1998/imbalanced-ensemble
|
[
"d6ed07689ad8c9cd27ecd589849f7122b98ab1a4",
"26670c8a6b7bab26ae1e18cba3174a9d9038a680",
"26670c8a6b7bab26ae1e18cba3174a9d9038a680",
"26670c8a6b7bab26ae1e18cba3174a9d9038a680",
"d6ed07689ad8c9cd27ecd589849f7122b98ab1a4"
] |
[
"imbalanced_ensemble/datasets/tests/test_zenodo.py",
"imbalanced_ensemble/ensemble/_bagging.py",
"imbalanced_ensemble/datasets/_imbalance.py",
"imbalanced_ensemble/ensemble/over_sampling/kmeans_smote_boost.py",
"docs/source/auto_examples/classification/plot_resampling_target.py"
] |
[
"\"\"\"Test the datasets loader.\n\nSkipped if datasets is not already downloaded to data_home.\n\"\"\"\n# Authors: Guillaume Lemaitre <[email protected]>\n# Christos Aridas\n# License: MIT\n\nimport pytest\n\nfrom imbalanced_ensemble.datasets import fetch_datasets\nfrom sklearn.utils._testing import SkipTest\n\nDATASET_SHAPE = {\n \"ecoli\": (336, 7),\n \"optical_digits\": (5620, 64),\n \"satimage\": (6435, 36),\n \"pen_digits\": (10992, 16),\n \"abalone\": (4177, 10),\n \"sick_euthyroid\": (3163, 42),\n \"spectrometer\": (531, 93),\n \"car_eval_34\": (1728, 21),\n \"isolet\": (7797, 617),\n \"us_crime\": (1994, 100),\n \"yeast_ml8\": (2417, 103),\n \"scene\": (2407, 294),\n \"libras_move\": (360, 90),\n \"thyroid_sick\": (3772, 52),\n \"coil_2000\": (9822, 85),\n \"arrhythmia\": (452, 278),\n \"solar_flare_m0\": (1389, 32),\n \"oil\": (937, 49),\n \"car_eval_4\": (1728, 21),\n \"wine_quality\": (4898, 11),\n \"letter_img\": (20000, 16),\n \"yeast_me2\": (1484, 8),\n \"webpage\": (34780, 300),\n \"ozone_level\": (2536, 72),\n \"mammography\": (11183, 6),\n \"protein_homo\": (145751, 74),\n \"abalone_19\": (4177, 10),\n}\n\n\ndef fetch(*args, **kwargs):\n return fetch_datasets(*args, download_if_missing=True, **kwargs)\n\n\[email protected]\ndef test_fetch():\n try:\n datasets1 = fetch(shuffle=True, random_state=42)\n except IOError:\n raise SkipTest(\"Zenodo dataset can not be loaded.\")\n\n datasets2 = fetch(shuffle=True, random_state=37)\n\n for k in DATASET_SHAPE.keys():\n\n X1, X2 = datasets1[k].data, datasets2[k].data\n assert DATASET_SHAPE[k] == X1.shape\n assert X1.shape == X2.shape\n\n y1, y2 = datasets1[k].target, datasets2[k].target\n assert (X1.shape[0],) == y1.shape\n assert (X1.shape[0],) == y2.shape\n\n\ndef test_fetch_filter():\n try:\n datasets1 = fetch(filter_data=tuple([1]), shuffle=True, random_state=42)\n except IOError:\n raise SkipTest(\"Zenodo dataset can not be loaded.\")\n\n datasets2 = fetch(filter_data=tuple([\"ecoli\"]), shuffle=True, random_state=37)\n\n X1, X2 = datasets1[\"ecoli\"].data, datasets2[\"ecoli\"].data\n assert DATASET_SHAPE[\"ecoli\"] == X1.shape\n assert X1.shape == X2.shape\n\n assert X1.sum() == pytest.approx(X2.sum())\n\n y1, y2 = datasets1[\"ecoli\"].target, datasets2[\"ecoli\"].target\n assert (X1.shape[0],) == y1.shape\n assert (X1.shape[0],) == y2.shape\n\n\[email protected](\n \"filter_data, err_msg\",\n [\n ((\"rnf\",), \"is not a dataset available\"),\n ((-1,), \"dataset with the ID=\"),\n ((100,), \"dataset with the ID=\"),\n ((1.00,), \"value in the tuple\"),\n ],\n)\ndef test_fetch_error(filter_data, err_msg):\n with pytest.raises(ValueError, match=err_msg):\n fetch_datasets(filter_data=filter_data)\n",
"\"\"\"Base classes for all bagging-like methods in imbalanced_ensemble.\n\nResampleBaggingClassifier Base class for all resampling + \nbagging imbalanced ensemble classifier.\n\"\"\"\n\n# Authors: Zhining Liu <[email protected]>\n# License: MIT\n\n# %%\n\n\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\nimport numbers\nimport itertools\nfrom warnings import warn\nfrom joblib import Parallel\n\nfrom sklearn.base import clone\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble._base import _partition_estimators\nfrom sklearn.pipeline import Pipeline as skPipeline\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils.validation import _check_sample_weight, has_fit_parameter\nfrom sklearn.utils.fixes import delayed\nfrom sklearn.utils.random import sample_without_replacement\n\n\nfrom .base import ImbalancedEnsembleClassifierMixin, MAX_INT\nfrom ..pipeline import Pipeline\nfrom ..utils._validation_data import check_eval_datasets\nfrom ..utils._validation_param import (check_train_verbose, \n check_eval_metrics, \n check_type)\nfrom ..utils._validation import (_deprecate_positional_args, \n check_sampling_strategy, \n check_target_type)\nfrom ..utils._docstring import (FuncSubstitution, \n FuncGlossarySubstitution,\n _get_parameter_docstring)\n\n# # For local test\n# import sys\n# sys.path.append(\"..\")\n# from ensemble.base import ImbalancedEnsembleClassifierMixin, MAX_INT\n# from pipeline import Pipeline\n# from utils._validation_data import check_eval_datasets\n# from utils._validation_param import (check_train_verbose, \n# check_eval_metrics, \n# check_type)\n# from utils._validation import (_deprecate_positional_args, \n# check_sampling_strategy, \n# check_target_type)\n# from utils._docstring import (FuncSubstitution, \n# _get_parameter_docstring)\n\n\ndef _generate_indices(random_state, bootstrap, n_population, n_samples):\n \"\"\"Draw randomly sampled indices.\"\"\"\n # Draw sample indices\n if bootstrap:\n indices = random_state.randint(0, n_population, n_samples)\n else:\n indices = sample_without_replacement(n_population, n_samples,\n random_state=random_state)\n\n return indices\n\n\ndef _generate_bagging_indices(random_state, bootstrap_features,\n bootstrap_samples, n_features, n_samples,\n max_features, max_samples):\n \"\"\"Randomly draw feature and sample indices.\"\"\"\n # Get valid random state\n random_state = check_random_state(random_state)\n\n # Draw indices\n feature_indices = _generate_indices(random_state, bootstrap_features,\n n_features, max_features)\n sample_indices = _generate_indices(random_state, bootstrap_samples,\n n_samples, max_samples)\n\n return feature_indices, sample_indices\n \n\ndef _parallel_build_estimators(n_estimators, ensemble, X, y, sample_weight,\n seeds, total_n_estimators, verbose):\n \"\"\"Private function used to build a batch of estimators within a job.\"\"\"\n # Retrieve settings\n n_samples, n_features = X.shape\n max_features = ensemble._max_features\n max_samples = ensemble._max_samples\n bootstrap = ensemble.bootstrap\n bootstrap_features = ensemble.bootstrap_features\n \n # Check if the base_estimator supports sample_weight\n base_estimator_ = ensemble.base_estimator_\n while (isinstance(base_estimator_, skPipeline)): # for Pipelines\n base_estimator_ = base_estimator_._final_estimator\n support_sample_weight = has_fit_parameter(base_estimator_, \"sample_weight\")\n if not support_sample_weight and sample_weight is not None:\n raise 
ValueError(\"The base estimator doesn't support sample weight\")\n\n # Build estimators\n estimators = []\n estimators_features = []\n estimators_n_training_samples = []\n\n for i in range(n_estimators):\n if verbose > 1:\n print(\"Building estimator %d of %d for this parallel run \"\n \"(total %d)...\" % (i + 1, n_estimators, total_n_estimators))\n\n random_state = seeds[i]\n estimator = ensemble._make_estimator(append=False,\n random_state=random_state)\n\n # Draw random feature, sample indices\n features, indices = _generate_bagging_indices(random_state,\n bootstrap_features,\n bootstrap, n_features,\n n_samples, max_features,\n max_samples)\n\n # Draw samples, using sample weights, and then fit\n if support_sample_weight:\n if sample_weight is None:\n curr_sample_weight = np.ones((n_samples,))\n else:\n curr_sample_weight = sample_weight.copy()\n\n estimator.fit((X[indices])[:, features], y[indices], \n sample_weight=curr_sample_weight[indices])\n\n else:\n estimator.fit((X[indices])[:, features], y[indices])\n \n if hasattr(estimator, 'n_training_samples_'):\n n_training_samples = getattr(estimator, 'n_training_samples_')\n else: n_training_samples = len(indices)\n\n estimators.append(estimator)\n estimators_features.append(features)\n estimators_n_training_samples.append(n_training_samples)\n\n return estimators, estimators_features, estimators_n_training_samples\n\n\n_super = BaggingClassifier\n\nclass ResampleBaggingClassifier(ImbalancedEnsembleClassifierMixin,\n BaggingClassifier, metaclass=ABCMeta):\n \"\"\"Base class for all resampling + bagging imbalanced ensemble classifier.\n \n Warning: This class should not be used directly. Use the derive classes\n instead.\n \"\"\"\n\n _ensemble_type = 'bagging'\n _solution_type = 'resampling'\n _training_type = 'parallel'\n\n _properties = {\n 'ensemble_type': _ensemble_type,\n 'solution_type': _solution_type,\n 'training_type': _training_type,\n }\n\n @_deprecate_positional_args\n def __init__(self,\n base_estimator=None,\n n_estimators=10,\n *,\n base_sampler,\n sampling_type,\n sampling_strategy=\"auto\",\n max_samples=1.0,\n max_features=1.0,\n bootstrap=True,\n bootstrap_features=False,\n oob_score=False,\n warm_start=False,\n n_jobs=None,\n random_state=None,\n verbose=0,):\n\n self.sampling_strategy = sampling_strategy\n self._sampling_type = sampling_type\n self.base_sampler = base_sampler\n\n super().__init__(\n base_estimator=base_estimator,\n n_estimators=n_estimators,\n max_samples=max_samples,\n max_features=max_features,\n bootstrap=bootstrap,\n bootstrap_features=bootstrap_features,\n oob_score=oob_score,\n warm_start=warm_start,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose,\n )\n\n\n def _validate_y(self, y):\n \"\"\"Validate the label vector.\"\"\"\n y_encoded = super()._validate_y(y)\n if (\n isinstance(self.sampling_strategy, dict)\n and self.base_sampler_._sampling_type != \"bypass\"\n ):\n self._sampling_strategy = {\n np.where(self.classes_ == key)[0][0]: value\n for key, value in check_sampling_strategy(\n self.sampling_strategy,\n y,\n self.base_sampler_._sampling_type,\n ).items()\n }\n else:\n self._sampling_strategy = self.sampling_strategy\n return y_encoded\n\n\n def _validate_estimator(self, default=DecisionTreeClassifier()):\n \"\"\"Check the estimator and the n_estimator attribute, set the\n `base_estimator_` attribute.\"\"\"\n if not isinstance(self.n_estimators, (numbers.Integral, np.integer)):\n raise ValueError(\n f\"n_estimators must be an integer, \" f\"got 
{type(self.n_estimators)}.\"\n )\n\n if self.n_estimators <= 0:\n raise ValueError(\n f\"n_estimators must be greater than zero, \" f\"got {self.n_estimators}.\"\n )\n\n if self.base_estimator is not None:\n base_estimator = clone(self.base_estimator)\n else:\n base_estimator = clone(default)\n \n # validate sampler and sampler_kwargs\n # validated sampler stored in self.base_sampler_\n try:\n self.base_sampler_ = clone(self.base_sampler)\n except Exception as e:\n e_args = list(e.args)\n e_args[0] = \"Exception occurs when trying to validate\" + \\\n \" base_sampler: \" + e_args[0]\n e.args = tuple(e_args)\n raise e\n\n if self.base_sampler_._sampling_type != \"bypass\":\n self.base_sampler_.set_params(sampling_strategy=self._sampling_strategy)\n self.base_sampler_.set_params(**self.sampler_kwargs_)\n\n self.base_estimator_ = Pipeline(\n [\n (\"sampler\", self.base_sampler_),\n (\"classifier\", base_estimator),\n ]\n )\n\n\n def _more_tags(self):\n tags = super()._more_tags()\n tags_key = \"_xfail_checks\"\n failing_test = \"check_estimators_nan_inf\"\n reason = \"Fails because the sampler removed infinity and NaN values\"\n if tags_key in tags:\n tags[tags_key][failing_test] = reason\n else:\n tags[tags_key] = {failing_test: reason}\n return tags\n \n\n @_deprecate_positional_args\n @FuncSubstitution(\n eval_datasets=_get_parameter_docstring('eval_datasets'),\n eval_metrics=_get_parameter_docstring('eval_metrics'),\n train_verbose=_get_parameter_docstring('train_verbose', **_properties),\n )\n def _fit(self, X, y, \n *,\n sample_weight=None, \n sampler_kwargs:dict={},\n max_samples=None,\n eval_datasets:dict=None,\n eval_metrics:dict=None,\n train_verbose:bool or int or dict,\n ):\n \"\"\"Build a Bagging ensemble of estimators from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only if\n they are supported by the base estimator.\n\n y : array-like of shape (n_samples,)\n The target values (class labels in classification, real numbers in\n regression).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted.\n Note that this is supported only if the base estimator supports\n sample weighting.\n \n sampler_kwargs : dict, default={}\n The kwargs to use as additional parameters when instantiating a\n new sampler. 
If none are given, default parameters are used.\n \n max_samples : int or float, default=None\n Argument to use instead of self.max_samples.\n \n %(eval_datasets)s\n \n %(eval_metrics)s\n \n %(train_verbose)s\n\n Returns\n -------\n self : object\n \"\"\"\n \n # Check data, sampler_kwargs and random_state\n check_target_type(y)\n\n self.sampler_kwargs_ = check_type(\n sampler_kwargs, 'sampler_kwargs', dict)\n\n random_state = check_random_state(self.random_state)\n\n # Convert data (X is required to be 2d and indexable)\n check_x_y_args = {\n 'accept_sparse': ['csr', 'csc'],\n 'dtype': None,\n 'force_all_finite': False,\n 'multi_output': True,\n }\n X, y = self._validate_data(X, y, **check_x_y_args)\n \n # Check evaluation data\n self.eval_datasets_ = check_eval_datasets(eval_datasets, X, y, **check_x_y_args)\n \n # Check evaluation metrics\n self.eval_metrics_ = check_eval_metrics(eval_metrics)\n\n # Check verbose\n self.train_verbose_ = check_train_verbose(\n train_verbose, self.n_estimators, **self._properties)\n self._init_training_log_format()\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X, dtype=None)\n\n # Remap output\n n_samples, self.n_features_in_ = X.shape\n self._n_samples = n_samples\n y = self._validate_y(y)\n\n # Check parameters\n self._validate_estimator()\n\n # Validate max_samples\n if max_samples is None:\n max_samples = self.max_samples\n if not isinstance(max_samples, numbers.Integral):\n max_samples = int(max_samples * X.shape[0])\n\n if not (0 < max_samples <= X.shape[0]):\n raise ValueError(\"max_samples must be in (0, n_samples]\")\n\n # Store validated integer row sampling value\n self._max_samples = max_samples\n\n # Validate max_features\n if isinstance(self.max_features, numbers.Integral):\n max_features = self.max_features\n elif isinstance(self.max_features, float):\n max_features = self.max_features * self.n_features_in_\n else:\n raise ValueError(\"max_features must be int or float\")\n\n if not (0 < max_features <= self.n_features_in_):\n raise ValueError(\"max_features must be in (0, n_features]\")\n\n max_features = max(1, int(max_features))\n\n # Store validated integer feature sampling value\n self._max_features = max_features\n\n # Other checks\n if not self.bootstrap and self.oob_score:\n raise ValueError(\"Out of bag estimation only available\"\n \" if bootstrap=True\")\n\n if self.warm_start and self.oob_score:\n raise ValueError(\"Out of bag estimate only available\"\n \" if warm_start=False\")\n\n if hasattr(self, \"oob_score_\") and self.warm_start:\n del self.oob_score_\n\n if not self.warm_start or not hasattr(self, 'estimators_'):\n # Free allocated memory, if any\n self.estimators_ = []\n self.estimators_features_ = []\n self.estimators_n_training_samples_ = []\n\n n_more_estimators = self.n_estimators - len(self.estimators_)\n\n if n_more_estimators < 0:\n raise ValueError('n_estimators=%d must be larger or equal to '\n 'len(estimators_)=%d when warm_start==True'\n % (self.n_estimators, len(self.estimators_)))\n\n elif n_more_estimators == 0:\n warn(\"Warm-start fitting without increasing n_estimators does not \"\n \"fit new trees.\")\n return self\n\n # Parallel loop\n n_jobs, n_estimators, starts = _partition_estimators(n_more_estimators,\n self.n_jobs)\n total_n_estimators = sum(n_estimators)\n\n # Advance random state to state after training\n # the first n_estimators\n if self.warm_start and len(self.estimators_) > 0:\n random_state.randint(MAX_INT, size=len(self.estimators_))\n\n seeds = 
random_state.randint(MAX_INT, size=n_more_estimators)\n self._seeds = seeds\n\n all_results = Parallel(n_jobs=n_jobs, verbose=self.verbose,\n **self._parallel_args())(\n delayed(_parallel_build_estimators)(\n n_estimators[i],\n self,\n X,\n y,\n sample_weight,\n seeds[starts[i]:starts[i + 1]],\n total_n_estimators,\n verbose=self.verbose)\n for i in range(n_jobs))\n\n # Reduce\n self.estimators_ += list(itertools.chain.from_iterable(\n t[0] for t in all_results))\n self.estimators_features_ += list(itertools.chain.from_iterable(\n t[1] for t in all_results))\n self.estimators_n_training_samples_ += list(itertools.chain.from_iterable(\n t[2] for t in all_results))\n\n if self.oob_score:\n self._set_oob_score(X, y)\n \n # Print training infomation to console.\n self._training_log_to_console()\n\n return self\n\n\n @abstractmethod\n def fit(self, X, y, sample_weight, **kwargs):\n \"\"\"Needs to be implemented in the derived class\"\"\"\n pass\n\n\n @FuncGlossarySubstitution(_super.predict_log_proba, 'classes_')\n def predict_log_proba(self, X):\n return super().predict_log_proba(X)\n\n\n @FuncGlossarySubstitution(_super.predict_proba, 'classes_')\n def predict_proba(self, X):\n return super().predict_proba(X)\n\n\n @FuncGlossarySubstitution(_super.predict_proba, 'classes_')\n def set_params(self, **params):\n return super().set_params(**params)\n",
"\"\"\"Transform a dataset into an imbalanced dataset.\"\"\"\n# Adapted from imbalanced-learn\n\n# Authors: Dayvid Oliveira\n# Guillaume Lemaitre <[email protected]>\n# Christos Aridas\n# Zhining Liu <[email protected]>\n# License: MIT\n\nfrom collections import Counter\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import train_test_split\n\nfrom ..sampler.under_sampling import RandomUnderSampler\nfrom ..utils import check_sampling_strategy\nfrom ..utils._validation import _deprecate_positional_args\n\n\n@_deprecate_positional_args\ndef make_imbalance(\n X, y, *, sampling_strategy=None, random_state=None, verbose=False, **kwargs\n):\n \"\"\"Turns a dataset into an imbalanced dataset with a specific sampling\n strategy.\n\n A simple toy dataset to visualize clustering and classification\n algorithms.\n\n Read more in the `User Guide <https://imbalanced-learn.org/stable/datasets/index.html#make-imbalanced>`_.\n\n Parameters\n ----------\n X : {array-like, dataframe} of shape (n_samples, n_features)\n Matrix containing the data to be imbalanced.\n\n y : ndarray of shape (n_samples,)\n Corresponding label for each sample in X.\n\n sampling_strategy : dict or callable,\n Ratio to use for resampling the data set.\n\n - When ``dict``, the keys correspond to the targeted classes. The\n values correspond to the desired number of samples for each targeted\n class.\n\n - When callable, function taking ``y`` and returns a ``dict``. The keys\n correspond to the targeted classes. The values correspond to the\n desired number of samples for each class.\n\n random_state : int, RandomState instance or None, default=None\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by np.random.\n\n verbose : bool, default=False\n Show information regarding the sampling.\n\n kwargs : dict\n Dictionary of additional keyword arguments to pass to\n ``sampling_strategy``.\n\n Returns\n -------\n X_resampled : {ndarray, dataframe} of shape (n_samples_new, n_features)\n The array containing the imbalanced data.\n\n y_resampled : ndarray of shape (n_samples_new)\n The corresponding label of `X_resampled`.\n\n Notes\n -----\n See :ref:`sphx_glr_auto_examples_datasets_plot_make_imbalance.py` for an example.\n\n Examples\n --------\n >>> from collections import Counter\n >>> from sklearn.datasets import load_iris\n >>> from imbalanced_ensemble.datasets import make_imbalance\n\n >>> data = load_iris()\n >>> X, y = data.data, data.target\n >>> print(f'Distribution before imbalancing: {Counter(y)}')\n Distribution before imbalancing: Counter({0: 50, 1: 50, 2: 50})\n >>> X_res, y_res = make_imbalance(X, y,\n ... sampling_strategy={0: 10, 1: 20, 2: 30},\n ... random_state=42)\n >>> print(f'Distribution after imbalancing: {Counter(y_res)}')\n Distribution after imbalancing: Counter({2: 30, 1: 20, 0: 10})\n \"\"\"\n target_stats = Counter(y)\n # restrict ratio to be a dict or a callable\n if isinstance(sampling_strategy, dict) or callable(sampling_strategy):\n sampling_strategy_ = check_sampling_strategy(\n sampling_strategy, y, \"under-sampling\", **kwargs\n )\n else:\n raise ValueError(\n f\"'sampling_strategy' has to be a dictionary or a \"\n f\"function returning a dictionary. 
Got {type(sampling_strategy)} \"\n f\"instead.\"\n )\n\n if verbose:\n print(f\"The original target distribution in the dataset is: {target_stats}\")\n rus = RandomUnderSampler(\n sampling_strategy=sampling_strategy_,\n replacement=False,\n random_state=random_state,\n )\n X_resampled, y_resampled = rus.fit_resample(X, y)\n if verbose:\n print(f\"Make the dataset imbalanced: {Counter(y_resampled)}\")\n\n return X_resampled, y_resampled\n\n \ndef generate_imbalance_data(n_samples=200, weights=[.9,.1], \n test_size=.5, random_state=None, kwargs={}):\n \"\"\"Generate a random n-classes imbalanced classification problem.\n\n Returns the training and test data and labels.\n\n Parameters\n ----------\n n_samples : int, default=100\n The number of samples.\n\n weights : array-like of shape (n_classes,), default=[.9,.1]\n The proportions of samples assigned to each class, i.e., \n it determines the imbalance ratio between classes.\n If None, then classes are balanced.\n Note that the number of class will be automatically set\n to the length of weights.\n\n test_size : float or int, default=None\n If float, should be between 0.0 and 1.0 and represent the \n proportion of the dataset to include in the test split. \n If int, represents the absolute number of test samples. \n\n random_state : int, RandomState instance or None, default=None\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by np.random.\n\n kwargs : dict\n Dictionary of additional keyword arguments to pass to\n ``sklearn.datasets.make_classification``. \n Please see details `here <https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html#sklearn.datasets.make_classification>`_.\n\n Returns\n -------\n X_train : {ndarray, dataframe} of shape (n_samples*(1-test_size), n_features)\n The array containing the imbalanced training data.\n\n X_test : {ndarray, dataframe} of shape (n_samples*test_size, n_features)\n The array containing the imbalanced test data.\n\n y_train : ndarray of shape (n_samples*(1-test_size))\n The corresponding label of `X_train`.\n\n y_test : ndarray of shape (n_samples*test_size)\n The corresponding label of `X_test`.\n\n \"\"\"\n X, y = make_classification(\n n_classes=len(weights), \n n_samples=n_samples, \n weights=weights,\n random_state=random_state,\n **kwargs\n )\n return train_test_split(\n X, y, test_size=test_size, stratify=y,\n random_state=random_state)",
"\"\"\"KmeansSMOTEBoostClassifier: Kmeans-SMOTE over-sampling \nintegrated in the learning of AdaBoost.\n\"\"\"\n\n# Authors: Zhining Liu <[email protected]>\n# License: MIT\n\n# %%\n\n\nimport numbers\n\n\nfrom .._boost import ResampleBoostClassifier\nfrom ...sampler.over_sampling import KMeansSMOTE\nfrom ...utils._validation_param import check_type\nfrom ...utils._validation import _deprecate_positional_args\nfrom ...utils._docstring import (Substitution, FuncSubstitution, \n _get_parameter_docstring, \n _get_example_docstring)\n\n# # For local test\n# import sys\n# sys.path.append(\"../..\")\n# from ensemble._boost import ResampleBoostClassifier\n# from sampler.over_sampling import KMeansSMOTE\n# from utils._validation_param import check_type\n# from utils._validation import _deprecate_positional_args\n# from utils._docstring import (Substitution, FuncSubstitution, \n# _get_parameter_docstring, \n# _get_example_docstring)\n\n\n# Properties\n_method_name = 'KmeansSMOTEBoostClassifier'\n_sampler_class = KMeansSMOTE\n\n_solution_type = ResampleBoostClassifier._solution_type\n_sampling_type = 'over-sampling'\n_ensemble_type = ResampleBoostClassifier._ensemble_type\n_training_type = ResampleBoostClassifier._training_type\n\n_properties = {\n 'solution_type': _solution_type,\n 'sampling_type': _sampling_type,\n 'ensemble_type': _ensemble_type,\n 'training_type': _training_type,\n}\n\n\n@Substitution(\n n_jobs_sampler=_get_parameter_docstring('n_jobs_sampler', **_properties),\n early_termination=_get_parameter_docstring('early_termination', **_properties),\n random_state=_get_parameter_docstring('random_state', **_properties),\n example=_get_example_docstring(_method_name)\n)\nclass KmeansSMOTEBoostClassifier(ResampleBoostClassifier):\n \"\"\"Kmeans-SMOTE over-sampling integrated in the learning of AdaBoost.\n\n KmeansSMOTEBoost is similar to SMOTEBoost [1]_, but use KmeansSMOTE \n instead of SMOTE. It alleviates the problem of class balancing by \n KmeansSMOTE over-sampling the sample at each iteration of the boosting algorithm.\n\n This KmeansSMOTEBoost implementation supports multi-class classification.\n\n Parameters\n ----------\n base_estimator : estimator object, default=None\n The base estimator from which the boosted ensemble is built.\n Support for sample weighting is required, as well as proper\n ``classes_`` and ``n_classes_`` attributes. If ``None``, then\n the base estimator is ``DecisionTreeClassifier(max_depth=1)``.\n\n n_estimators : int, default=50\n The maximum number of estimators at which boosting is terminated.\n In case of perfect fit, the learning procedure is stopped early.\n\n k_neighbors : int or object, default=2\n If ``int``, number of nearest neighbors to used to construct synthetic\n samples in KmeansSMOTE. If object, an estimator that inherits from\n :class:`~sklearn.neighbors.base.KNeighborsMixin` that will be used to\n find the k_neighbors.\n \n {n_jobs_sampler}\n\n kmeans_estimator : int or object, default=None\n A KMeans instance or the number of clusters to be used. By default,\n we used a :class:`~sklearn.cluster.MiniBatchKMeans` which tend to be\n better with large number of samples.\n\n cluster_balance_threshold : \"auto\" or float, default=\"auto\"\n The threshold at which a cluster is called balanced and where samples\n of the class selected for SMOTE will be oversampled. 
If \"auto\", this\n will be determined by the ratio for each class, or it can be set\n manually.\n\n density_exponent : \"auto\" or float, default=\"auto\"\n This exponent is used to determine the density of a cluster. Leaving\n this to \"auto\" will use a feature-length based exponent.\n\n learning_rate : float, default=1.0\n Learning rate shrinks the contribution of each classifier by\n ``learning_rate``. There is a trade-off between ``learning_rate`` and\n ``n_estimators``.\n\n algorithm : {{'SAMME', 'SAMME.R'}}, default='SAMME.R'\n If 'SAMME.R' then use the SAMME.R real boosting algorithm.\n ``base_estimator`` must support calculation of class probabilities.\n If 'SAMME' then use the SAMME discrete boosting algorithm.\n The SAMME.R algorithm typically converges faster than SAMME,\n achieving a lower test error with fewer boosting iterations.\n \n {early_termination}\n\n {random_state}\n\n Attributes\n ----------\n base_estimator_ : estimator\n The base estimator from which the ensemble is grown.\n\n base_sampler_ : KMeansSMOTE\n The base sampler.\n\n estimators_ : list of classifiers\n The collection of fitted sub-estimators.\n\n samplers_ : list of KMeansSMOTE\n The collection of used samplers.\n\n classes_ : ndarray of shape (n_classes,)\n The classes labels.\n\n n_classes_ : int\n The number of classes.\n\n estimator_weights_ : ndarray of shape (n_estimator,)\n Weights for each estimator in the boosted ensemble.\n\n estimator_errors_ : ndarray of shape (n_estimator,)\n Classification error for each estimator in the boosted\n ensemble.\n \n estimators_n_training_samples_ : list of ints\n The number of training samples for each fitted \n base estimators.\n\n feature_importances_ : ndarray of shape (n_features,)\n The feature importances if supported by the ``base_estimator``.\n\n See Also\n --------\n SMOTEBoostClassifier : SMOTE over-sampling integrated in AdaBoost.\n\n OverBoostClassifier : Random over-sampling integrated in AdaBoost.\n \n SMOTEBaggingClassifier : Bagging with intergrated SMOTE over-sampling.\n\n References\n ----------\n .. [1] Chawla, N. V., Lazarevic, A., Hall, L. O., & Bowyer, K. W. \n \"SMOTEBoost: Improving prediction of the minority class in boosting.\" \n European conference on principles of data mining and knowledge discovery. 
\n Springer, Berlin, Heidelberg, (2003): 107-119.\n\n Examples\n --------\n {example}\n \"\"\"\n\n @_deprecate_positional_args\n def __init__(self,\n base_estimator=None,\n n_estimators:int=50,\n *,\n k_neighbors:int=2,\n n_jobs_sampler=None,\n kmeans_estimator=None,\n cluster_balance_threshold=\"auto\",\n density_exponent=\"auto\",\n learning_rate:float=1.,\n algorithm:str='SAMME.R',\n early_termination:bool=False,\n random_state=None):\n \n base_sampler = _sampler_class()\n sampling_type = _sampling_type\n\n super(KmeansSMOTEBoostClassifier, self).__init__(\n base_estimator=base_estimator,\n n_estimators=n_estimators,\n base_sampler=base_sampler,\n sampling_type=sampling_type,\n learning_rate=learning_rate,\n algorithm=algorithm,\n early_termination=early_termination,\n random_state=random_state)\n \n self.__name__ = _method_name\n self._sampling_type = _sampling_type\n self._sampler_class = _sampler_class\n self._properties = _properties\n\n self.k_neighbors = k_neighbors\n self.k_neighbors_ = check_type(k_neighbors, 'k_neighbors', numbers.Integral)\n self.n_jobs_sampler = n_jobs_sampler\n self.kmeans_estimator = kmeans_estimator\n self.cluster_balance_threshold = cluster_balance_threshold\n self.density_exponent = density_exponent\n\n\n @_deprecate_positional_args\n @FuncSubstitution(\n target_label=_get_parameter_docstring('target_label', **_properties),\n n_target_samples=_get_parameter_docstring('n_target_samples', **_properties),\n balancing_schedule=_get_parameter_docstring('balancing_schedule'),\n eval_datasets=_get_parameter_docstring('eval_datasets'),\n eval_metrics=_get_parameter_docstring('eval_metrics'),\n train_verbose=_get_parameter_docstring('train_verbose', **_properties),\n )\n def fit(self, X, y, \n *,\n sample_weight=None, \n target_label:int=None, \n n_target_samples:int or dict=None, \n balancing_schedule:str or function='uniform',\n eval_datasets:dict=None,\n eval_metrics:dict=None,\n train_verbose:bool or int or dict=False,\n ):\n \"\"\"Build a KmeansSMOTEBoost classifier from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. DOK and LIL are converted to CSR.\n\n y : array-like of shape (n_samples,)\n The target values (class labels).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. 
If None, the sample weights are initialized to\n ``1 / n_samples``.\n \n %(target_label)s\n \n %(n_target_samples)s\n \n %(balancing_schedule)s\n \n %(eval_datasets)s\n \n %(eval_metrics)s\n \n %(train_verbose)s\n\n Returns\n -------\n self : object\n \"\"\"\n \n kmeans_smote_sampler_kwargs = {\n 'k_neighbors': self.k_neighbors_,\n 'n_jobs': self.n_jobs_sampler,\n 'kmeans_estimator': self.kmeans_estimator,\n 'cluster_balance_threshold': self.cluster_balance_threshold,\n 'density_exponent': self.density_exponent,\n }\n update_x_y_after_resample = True\n \n return self._fit(X, y, \n sample_weight=sample_weight, \n sampler_kwargs=kmeans_smote_sampler_kwargs,\n update_x_y_after_resample=update_x_y_after_resample,\n target_label=target_label, \n n_target_samples=n_target_samples, \n balancing_schedule=balancing_schedule,\n eval_datasets=eval_datasets,\n eval_metrics=eval_metrics,\n train_verbose=train_verbose,\n )\n\n\n# %%\n\nif __name__ == '__main__':\n from collections import Counter\n from copy import copy\n from sklearn.tree import DecisionTreeClassifier\n from sklearn.datasets import make_classification\n from sklearn.model_selection import train_test_split\n from sklearn.metrics import accuracy_score, balanced_accuracy_score, f1_score\n \n # X, y = make_classification(n_classes=2, class_sep=2, # 2-class\n # weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,\n # n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)\n X, y = make_classification(n_classes=3, class_sep=2, # 3-class\n weights=[0.1, 0.3, 0.6], n_informative=3, n_redundant=1, flip_y=0,\n n_features=20, n_clusters_per_class=1, n_samples=2000, random_state=10)\n\n X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.5, random_state=42)\n\n origin_distr = dict(Counter(y_train)) # {2: 600, 1: 300, 0: 100}\n print('Original training dataset shape %s' % origin_distr)\n\n init_kwargs_default = {\n 'base_estimator': None,\n # 'base_estimator': DecisionTreeClassifier(max_depth=2),\n 'n_estimators': 100,\n 'k_neighbors': 5,\n 'learning_rate': 1.,\n 'algorithm': 'SAMME.R',\n 'random_state': 10,\n # 'random_state': None,\n }\n fit_kwargs_default = {\n 'X': X_train,\n 'y': y_train,\n 'sample_weight': None,\n 'target_label': None,\n 'n_target_samples': None,\n # 'n_target_samples': target_distr,\n 'balancing_schedule': 'uniform',\n 'eval_datasets': {'valid': (X_valid, y_valid)},\n 'eval_metrics': {\n 'acc': (accuracy_score, {}),\n 'balanced_acc': (balanced_accuracy_score, {}),\n 'weighted_f1': (f1_score, {'average':'weighted'}),},\n 'train_verbose': {\n 'granularity': 10,\n 'print_distribution': True,\n 'print_metrics': True,},\n }\n\n ensembles = {}\n\n init_kwargs, fit_kwargs = copy(init_kwargs_default), copy(fit_kwargs_default)\n kmsmoteboost = KmeansSMOTEBoostClassifier(**init_kwargs).fit(**fit_kwargs)\n ensembles['kmsmoteboost'] = kmsmoteboost\n\n init_kwargs, fit_kwargs = copy(init_kwargs_default), copy(fit_kwargs_default)\n fit_kwargs.update({\n 'balancing_schedule': 'progressive'\n })\n kmsmoteboost_prog = KmeansSMOTEBoostClassifier(**init_kwargs).fit(**fit_kwargs)\n ensembles['kmsmoteboost_prog'] = kmsmoteboost_prog\n\n\n # %%\n from imbalanced_ensemble.visualizer import ImbalancedEnsembleVisualizer\n\n visualizer = ImbalancedEnsembleVisualizer(\n eval_datasets = None,\n eval_metrics = None,\n ).fit(\n ensembles = ensembles,\n granularity = 5,\n )\n fig, axes = visualizer.performance_lineplot(\n on_ensembles=None,\n on_datasets=None,\n split_by=[],\n n_samples_as_x_axis=False,\n 
sub_figsize=(4, 3.3),\n sup_title=True,\n alpha=0.8,\n )\n fig, axes = visualizer.confusion_matrix_heatmap(\n on_ensembles=None,\n on_datasets=None,\n sub_figsize=(4, 3.3),\n )\n\n # %%",
"\"\"\"\n=========================================================\nCustomize resampling target\n=========================================================\n\nThis example demonstrates how to customize the resampling target to achieve advanced resampling control.\nThis can be easily done by setting the \"target_label\" and \"n_target_samples\" parameter when calling the \"fit()\" method. \n\nNote that this feature only applies to resampling-based ensemble classifiers that are iteratively trained.\n\nThis example uses:\n\n - :class:`imbalanced_ensemble.ensemble.SelfPacedEnsembleClassifier`\n - :class:`imbalanced_ensemble.ensemble.SMOTEBoostClassifier`\n\"\"\"\n\n# Authors: Zhining Liu <[email protected]>\n# License: MIT\n\n# %%\nprint(__doc__)\n\n# Import imbalanced_ensemble\nimport imbalanced_ensemble as imbens\n\n# Import utilities\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import train_test_split\nfrom imbalanced_ensemble.ensemble.base import sort_dict_by_key\nfrom collections import Counter\n\n# Import plot utilities\nfrom imbalanced_ensemble.utils._plot import set_ax_border\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_context('talk')\n\nRANDOM_STATE = 42\n\ninit_kwargs = {\n 'n_estimators': 1,\n 'random_state': RANDOM_STATE,\n}\nfit_kwargs = {\n 'train_verbose': {\n 'print_metrics': False,\n },\n}\n\n# sphinx_gallery_thumbnail_number = -1\n\n# %% [markdown]\n# Prepare data\n# ------------------------------\n# Make a toy 3-class imbalanced classification task.\n\n# Generate and split a synthetic dataset\nX, y = make_classification(n_classes=3, n_samples=2000, class_sep=2,\n weights=[0.1, 0.3, 0.6], n_informative=3, n_redundant=1, flip_y=0,\n n_features=20, n_clusters_per_class=2, random_state=RANDOM_STATE)\nX_train, X_valid, y_train, y_valid = train_test_split(X, y, \n test_size=0.5, stratify=y, random_state=RANDOM_STATE)\n\n# Print class distribution\nprint('Training dataset distribution %s' % sort_dict_by_key(Counter(y_train)))\nprint('Validation dataset distribution %s' % sort_dict_by_key(Counter(y_valid)))\n\n# %% [markdown]\n# Implement some plot utilities\n\nylim = (0, 630)\n\nall_distribution = {}\n\ndef plot_class_distribution(distr:dict, xlabel:str='Class Label', \n ylabel:str='Number of samples', **kwargs):\n distr = dict(sorted(distr.items(), key=lambda k: k[0], reverse=True))\n ax = sns.barplot(\n x=list(distr.keys()), \n y=list(distr.values()),\n order=list(distr.keys()),\n **kwargs\n )\n set_ax_border(ax)\n ax.grid(axis='y', alpha=0.5, ls='-.')\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n return ax\n\ndef plot_class_distribution_comparison(clf, \n title1='Original imbalanced class distribution', \n title2='After resampling', figsize=(12, 6)):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize)\n plot_class_distribution(clf.origin_distr_, ax=ax1)\n ax1.set(ylim=ylim, title=title1)\n plot_class_distribution(clf.target_distr_, ax=ax2)\n ax2.set(ylim=ylim, title=title2)\n fig.tight_layout()\n\n\n# %% [markdown]\n# Default under-sampling\n# ----------------------------\n# By default, under-sampling-based ensemble methods will consider the smallest class as the minority class (class 0 with 100 samples). \n# All other classes (class 1 and 2) will be considered as majority classes and will be under-sampled until the number of samples is equalized. 
\n\n# %% [markdown]\n# Take ``SelfPacedEnsembleClassifier`` as example\n\nspe_clf = imbens.ensemble.SelfPacedEnsembleClassifier(**init_kwargs)\n\n\n# %% [markdown]\n# **Train with the default under-sampling setting**\n\nspe_clf.fit(X_train, y_train, **fit_kwargs)\n\nall_distribution['Before under-sampling'] = spe_clf.origin_distr_\nresampling_type='After default under-sampling'\nall_distribution[resampling_type] = spe_clf.target_distr_\nplot_class_distribution_comparison(spe_clf, title2=resampling_type)\n\n\n# %% [markdown]\n# Specify the class targeted by the under-sampling\n# -------------------------------------------------\n# **Set parameter ``target_label``: int** \n# All other classes that have more samples than the target class will be considered as majority classes. \n# They will be under-sampled until the number of samples is equalized. \n# The remaining minority classes (if any) will stay unchanged.\n\nspe_clf.fit(X_train, y_train, \n target_label=1, # target class 1\n **fit_kwargs)\n\nresampling_type='After under-sampling (target class 1)'\nall_distribution[resampling_type] = spe_clf.target_distr_\nplot_class_distribution_comparison(spe_clf, title2=resampling_type)\n\n\n# %% [markdown]\n# Specify the desired number of samples after under-sampling\n# -----------------------------------------------------------\n# **Set parameter ``n_target_samples``: int or dict** \n# If int, all classes that have more than the n_target_samples samples will be under-sampled until the number of samples is equalized.\n\nspe_clf.fit(X_train, y_train, \n n_target_samples=200, # target number of samples 200\n **fit_kwargs)\n\nresampling_type='After under-sampling (target number 200)'\nall_distribution[resampling_type] = spe_clf.target_distr_\nplot_class_distribution_comparison(spe_clf, title2=resampling_type)\n\n\n# %% [markdown]\n# Specify the desired number of samples of each class after under-sampling\n# ------------------------------------------------------------------------\n# **Set parameter ``n_target_samples``: int or dict** \n# If dict, the keys correspond to the targeted classes. The values correspond to the desired number of samples for each targeted class.\n\nspe_clf.fit(X_train, y_train, \n n_target_samples={\n 0: 80,\n 1: 200,\n 2: 400,\n }, # target number of samples\n **fit_kwargs)\n\nresampling_type='After under-sampling \\n(target number {0: 80, 1: 200, 2: 400})'\nall_distribution[resampling_type] = spe_clf.target_distr_\nplot_class_distribution_comparison(spe_clf, title2=resampling_type)\n\n\n# %% [markdown]\n# Over-sampling\n# ----------------------------\n# By default, over-sampling-based ensemble methods will consider the largest class as the majority class (class 2 with 600 samples). 
\n# All other classes (class 0 and 1) will be considered as minority classes and will be over-sampled until the number of samples is equalized.\n\n# %% [markdown]\n# **The over-sampling schedule can be customized in the same way as under-sampling.**\n\n# %% [markdown]\n# Take ``SMOTEBoostClassifier`` as example\n\nsmoteboost_clf = imbens.ensemble.SMOTEBoostClassifier(**init_kwargs)\n\n\n# %% [markdown]\n# **Train with the default under-sampling setting**\n\nsmoteboost_clf.fit(X_train, y_train, **fit_kwargs)\n\nall_distribution['Before over-sampling'] = smoteboost_clf.origin_distr_\nresampling_type='After default over-sampling'\nall_distribution[resampling_type] = smoteboost_clf.target_distr_\nplot_class_distribution_comparison(smoteboost_clf, title2=resampling_type)\n\n\n# %% [markdown]\n# **Specify the class targeted by the over-sampling**\n\nsmoteboost_clf.fit(X_train, y_train, \n target_label=1, # target class 1\n **fit_kwargs)\n\nresampling_type='After over-sampling (target class 1)'\nall_distribution[resampling_type] = smoteboost_clf.target_distr_\nplot_class_distribution_comparison(smoteboost_clf, title2=resampling_type)\n\n\n# %% [markdown]\n# **Specify the desired number of samples after over-sampling**\n\nsmoteboost_clf.fit(X_train, y_train, \n n_target_samples=400, # target number of samples 400\n **fit_kwargs)\n\nresampling_type='After over-sampling (target number 400)'\nall_distribution[resampling_type] = smoteboost_clf.target_distr_\nplot_class_distribution_comparison(smoteboost_clf, title2=resampling_type)\n\n\n# %% [markdown]\n# **Specify the desired number of samples of each class after over-sampling**\n\nsmoteboost_clf.fit(X_train, y_train, \n n_target_samples={\n 0: 200,\n 1: 400,\n 2: 600,\n }, # target number of samples\n **fit_kwargs)\n\nresampling_type='After over-sampling \\n(target number {0: 200, 1: 400, 2: 600})'\nall_distribution[resampling_type] = smoteboost_clf.target_distr_\nplot_class_distribution_comparison(smoteboost_clf, title2=resampling_type)\n\n\n# %% [markdown]\n# Visualize different resampling target\n# ---------------------------------------\n\nsns.set_context('notebook')\nfig, axes = plt.subplots(2, 5, figsize=(20, 8))\nfor ax, title in zip(axes.flatten(), list(all_distribution.keys())):\n plot_class_distribution(all_distribution[title], ax=ax, palette=\"Blues_d\")\n ax.set(ylim=ylim, title=title)\nfig.tight_layout()\n"
] |
[
[
"sklearn.utils._testing.SkipTest"
],
[
"sklearn.utils.validation._check_sample_weight",
"sklearn.utils.random.sample_without_replacement",
"sklearn.utils.validation.has_fit_parameter",
"sklearn.utils.fixes.delayed",
"numpy.ones",
"sklearn.base.clone",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.ensemble._base._partition_estimators",
"numpy.where",
"sklearn.utils.check_random_state"
],
[
"sklearn.model_selection.train_test_split"
],
[
"sklearn.datasets.make_classification",
"sklearn.model_selection.train_test_split"
],
[
"matplotlib.pyplot.subplots",
"sklearn.datasets.make_classification",
"sklearn.model_selection.train_test_split"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Carles-Figuerola/Vitis-AI
|
[
"611b82cfc32ea2fe04491432bf8feed1f378c9de",
"611b82cfc32ea2fe04491432bf8feed1f378c9de",
"e86b6efae11f8703ee647e4a99004dc980b84989",
"611b82cfc32ea2fe04491432bf8feed1f378c9de",
"611b82cfc32ea2fe04491432bf8feed1f378c9de",
"611b82cfc32ea2fe04491432bf8feed1f378c9de"
] |
[
"dsa/DPU-for-RNN/app/customer_satisfaction/run_cpu_e2e.py",
"tools/RNN/rnn_quantizer/example/new_api_resnet18_quant.py",
"tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/layers/conv_batchnorm_test_utils.py",
"demo/Whole-App-Acceleration/resnet50_mt_py_waa/resnet50.py",
"tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/pruning/utils.py",
"tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/layers/vitis_quantize_layer.py"
] |
[
"\"\"\"\nCopyright 2019 Xilinx Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport time \n\nfilename = \"model/car_rental_training_data.csv\"\n\ndata = pd.read_csv(filename, sep=';')\ndata.head()\n\ncomplain_data = data[['Customer_Service', 'Satisfaction']]\n\n#print(complain_data.count())\n\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.python import keras\nfrom tensorflow.python.keras import Sequential\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.python.keras.preprocessing.text import Tokenizer\nimport tensorflow as tf\n\nimport datetime\n\nmax_features = 500\nhidden_size = 100\n\nfor idx, row in complain_data.iterrows():\n row[0] = row[0].replace('rt',' ')\n\ntokenizer = Tokenizer(num_words=max_features, split=' ')\ntokenizer.fit_on_texts(complain_data['Customer_Service'].values)\nX = tokenizer.texts_to_sequences(complain_data['Customer_Service'].values)\n\nmaxlen = 50\nX = pad_sequences(X, maxlen=maxlen)\n\nY = complain_data['Satisfaction'].values\nX_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.33, random_state = 42)\n\n#print(X_train.shape,Y_train.shape)\n#print(X_test.shape,Y_test.shape)\n\nembedding_vector_length = 32\n\nmodel = Sequential()\nmodel.add(layers.Embedding(max_features, embedding_vector_length, input_length=maxlen))\nmodel.add(layers.Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))\nmodel.add(layers.MaxPooling1D(pool_size=2))\nmodel.add(layers.LSTM(100, recurrent_activation='sigmoid'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\n\nfilename = './model/complain_model.h5'\nis_training = False\nif is_training:\n model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=20, batch_size=64)\n\n # Evaluate the model\n scores = model.evaluate(X_test, Y_test, verbose=0)\n print(\"Evaluation Accuracy: %.2f%%\" % (scores[1]*100))\n model.save(filename, save_format='tf')\nelse:\n model.load_weights(filename)\n\nfrom tensorflow.python.keras import backend as K\nimport datetime as dt\n#t1=time.time()\n# layers: [Embedding, Conv1D, MaxPooling1D, LSTM, Dense]\n#print(\"x_test size:\", X_test.shape)\nlstm_upstream = K.function([model.layers[0].input], [model.layers[2].output])\nlstm_input = lstm_upstream([X_test])[0]\n\nbatches = lstm_input.shape[0]\nlstm_output = np.zeros((batches, 100))\nlstm_tmp = np.zeros((1, 100))\nlstm = K.function([model.layers[3].input], [model.layers[3].output])\nt1=time.time()\nfor index, x in enumerate(lstm_input):\n lstm_input_batch1 = x.reshape(1,25,32)\n lstm_output[index] = lstm(lstm_input_batch1)[0]\n\n#lstm = K.function([model.layers[3].input], [model.layers[3].output])\n#lstm_start = dt.datetime.now()\n#lstm_output = lstm([lstm_input])[0]\n#lstm_finish = dt.datetime.now()\n#print('lstm foward time(secs):', (lstm_finish - 
lstm_start).total_seconds())\n#lstm_out = lstm_output_batch1.reshape((batches, 25, 100))\nlstm_downstream = Sequential()\nlstm_downstream.add(layers.Dense(1, activation='sigmoid'))\nlstm_downstream.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nlstm_downstream.build((1, 100))\nlstm_downstream.layers[0].set_weights(model.get_layer('dense').get_weights())\nscore = lstm_downstream.evaluate(lstm_output, Y_test, verbose=0)\nt2=time.time()\nprint('Accuracy:', score[1])\nprint('E2E Time:', t2-t1)\n#print(tf.__version__)\n",
"import os\nimport re\nimport sys\nimport argparse\nimport time\nimport pdb\nimport random\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torchvision.models.resnet import resnet18\n\ntorch.set_default_dtype(torch.double)\n\nfrom nndct_shared.utils import print_center_edge\nfrom nndct_shared.utils import basic_info\nfrom nndct_shared.utils import check_diff\nfrom pytorch_nndct.apis import torch_quantizer, dump_xmodel\n\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n '--data_dir',\n default=\"/proj/rdi/staff/niuxj/imagenet/\",\n help='Data set directory')\nparser.add_argument(\n '--model_dir',\n #default=\"./thmodels/\",\n default=\"/proj/rdi/staff/wluo/UNIT_TEST/models\",\n help='Trained model file path. Download pretrained model from the following url and put it in model_dir specified path: https://download.pytorch.org/models/resnet18-5c106cde.pth'\n)\nparser.add_argument('--quant_mode', default=1, type=int)\nargs, _ = parser.parse_known_args()\n\ndef load_data(train=True,\n data_dir='dataset/imagenet',\n batch_size=128,\n subset_len=None,\n sample_method='random',\n distributed=False,\n model_name='resnet18',\n **kwargs):\n\n #prepare data\n # random.seed(12345)\n traindir = data_dir + '/train'\n valdir = data_dir + '/val'\n train_sampler = None\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n if model_name == 'inception_v3':\n size = 299\n resize = 299\n else:\n size = 224\n resize = 256\n if train:\n dataset = torchvision.datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n if subset_len:\n assert subset_len <= len(dataset)\n if sample_method == 'random':\n dataset = torch.utils.data.Subset(\n dataset, random.sample(range(0, len(dataset)), subset_len))\n else:\n dataset = torch.utils.data.Subset(dataset, list(range(subset_len)))\n if distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n data_loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=(train_sampler is None),\n sampler=train_sampler,\n **kwargs)\n else:\n dataset = torchvision.datasets.ImageFolder(valdir,)\n dataset = torchvision.datasets.ImageFolder(\n valdir,\n transforms.Compose([\n transforms.Resize(resize),\n transforms.CenterCrop(size),\n transforms.ToTensor(),\n normalize,\n ]))\n if subset_len:\n assert subset_len <= len(dataset)\n if sample_method == 'random':\n dataset = torch.utils.data.Subset(\n dataset, random.sample(range(0, len(dataset)), subset_len))\n else:\n dataset = torch.utils.data.Subset(dataset, list(range(subset_len)))\n data_loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=False, **kwargs)\n return data_loader, train_sampler\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions\n for the specified values 
of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\ndef evaluate(model, val_loader, loss_fn):\n\n model.eval()\n model = model.cuda()\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n total = 0\n Loss = 0\n for iteraction, (images, labels) in tqdm(\n enumerate(val_loader), total=len(val_loader)):\n images = images.cuda()\n images = images.cuda().to(dtype=torch.get_default_dtype())\n labels = labels.cuda()\n #pdb.set_trace()\n outputs = model(images)\n loss = loss_fn(outputs, labels)\n Loss += loss.item()\n total += images.size(0)\n acc1, acc5 = accuracy(outputs, labels, topk=(1, 5))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n return top1.avg, top5.avg, Loss / total\n\ndef quantization(title='optimize', model_name='', file_path='', quant_mode=1):\n\n batch_size = 32\n\n model = resnet18().cpu()\n model.load_state_dict(torch.load(file_path))\n\n input = torch.randn([batch_size, 3, 224, 224])\n if quant_mode < 1:\n quant_model = model\n else:\n ## new api\n ####################################################################################\n quantizer = torch_quantizer(\n 'calib', model, (input), output_dir='resnet18')\n\n quant_model = quantizer.quant_model\n #####################################################################################\n\n # to get loss value after evaluation\n loss_fn = torch.nn.CrossEntropyLoss().cuda()\n\n val_loader, _ = load_data(\n subset_len=100,\n train=False,\n batch_size=batch_size,\n sample_method='random',\n data_dir=args.data_dir,\n model_name=model_name)\n\n # record modules float model accuracy\n # add modules float model accuracy here\n acc_org1 = 0.0\n acc_org5 = 0.0\n loss_org = 0.0\n\n #register_modification_hooks(model_gen, train=False)\n acc1_gen, acc5_gen, loss_gen = evaluate(quant_model, val_loader, loss_fn)\n\n # handle quantization result\n if quant_mode > 0:\n quantizer.export_quant_config()\n if quant_mode == 2:\n dump_xmodel('resnet18', True)\n\n # logging accuracy\n if args.quant_mode == 2:\n basic_info(loss_gen, 'quantized model loss')\n basic_info(acc1_gen, 'quantized model top-1 accuracy')\n basic_info(acc5_gen, 'quantized model top-5 accuracy')\n elif args.quant_mode == 1:\n basic_info(loss_gen, 'calibration model loss')\n basic_info(acc1_gen, 'calibration model top-1 accuracy')\n basic_info(acc5_gen, 'calibration model top-5 accuracy')\n elif args.quant_mode == 0:\n basic_info(loss_gen, 'float model loss')\n basic_info(acc1_gen, 'float model top-1 accuracy')\n basic_info(acc5_gen, 'float model top-5 accuracy')\n\nif __name__ == '__main__':\n\n model_name = 'resnet18'\n file_path = os.path.join(args.model_dir, model_name + '.pth')\n\n feature_test = ' float model evaluation'\n if args.quant_mode > 0:\n feature_test = ' quantization'\n # force to merge BN with CONV for better quantization accuracy\n args.optimize = 1\n feature_test += ' with optimization'\n else:\n feature_test = ' float model evaluation'\n title = model_name + feature_test\n\n print_center_edge(\" Start {} test \".format(title))\n\n # calibration or evaluation\n quantization(\n title=title,\n model_name=model_name,\n file_path=file_path,\n quant_mode=args.quant_mode)\n\n print_center_edge(\" 
End of {} test \".format(title))\n",
"# Copyright 2019 Xilinx Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test utils for conv batchnorm folding.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow_model_optimization.python.core.quantization.keras.layers import conv_batchnorm\n\nkeras = tf.keras\n\n_ConvBatchNorm2D = conv_batchnorm._ConvBatchNorm2D # pylint: disable=protected-access\n_DepthwiseConvBatchNorm2D = conv_batchnorm._DepthwiseConvBatchNorm2D # pylint: disable=protected-access\n\n\ndef _get_conv2d_params():\n return {\n 'kernel_size': (3, 3),\n 'input_shape': (10, 10, 3),\n 'batch_size': 8,\n }\n\n\ndef _get_initializer(random_init):\n if random_init:\n kernel_initializer = keras.initializers.glorot_uniform()\n else:\n kernel_initializer = keras.initializers.glorot_uniform(seed=0)\n return kernel_initializer\n\n\nclass Conv2DModel(object):\n \"\"\"Construct and access Conv + BatchNorm + activation models.\"\"\"\n\n params = {\n 'filters': 2,\n 'kernel_size': (2, 2),\n 'input_shape': (3, 3, 3),\n 'batch_size': 1,\n }\n\n @classmethod\n def get_batched_input_shape(cls):\n \"\"\"Return input shape with batch size.\"\"\"\n shape = [cls.params['batch_size']]\n shape.extend(cls.params['input_shape'])\n return shape\n\n @classmethod\n def get_output_shape(cls):\n return [cls.params['batch_size'], 2, 2, 2]\n\n @classmethod\n def get_folded_batchnorm_model(cls,\n is_quantized=False,\n post_bn_activation=None):\n \"\"\"Return folded Conv2D + BN + optional activation model.\"\"\"\n return tf.keras.Sequential([\n _ConvBatchNorm2D(\n kernel_initializer=_get_initializer(random_init=False),\n is_quantized=is_quantized,\n post_activation=post_bn_activation,\n **cls.params)\n ])\n\n @classmethod\n def get_nonfolded_batchnorm_model(cls,\n post_bn_activation=None,\n model_type='sequential',\n random_init=False):\n \"\"\"Return nonfolded Conv2D + BN + optional activation model.\"\"\"\n if model_type == 'sequential':\n layers = [\n keras.layers.Conv2D(\n kernel_initializer=_get_initializer(random_init),\n use_bias=False,\n **cls.params),\n keras.layers.BatchNormalization(axis=-1),\n ]\n if post_bn_activation is not None:\n layers += post_bn_activation\n return tf.keras.Sequential(layers)\n else:\n inp = keras.layers.Input(cls.params['input_shape'],\n cls.params['batch_size'])\n x = keras.layers.Conv2D(\n cls.params['filters'],\n cls.params['kernel_size'],\n kernel_initializer=_get_initializer(random_init),\n use_bias=False)(\n inp)\n out = keras.layers.BatchNormalization(axis=-1)(x)\n if post_bn_activation is not None:\n out = post_bn_activation(out)\n return tf.keras.Model(inp, out)\n\n\nclass DepthwiseConv2DModel(Conv2DModel):\n \"\"\"Construct and access DepthwiseConv + BatchNorm + activation models.\"\"\"\n\n params = {\n 'kernel_size': (3, 3),\n 'input_shape': (10, 10, 3),\n 'batch_size': 8,\n }\n\n @classmethod\n def 
get_output_shape(cls):\n return [cls.params['batch_size'], 8, 8, 3]\n\n @classmethod\n def get_folded_batchnorm_model(cls,\n is_quantized=False,\n post_bn_activation=None):\n return tf.keras.Sequential([\n _DepthwiseConvBatchNorm2D(\n depthwise_initializer=_get_initializer(random_init=False),\n is_quantized=is_quantized,\n post_activation=post_bn_activation,\n **cls.params)\n ])\n\n @classmethod\n def get_nonfolded_batchnorm_model(cls,\n post_bn_activation=None,\n model_type='sequential',\n random_init=False):\n if model_type == 'sequential':\n layers = [\n keras.layers.DepthwiseConv2D(\n depthwise_initializer=_get_initializer(random_init),\n use_bias=False,\n **cls.params),\n keras.layers.BatchNormalization(axis=-1),\n ]\n if post_bn_activation is not None:\n layers += post_bn_activation\n return tf.keras.Sequential(layers)\n else:\n inp = keras.layers.Input(cls.params['input_shape'],\n cls.params['batch_size'])\n x = keras.layers.DepthwiseConv2D(\n cls.params['kernel_size'],\n depthwise_initializer=_get_initializer(random_init),\n use_bias=False)(\n inp)\n out = keras.layers.BatchNormalization(axis=-1)(x)\n if post_bn_activation is not None:\n out = post_bn_activation(out)\n return tf.keras.Model(inp, out)\n",
"\"\"\"\nCopyright 2019 Xilinx Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom ctypes import *\nfrom typing import List\nimport cv2\nimport numpy as np\nimport xir\nimport vart\nimport os\nimport math\nimport threading\nimport time\nimport sys\n\n\"\"\"\nCalculate softmax\ndata: data to be calculated\nsize: data size\nreturn: softamx result\n\"\"\"\n\n\ndef CPUCalcSoftmax(data, size):\n sum = 0.0\n result = [0 for i in range(size)]\n for i in range(size):\n result[i] = math.exp(data[i])\n sum += result[i]\n for i in range(size):\n result[i] /= sum\n return result\n\n\ndef get_script_directory():\n path = os.getcwd()\n return path\n\n\n\"\"\"\nGet topk results according to its probability\ndatain: data result of softmax\nfilePath: filePath in witch that records the infotmation of kinds\n\"\"\"\n\n\ndef TopK(datain, size, filePath):\n\n cnt = [i for i in range(size)]\n pair = zip(datain, cnt)\n pair = sorted(pair, reverse=True)\n softmax_new, cnt_new = zip(*pair)\n fp = open(filePath, \"r\")\n data1 = fp.readlines()\n fp.close()\n for i in range(5):\n idx = 0\n for line in data1:\n if idx == cnt_new[i]:\n print(\"Top[%d] %d %s\" % (i, idx, (line.strip)(\"\\n\")))\n idx = idx + 1\n\"\"\"\npre-process for resnet50 (caffe)\n\"\"\"\n_B_MEAN = 104.0\n_G_MEAN = 107.0\n_R_MEAN = 123.0\nMEANS = [_B_MEAN,_G_MEAN,_R_MEAN]\nSCALES = [1.0, 1.0, 1.0]\n\ndef preprocess_one_image_fn(image_path, width=224, height=224):\n means = MEANS\n scales = SCALES\n image = cv2.imread(image_path)\n image = cv2.resize(image,(width, height))\n B, G, R = cv2.split(image)\n B = (B - means[0]) * scales[0]\n G = (G - means[1]) * scales[1]\n R = (R - means[2]) * scales[2]\n image = cv2.merge([B, G, R])\n return image\n\n\nSCRIPT_DIR = get_script_directory()\ncalib_image_dir = SCRIPT_DIR + \"/images/\"\nglobal threadnum\nthreadnum = 0\n\n\"\"\"\nrun resnt50 with batch\nrunner: dpu runner\nimg: imagelist to be run\ncnt: threadnum\n\"\"\"\n\n\ndef runResnet50(runner: \"Runner\", img, cnt):\n \"\"\"get tensor\"\"\"\n inputTensors = runner.get_input_tensors()\n outputTensors = runner.get_output_tensors()\n input_ndim = tuple(inputTensors[0].dims)\n pre_output_size = int(outputTensors[0].get_data_size() / input_ndim[0])\n\n\n output_ndim = tuple(outputTensors[0].dims)\n n_of_images = len(img)\n count = 0\n while count < cnt:\n runSize = input_ndim[0]\n \"\"\"prepare batch input/output \"\"\"\n inputData = [np.empty(input_ndim, dtype=np.float32, order=\"C\")]\n outputData = [np.empty(output_ndim, dtype=np.float32, order=\"C\")]\n\n \"\"\"init input image to input buffer \"\"\"\n for j in range(runSize):\n imageRun = inputData[0]\n imageRun[j, ...] 
= img[(count + j) % n_of_images].reshape(input_ndim[1:])\n\n \"\"\"run with batch \"\"\"\n job_id = runner.execute_async(inputData, outputData)\n runner.wait(job_id)\n\n\n \"\"\"softmax&TopK calculate with batch \"\"\"\n \"\"\"Benchmark DPU FPS performance over Vitis AI APIs execute_async() and wait() \"\"\"\n \"\"\"Uncomment the following code snippet to include softmax calculation for model’s end-to-end FPS evaluation \"\"\"\n #for j in range(runSize):\n # softmax = CPUCalcSoftmax(outputData[0][j], pre_output_size)\n # TopK(softmax, pre_output_size, \"./words.txt\")\n\n count = count + runSize\n\"\"\"\n obtain dpu subgrah\n\"\"\"\ndef get_child_subgraph_dpu(graph: \"Graph\") -> List[\"Subgraph\"]:\n assert graph is not None, \"'graph' should not be None.\"\n root_subgraph = graph.get_root_subgraph()\n assert (\n root_subgraph is not None\n ), \"Failed to get root subgraph of input Graph object.\"\n if root_subgraph.is_leaf:\n return []\n child_subgraphs = root_subgraph.toposort_child_subgraph()\n assert child_subgraphs is not None and len(child_subgraphs) > 0\n return [\n cs\n for cs in child_subgraphs\n if cs.has_attr(\"device\") and cs.get_attr(\"device\").upper() == \"DPU\"\n ]\n\n\ndef main(argv):\n global threadnum\n\n listimage = os.listdir(calib_image_dir)\n threadAll = []\n threadnum = int(argv[1])\n i = 0\n global runTotall\n runTotall = len(listimage)\n g = xir.Graph.deserialize(argv[2])\n subgraphs = get_child_subgraph_dpu(g)\n assert len(subgraphs) == 1 # only one DPU kernel\n all_dpu_runners = []\n for i in range(int(threadnum)):\n all_dpu_runners.append(vart.Runner.create_runner(subgraphs[0], \"run\"))\n \"\"\"image list to be run \"\"\"\n img = []\n time_start = time.time()\n for i in range(runTotall):\n path = os.path.join(calib_image_dir, listimage[i])\n img.append(preprocess_one_image_fn(path))\n\n cnt = 1\n \"\"\"run with batch \"\"\" \n for i in range(int(threadnum)):\n t1 = threading.Thread(target=runResnet50, args=(all_dpu_runners[i], img, cnt))\n threadAll.append(t1)\n for x in threadAll:\n x.start()\n for x in threadAll:\n x.join()\n\n del all_dpu_runners\n\n time_end = time.time()\n timetotal = time_end - time_start\n total_frames = runTotall\n fps = float(total_frames / timetotal)\n print(\n \"FPS=%.2f, total frames = %.2f , time=%.6f seconds\"\n % (fps, total_frames, timetotal)\n )\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print(\"usage : python3 resnet50.py <thread_number> <resnet50_xmodel_file>\")\n else:\n main(sys.argv)\n",
"\n\n#\n# Copyright 2019 Xilinx Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport copy\nimport importlib\nimport random\nimport string\nimport sys\nimport tempfile\nimport torch\n\nfrom collections import OrderedDict\nfrom torch.nn import DataParallel\nfrom torch.nn.parallel import DistributedDataParallel\n\nfrom nndct_shared.base import NNDCT_OP\nfrom nndct_shared.nndct_graph.base_tensor import Tensor\nfrom nndct_shared.pruning import pruning_lib\nfrom nndct_shared.utils import logging\nfrom pytorch_nndct.qproc import utils as api_utils\nfrom pytorch_nndct.export import get_script_writer\nfrom pytorch_nndct.utils import TorchSymbol\n\n_torch_layouts = {2: 'OI', 4: 'OIHW'}\n_nndct_layouts = {2: 'OI', 4: 'OHWI'}\n\ndef torch_to_nndct(tensor):\n return transpose_tensor(tensor, _torch_layouts, _nndct_layouts)\n\ndef nndct_to_torch(tensor):\n return transpose_tensor(tensor, _nndct_layouts, _torch_layouts)\n\ndef transpose_tensor(tensor, src_layouts, dst_layouts):\n if not isinstance(tensor, Tensor):\n raise TypeError(\"'tensor' must be Tensor, but given {}\".format(\n type(tensor)))\n if tensor.ndim != 4 and tensor.ndim != 2:\n return tensor\n\n src_layout = src_layouts[tensor.ndim]\n dst_layout = dst_layouts[tensor.ndim]\n\n axis = [src_layout.index(d) for d in dst_layout]\n tensor.transpose(axis)\n return tensor\n\ndef torch_tensor_from_nndct(tensor):\n replicated_tensor = copy.deepcopy(tensor)\n return torch.from_numpy(\n transpose_tensor(replicated_tensor, _nndct_layouts, _torch_layouts).data)\n\ndef _inspect_ana(sens_path):\n net_sens = pruning_lib.read_sens(sens_path)\n print(net_sens)\n return net_sens\n\ndef dummy_inputs(input_specs):\n inputs = []\n for spec in input_specs:\n inputs.append(torch.rand(1, *spec.shape).type(spec.dtype))\n return inputs\n\ndef unwrap_parallel_module(module):\n if isinstance(module, (DataParallel, DistributedDataParallel)):\n model = module.module\n else:\n model = module\n return model\n\ndef raw_param_name(full_param_name):\n return full_param_name.split('.')[-1]\n\ndef random_str(str_length=4):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(str_length))\n\ndef readable_num(number):\n s = ''\n if number < 0:\n s += '-'\n number = -number\n\n if number < 1000:\n s += '%d' % number\n elif number > 1e15:\n s += '%0.3G' % number\n else:\n units = 'KMGT'\n unit_index = 0\n while number > 1000000:\n number /= 1000\n unit_index += 1\n s += '%.2f%s' % (number / 1000.0, units[unit_index])\n return s\n\ndef pad_to_sparse_tensor(tensor, pruning_info):\n \"\"\"Pad tensor with zeros by given pruning_info.\n Restore the tensor to its original unpruned shape and use zeros to fill\n in the removed input/output channels.\n [100, 60, 3, 3] -> [128, 64, 3, 3]\n \"\"\"\n shape = list(tensor.shape)\n # OIHW for 4 dims, OI for 2 dims.\n orig_out_channels = shape[0] + len(pruning_info.removed_outputs)\n\n # Pad output channels.\n shape[0] = orig_out_channels\n out_padded_tensor = torch.zeros(shape, 
dtype=tensor.dtype)\n index = 0\n for axis in range(orig_out_channels):\n if axis not in pruning_info.removed_outputs:\n out_padded_tensor[axis] = tensor[index]\n index += 1\n\n if len(shape) < 2:\n return out_padded_tensor\n\n orig_in_channels = shape[1] + len(pruning_info.removed_inputs)\n # Pad input channels.\n shape[1] = orig_in_channels\n in_padded_tensor = torch.zeros(shape, dtype=tensor.dtype)\n index = 0\n for axis in range(orig_in_channels):\n if axis not in pruning_info.removed_inputs:\n in_padded_tensor[:, axis] = out_padded_tensor[:, index]\n index += 1\n return in_padded_tensor\n\ndef rebuild_module(graph):\n _, filename = tempfile.mkstemp(suffix='.py', text=True)\n writer = get_script_writer(enable_quant=False)\n writer.write(graph, filename)\n\n #module_name = graph.name\n py_module_name = \"_\".join([\"nndct\", random_str()])\n spec = importlib.util.spec_from_file_location(py_module_name, filename)\n py_module = importlib.util.module_from_spec(spec)\n sys.modules[py_module_name] = py_module\n spec.loader.exec_module(py_module)\n rebuilt_module = py_module.__dict__[graph.name]()\n\n api_utils.connect_module_with_graph(rebuilt_module, graph)\n return rebuilt_module, filename\n\ndef map_rebuilt_module_to_node(model, graph):\n module_to_node = {}\n for name, module in model.named_children():\n # module_name -> node_id\n node_idx = int(name.split(TorchSymbol.MODULE_NAME_SEPERATOR)[-1])\n node = graph.get_node_by_idx(node_idx)\n module_to_node[name] = node\n return module_to_node\n\ndef map_original_module_to_node(model, graph):\n module_to_node = {}\n for node in graph.nodes:\n attr_names = []\n # TODO(yuwang): Use pytorch_nndct/utils/module_utils.py::module_name_from_node\n # ResNet/Sequential[layer4]/BasicBlock[1]/Conv2d[conv1]/input.50\n parts = node.name.split('/')[1:-1]\n for part in parts:\n left_bracket = part.index('[')\n right_bracket = part.index(']')\n attr_names.append(part[left_bracket + 1:right_bracket])\n\n module = model\n for attr_name in attr_names:\n module = getattr(module, attr_name)\n module_to_node[id(module)] = node.name\n return module_to_node\n",
"# Copyright 2019 Xilinx Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Vitis quantize layers.\"\"\"\n\nimport tensorflow as tf\n\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.base import quantizers\n\nserialize_keras_object = tf.keras.utils.serialize_keras_object\ndeserialize_keras_object = tf.keras.utils.deserialize_keras_object\n\n\nclass QuantizeLayer(tf.keras.layers.Layer):\n \"\"\"Emulate quantization of tensors passed through the layer.\"\"\"\n\n def __init__(self, quantizer, mode, **kwargs):\n \"\"\"Create a QuantizeLayer.\n\n Args:\n quantizer: `Quantizer` used to quantize tensors.\n **kwargs: Additional keyword arguments to be passed to the keras layer.\n \"\"\"\n super(QuantizeLayer, self).__init__(**kwargs)\n\n if quantizer is None or not isinstance(quantizer, quantizers.Quantizer):\n raise ValueError('quantizer should not be None, and should be an instance'\n 'of `tfmot.quantization.keras.quantizers.Quantizer`.')\n\n self.quantizer = quantizer\n self._mode = mode\n\n def build(self, input_shape):\n self.quantizer_vars = self.quantizer.build(input_shape, self.name, self)\n\n self.optimizer_step = self.add_weight(\n 'optimizer_step',\n initializer=tf.keras.initializers.Constant(-1),\n dtype=tf.dtypes.int32,\n trainable=False)\n\n def get_quantize_info(self):\n return {'type': 'input', 'info': self.quantizer.get_quantize_info()}\n\n def set_quantize_info(self, new_quantize_info):\n self.quantizer.set_quantize_info(new_quantize_info['info'])\n\n def call(self, inputs, training=None):\n if training is None:\n training = tf.keras.backend.learning_phase()\n\n def _make_quantizer_fn(train_var):\n\n def quantizer_fn():\n return self.quantizer(\n inputs, train_var, self.mode, weights=self.quantizer_vars)\n\n return quantizer_fn\n\n return tf_utils.smart_cond(training, _make_quantizer_fn(True),\n _make_quantizer_fn(False))\n\n def get_config(self):\n base_config = super(QuantizeLayer, self).get_config()\n config = {\n 'quantizer': serialize_keras_object(self.quantizer),\n 'mode': self.mode\n }\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n config = config.copy()\n\n # Deserialization code should ensure Quantizer is in keras scope.\n quantizer = deserialize_keras_object(\n config.pop('quantizer'), module_objects=globals(), custom_objects=None)\n\n mode = config.pop('mode')\n\n return cls(quantizer=quantizer, mode=mode, **config)\n\n @property\n def mode(self):\n return self._mode\n\n @mode.setter\n def mode(self, value):\n self._mode = value\n\n\nclass Quantize(tf.keras.layers.Layer):\n\n def __init__(self, bit_width, quantize_pos, **kwargs):\n super(Quantize, self).__init__()\n self.bit_width = bit_width\n self.quantize_pos = quantize_pos\n\n def call(self, inputs):\n lower_bound = -tf.math.pow(2.0, self.bit_width - 1)\n upper_bound = 
tf.math.pow(2.0, self.bit_width - 1) - 1\n lower_bound = tf.cast(lower_bound, tf.float32, name=\"lower_bound\")\n upper_bound = tf.cast(upper_bound, tf.float32, name=\"upper_bound\")\n\n step = tf.math.pow(\n 2.0, tf.cast(-self.quantize_pos, tf.float32), name=\"step\")\n\n divided = tf.math.divide(inputs, step, name=\"divided\")\n rounded = tf.math.round(divided, name=\"rounded\")\n quantized = tf.clip_by_value(\n rounded, lower_bound, upper_bound, name=\"quantized\")\n return quantized\n\n def get_config(self):\n config = super(Quantize, self).get_config()\n config.update({\n \"bit_width\": self.bit_width,\n \"quantize_pos\": self.quantize_pos\n })\n return config\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n\n\nclass Dequantize(tf.keras.layers.Layer):\n\n def __init__(self, bit_width, quantize_pos, **kwargs):\n super(Dequantize, self).__init__()\n self.bit_width = bit_width\n self.quantize_pos = quantize_pos\n\n def call(self, inputs):\n step = tf.math.pow(\n 2.0, tf.cast(-self.quantize_pos, tf.float32), name=\"step\")\n dequantized = tf.math.multiply(inputs, step, name=\"dequantized\")\n return dequantized\n\n def get_config(self):\n config = super(Dequantize, self).get_config()\n config.update({\n \"bit_width\": self.bit_width,\n \"quantize_pos\": self.quantize_pos\n })\n return config\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n"
] |
[
[
"pandas.read_csv",
"tensorflow.python.keras.layers.LSTM",
"tensorflow.python.keras.layers.Embedding",
"tensorflow.python.keras.layers.Dense",
"sklearn.model_selection.train_test_split",
"tensorflow.python.keras.preprocessing.text.Tokenizer",
"tensorflow.python.keras.preprocessing.sequence.pad_sequences",
"tensorflow.python.keras.backend.function",
"tensorflow.python.keras.Sequential",
"tensorflow.python.keras.layers.Conv1D",
"tensorflow.python.keras.layers.MaxPooling1D",
"numpy.zeros"
],
[
"torch.nn.CrossEntropyLoss",
"torch.utils.data.distributed.DistributedSampler",
"torch.load",
"torch.set_default_dtype",
"torch.randn",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.get_default_dtype"
],
[
"tensorflow.keras.Sequential",
"tensorflow.keras.Model"
],
[
"numpy.empty"
],
[
"torch.rand",
"torch.zeros"
],
[
"tensorflow.clip_by_value",
"tensorflow.keras.initializers.Constant",
"tensorflow.keras.backend.learning_phase",
"tensorflow.cast",
"tensorflow.math.multiply",
"tensorflow.math.divide",
"tensorflow.math.pow",
"tensorflow.math.round"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.5"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yyb1995/software_technology_project
|
[
"4b5d9bf04a744b220e0b931917372f32d5bbfcfb"
] |
[
"hw2/pymoo/operators/crossover/simulated_binary_crossover.py"
] |
[
"import numpy as np\n\nfrom pymoo.model.crossover import Crossover\nfrom pymoo.rand import random\nfrom pymoo.util.misc import covert_to_type\n\n\nclass SimulatedBinaryCrossover(Crossover):\n def __init__(self, prob_cross, eta_cross):\n super().__init__(2, 2)\n self.prob_cross = float(prob_cross)\n self.eta_cross = float(eta_cross)\n\n def _do(self, problem, pop, parents, **kwargs):\n n_matings = parents.shape[0]\n children = np.full((n_matings * self.n_offsprings, problem.n_var), np.inf)\n X = pop.get(\"X\")[parents.T].astype(np.double)\n\n # crossover mask that will be used in the end\n do_crossover = np.full(X[0].shape, True)\n\n # simulating probability of doing a crossover with the parents at all\n do_crossover[random.random(n_matings) > self.prob_cross, :] = False\n # per variable the probability is then 50%\n do_crossover[random.random((n_matings, problem.n_var)) <= 0.5] = False\n # also if values are too close no mating is done\n do_crossover[np.abs(X[0] - X[1]) <= 1.0e-14] = False\n\n # assign y1 the smaller and y2 the larger value\n y1 = np.min(X, axis=0)\n y2 = np.max(X, axis=0)\n\n # random values for each individual\n rand = random.random((n_matings, problem.n_var))\n\n def calc_betaq(beta):\n alpha = 2.0 - np.power(beta, -(self.eta_cross + 1.0))\n\n mask, mask_not = (rand <= (1.0 / alpha)), (rand > (1.0 / alpha))\n\n betaq = np.zeros(mask.shape)\n betaq[mask] = np.power((rand * alpha), (1.0 / (self.eta_cross + 1.0)))[mask]\n betaq[mask_not] = np.power((1.0 / (2.0 - rand * alpha)), (1.0 / (self.eta_cross + 1.0)))[mask_not]\n\n return betaq\n\n # difference between all variables\n delta = (y2 - y1)\n\n # now just be sure not dividing by zero (these cases will be filtered later anyway)\n #delta[np.logical_or(delta < 1.0e-10, np.logical_not(do_crossover))] = 1.0e-10\n delta[delta < 1.0e-10] = 1.0e-10\n\n beta = 1.0 + (2.0 * (y1 - problem.xl) / delta)\n betaq = calc_betaq(beta)\n c1 = 0.5 * ((y1 + y2) - betaq * delta)\n\n beta = 1.0 + (2.0 * (problem.xu - y2) / delta)\n betaq = calc_betaq(beta)\n c2 = 0.5 * ((y1 + y2) + betaq * delta)\n\n # do randomly a swap of variables\n b = random.random((n_matings, problem.n_var)) <= 0.5\n val = c1[b]\n c1[b] = c2[b]\n c2[b] = val\n\n # take the parents as template\n c = X.astype(np.double)\n\n # copy the positions where the crossover was done\n c[0, do_crossover] = c1[do_crossover]\n c[1, do_crossover] = c2[do_crossover]\n\n # copy to the structure which is returned\n children[:n_matings, :] = c[0]\n children[n_matings:, :] = c[1]\n\n # just be sure we are not out of bounds\n children[children < problem.xl] = np.repeat(problem.xl[None, :], children.shape[0], axis=0)[\n children < problem.xl]\n children[children > problem.xu] = np.repeat(problem.xu[None, :], children.shape[0], axis=0)[\n children > problem.xu]\n\n children = covert_to_type(problem, children)\n return pop.new(\"X\", children)\n"
] |
[
[
"numpy.abs",
"numpy.min",
"numpy.power",
"numpy.full",
"numpy.max",
"numpy.repeat",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
david-yoon/UMIC
|
[
"0dfab17d826e65ae3cb112e3da300772b168776f"
] |
[
"model/ce.py"
] |
[
"\"\"\"\nCopyright (c) Microsoft Corporation.\nLicensed under the MIT license.\n\nUNITER for ITM model\n\"\"\"\nfrom collections import defaultdict\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n#from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm\nfrom torch.nn import LayerNorm\n\nfrom .layer import GELU\nfrom .model import UniterPreTrainedModel, UniterModel\nimport numpy as np\n\n\nclass UniterForCaptioningMetric(UniterPreTrainedModel):\n \"\"\" Finetune UNITER for Caption Evaluation\n \"\"\"\n def __init__(self, config, img_dim, margin=0.2):\n super().__init__(config)\n self.uniter = UniterModel(config, img_dim)\n self.itm_output = nn.Linear(config.hidden_size, 2)\n self.rank_output = nn.Linear(config.hidden_size, 1)\n self.margin = margin\n self.apply(self.init_weights)\n\n def init_output(self):\n \"\"\" need to be called after from pretrained only for the training step\"\"\"\n self.rank_output.weight.data = self.itm_output.weight.data[1:, :]\n self.rank_output.bias.data = self.itm_output.bias.data[1:]\n\n def forward(self, batch, compute_loss=True, compute_step_loss=False):\n batch = defaultdict(lambda: None, batch)\n input_ids = batch['input_ids']\n position_ids = batch['position_ids']\n img_feat = batch['img_feat']\n img_pos_feat = batch['img_pos_feat']\n attention_mask = batch['attn_masks']\n gather_index = batch['gather_index']\n\n sequence_output = self.uniter(input_ids, position_ids,\n img_feat, img_pos_feat,\n attention_mask, gather_index,\n output_all_encoded_layers=False)\n pooled_output = self.uniter.pooler(sequence_output)\n rank_scores = self.rank_output(pooled_output)\n\n if compute_loss:\n # triplet loss\n rank_scores_sigmoid = torch.sigmoid(rank_scores)\n sample_size = batch['sample_size']\n scores = rank_scores_sigmoid.contiguous().view(-1, sample_size)\n\n pos = scores[:, :1]\n neg = scores[:, 1:]\n \n rank_loss = torch.clamp(self.margin + neg - pos, 0)\n \n #print(\"## Rank Score Sigmoid Size: \", rank_scores_sigmoid.size())\n #print(\"## Scores size: \", scores.size()) \n\n return rank_loss, rank_scores\n else:\n return rank_scores\n\n\n\nclass UniterForCaptionEvaluationLinearBCE(UniterPreTrainedModel):\n \"\"\" Finetune UNITER for Caption Evaluation\n \"\"\"\n def __init__(self, config, img_dim, margin=0.2):\n super().__init__(config)\n self.uniter = UniterModel(config, img_dim)\n self.itm_output = nn.Linear(config.hidden_size, 2)\n self.apply(self.init_weights)\n\n def forward(self, batch, compute_loss=True):\n batch = defaultdict(lambda: None, batch)\n input_ids = batch['input_ids']\n position_ids = batch['position_ids']\n img_feat = batch['img_feat']\n img_pos_feat = batch['img_pos_feat']\n attention_mask = batch['attn_masks']\n gather_index = batch['gather_index']\n sequence_output = self.uniter(input_ids, position_ids,\n img_feat, img_pos_feat,\n attention_mask, gather_index,\n output_all_encoded_layers=False)\n pooled_output = self.uniter.pooler(sequence_output)\n ce_scores = self.itm_output(pooled_output)\n\n if compute_loss:\n targets = batch['targets']\n ce_loss = F.binary_cross_entropy_with_logits(\n ce_scores, targets, reduction='none')\n return ce_loss\n else:\n return ce_scores\n\nclass UniterForCaptionEvaluationLinearRank(UniterPreTrainedModel):\n \"\"\" Finetune UNITER for Caption Evaluation\n \"\"\"\n def __init__(self, config, img_dim, margin=0.2):\n super().__init__(config)\n self.uniter = UniterModel(config, img_dim)\n self.itm_output = nn.Linear(config.hidden_size, 2)\n self.rank_output = 
nn.Linear(config.hidden_size, 1)\n self.margin = margin\n self.apply(self.init_weights)\n\n def forward(self, batch, compute_loss=True, is_val=False):\n batch = defaultdict(lambda: None, batch)\n input_ids = batch['input_ids']\n position_ids = batch['position_ids']\n img_feat = batch['img_feat']\n img_pos_feat = batch['img_pos_feat']\n attention_mask = batch['attn_masks']\n gather_index = batch['gather_index']\n \n sequence_output = self.uniter(input_ids, position_ids,\n img_feat, img_pos_feat,\n attention_mask, gather_index,\n output_all_encoded_layers=False)\n pooled_output = self.uniter.pooler(sequence_output)\n rank_scores = self.rank_output(pooled_output)\n\n if compute_loss:\n if(is_val):\n rank_scores_sigmoid = torch.sigmoid(rank_scores)\n else:\n # triplet loss\n rank_scores_sigmoid = torch.sigmoid(rank_scores)\n sample_size = batch['sample_size']\n scores = rank_scores_sigmoid.contiguous().view(-1, sample_size)\n\n pos = scores[:, :1]\n neg = scores[:, 1:]\n \n rank_loss = torch.clamp(self.margin + neg - pos, 0)\n \n return rank_loss, rank_scores\n else:\n return rank_scores\n"
] |
[
[
"torch.clamp",
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.functional.binary_cross_entropy_with_logits"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
giuliapezzutti/eeg-preprocessing
|
[
"a3edfcdac3d9ca273e2d8c2ac70a606747543f18"
] |
[
"src/functions.py"
] |
[
"import numpy as np\nfrom matplotlib import pyplot as plt\nimport pylab as py\nfrom scipy import optimize\n\n\ndef create_personality_matrix(num_personalities, num_data, personality_types):\n \"\"\"\n Creation of multiplication matrix and bias vector for the computation of the personality test according to the\n definition\n :param personality_types:\n :param num_personalities: number of personalities types in the study\n :param num_data: number of data to which the subject has answered\n :return: multiplication matrix and bias vector\n \"\"\"\n\n # empty personality matrix\n personality_matrix = np.zeros([num_personalities, num_data])\n\n # where to put +1 or -1 in the personality matrix for each row\n E = {'name': 'E', '+': [1, 11, 21, 31, 41], '-': [6, 16, 26, 36, 46]}\n A = {'name': 'A', '+': [7, 17, 27, 37, 42, 47], '-': [2, 12, 22, 32]}\n C = {'name': 'C', '+': [3, 13, 23, 33, 43, 48], '-': [8, 18, 28, 38]}\n N = {'name': 'N', '+': [9, 19], '-': [4, 14, 24, 29, 34, 39, 44, 49]}\n O = {'name': 'O', '+': [5, 15, 25, 35, 40, 45, 50], '-': [10, 20, 30]}\n\n # filling of the matrix according to the definition\n for dict in [E, A, C, N, O]:\n\n name = dict['name']\n plus = dict['+']\n minus = dict['-']\n\n index = personality_types.index(name)\n\n for idx in plus:\n personality_matrix[index, idx - 1] = +1\n for idx in minus:\n personality_matrix[index, idx - 1] = -1\n\n # personality bias vector definition according to the explanation\n personality_bias = [20, 14, 14, 38, 8]\n\n return personality_matrix, personality_bias\n\n\ndef derive_conditions_rois(labels):\n conditions = [s.split('/')[0] for s in labels]\n conditions = list(set(conditions))\n rois = [s.split('/')[1] for s in labels]\n rois = list(set(rois))\n return conditions, rois\n\n\ndef plot_mean_epochs(mean_signals, conditions, rois, erps):\n conditions = sorted(conditions)\n rois = sorted(rois)\n\n x_axis = mean_signals['blackwhite/central'].times * 1000\n\n fig, axs = plt.subplots(3, 2, figsize=(25.6, 19.2))\n\n path = '../images/epochs/manipulations.png'\n\n min_value = np.inf\n max_value = -np.inf\n\n for _, evoked in mean_signals.items():\n data = evoked.get_data()[0]\n min_value = min(min_value, min(data))\n max_value = max(max_value, max(data))\n\n for i, ax in enumerate(fig.axes):\n\n condition = conditions[i]\n correct_labels = [s for s in mean_signals.keys() if condition + '/' in s]\n correct_short_labels = [s.split('/')[1] for s in correct_labels]\n\n for idx, label in enumerate(correct_labels):\n ax.plot(x_axis, mean_signals[label].get_data()[0], label=correct_short_labels[idx])\n\n for erp in erps:\n ax.vlines(erp, ymin=min_value, ymax=max_value, linestyles='dashed')\n\n ax.set_xlabel('Time (\\u03bcs)')\n ax.set_ylabel('Amplitude (V)')\n ax.set_title(condition)\n\n plt.legend(bbox_to_anchor=(1.2, 2))\n plt.savefig(path)\n plt.close()\n\n fig, axs = plt.subplots(2, 2, figsize=(25.6, 19.2))\n path = '../images/epochs/rois.png'\n\n for i, ax in enumerate(fig.axes):\n\n roi = rois[i]\n\n correct_labels = [s for s in mean_signals.keys() if '/' + roi in s]\n correct_short_labels = [s.split('/')[0] for s in correct_labels]\n\n for idx, label in enumerate(correct_labels):\n ax.plot(x_axis, mean_signals[label].get_data()[0], label=correct_short_labels[idx])\n\n for erp in erps:\n ax.vlines(erp, ymin=min_value, ymax=max_value, linestyles='dashed')\n\n ax.set_xlabel('Time (\\u03bcs)')\n ax.set_ylabel('Amplitude (V)')\n ax.set_title(roi)\n\n plt.legend(bbox_to_anchor=(1.2, 1.1))\n plt.savefig(path)\n plt.close()\n\n\ndef 
get_fitted_normal_distribution(data, number_bins=100):\n # Equation for Gaussian\n def f(x, a, b, c):\n return a * py.exp(-(x - b) ** 2.0 / (2 * c ** 2))\n\n # Generate data from bins as a set of points\n x = [0.5 * (data[1][i] + data[1][i + 1]) for i in range(len(data[1]) - 1)]\n y = data[0]\n\n popt, pcov = optimize.curve_fit(f, x, y)\n\n x_fit = py.linspace(x[0], x[-1], number_bins)\n y_fit = f(x_fit, *popt)\n\n return x_fit, y_fit\n\n\ndef plot_distribution(array_data, path):\n bins = np.linspace(array_data.min(), array_data.max(), 100)\n data = py.hist(array_data, bins=bins)\n\n x_fit, y_fit = get_fitted_normal_distribution(data, number_bins=len(bins))\n plt.plot(x_fit, y_fit, lw=4, color=\"r\")\n\n plt.title((path.rsplit('.', 1)[0]).rsplit('/', 1)[1])\n plt.savefig(path)\n plt.close()\n\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"scipy.optimize.curve_fit",
"matplotlib.pyplot.close",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
min19828257/AIWorldCup
|
[
"29a30796a35f7369d63c853b3cdf846481a385af"
] |
[
"examples/test_defense1/player_rulebased-B_py/player_rulebased-B.py"
] |
[
"#!/usr/bin/python3\n\nfrom __future__ import print_function\n\nfrom twisted.internet import reactor\nfrom twisted.internet.defer import inlineCallbacks\n\nfrom autobahn.wamp.serializer import MsgPackSerializer\nfrom autobahn.wamp.types import ComponentConfig\nfrom autobahn.twisted.wamp import ApplicationSession, ApplicationRunner\n\nimport argparse\nimport random\nimport math\nimport sys\n\nimport base64\nimport numpy as np\n\nimport helper\n\n# reset_reason\nNONE = 0\nGAME_START = 1\nSCORE_MYTEAM = 2\nSCORE_OPPONENT = 3\nGAME_END = 4\nDEADLOCK = 5\nGOALKICK = 6\nCORNERKICK = 7\nPENALTYKICK = 8\nHALFTIME = 9\nEPISODE_END = 10\n\n# game_state\nSTATE_DEFAULT = 0\nSTATE_KICKOFF = 1\nSTATE_GOALKICK = 2\nSTATE_CORNERKICK = 3\nSTATE_PENALTYKICK = 4\n\n# coordinates\nMY_TEAM = 0\nOP_TEAM = 1\nBALL = 2\nX = 0\nY = 1\nTH = 2\nACTIVE = 3\nTOUCH = 4\n\n\nclass Received_Image(object):\n def __init__(self, resolution, colorChannels):\n self.resolution = resolution\n self.colorChannels = colorChannels\n # need to initialize the matrix at timestep 0\n self.ImageBuffer = np.zeros((resolution[1], resolution[0], colorChannels)) # rows, columns, colorchannels\n\n def update_image(self, received_parts):\n self.received_parts = received_parts\n for i in range(0, len(received_parts)):\n dec_msg = base64.b64decode(self.received_parts[i].b64, '-_') # decode the base64 message\n np_msg = np.fromstring(dec_msg, dtype=np.uint8) # convert byte array to numpy array\n reshaped_msg = np_msg.reshape((self.received_parts[i].height, self.received_parts[i].width, 3))\n for j in range(0, self.received_parts[i].height): # y axis\n for k in range(0, self.received_parts[i].width): # x axis\n self.ImageBuffer[j + self.received_parts[i].y, k + self.received_parts[i].x, 0] = reshaped_msg[\n j, k, 0] # blue channel\n self.ImageBuffer[j + self.received_parts[i].y, k + self.received_parts[i].x, 1] = reshaped_msg[\n j, k, 1] # green channel\n self.ImageBuffer[j + self.received_parts[i].y, k + self.received_parts[i].x, 2] = reshaped_msg[\n j, k, 2] # red channel\n\n\nclass SubImage(object):\n def __init__(self, x, y, width, height, b64):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.b64 = b64\n\n\nclass Frame(object):\n def __init__(self):\n self.time = None\n self.score = None\n self.reset_reason = None\n self.game_state = None\n self.subimages = None\n self.coordinates = None\n self.half_passed = None\n\n\nclass Component(ApplicationSession):\n \"\"\"\n AI Base + Rule Based Algorithm\n \"\"\"\n\n def __init__(self, config):\n ApplicationSession.__init__(self, config)\n\n def printConsole(self, message):\n print(message)\n sys.__stdout__.flush()\n\n def onConnect(self):\n self.join(self.config.realm)\n\n @inlineCallbacks\n def onJoin(self, details):\n\n ##############################################################################\n def init_variables(self, info):\n # Here you have the information of the game (virtual init() in random_walk.cpp)\n # List: game_time, number_of_robots\n # field, goal, penalty_area, goal_area, resolution Dimension: [x, y]\n # ball_radius, ball_mass,\n # robot_size, robot_height, axle_length, robot_body_mass, ID: [0, 1, 2, 3, 4]\n # wheel_radius, wheel_mass, ID: [0, 1, 2, 3, 4]\n # max_linear_velocity, max_torque, codewords, ID: [0, 1, 2, 3, 4]\n self.game_time = info['game_time']\n self.number_of_robots = info['number_of_robots']\n\n self.field = info['field']\n self.goal = info['goal']\n self.penalty_area = info['penalty_area']\n # self.goal_area = info['goal_area']\n 
self.resolution = info['resolution']\n\n self.ball_radius = info['ball_radius']\n # self.ball_mass = info['ball_mass']\n\n self.robot_size = info['robot_size']\n # self.robot_height = info['robot_height']\n # self.axle_length = info['axle_length']\n # self.robot_body_mass = info['robot_body_mass']\n\n # self.wheel_radius = info['wheel_radius']\n # self.wheel_mass = info['wheel_mass']\n\n self.max_linear_velocity = info['max_linear_velocity']\n # self.max_torque = info['max_torque']\n # self.codewords = info['codewords']\n\n self.colorChannels = 3\n self.end_of_frame = False\n self.image = Received_Image(self.resolution, self.colorChannels)\n self.cur_posture = []\n self.cur_ball = []\n self.prev_posture = []\n self.prev_ball = []\n self.previous_frame = Frame()\n self.received_frame = Frame()\n\n self.touch = [False,False,False,False,False]\n self.def_idx = 0\n self.atk_idx = 0\n\n self.wheels = [0 for _ in range(10)]\n return\n\n ##############################################################################\n\n try:\n info = yield self.call(u'aiwc.get_info', args.key)\n except Exception as e:\n self.printConsole(\"Error: {}\".format(e))\n else:\n try:\n self.sub = yield self.subscribe(self.on_event, args.key)\n except Exception as e2:\n self.printConsole(\"Error: {}\".format(e2))\n\n init_variables(self, info)\n\n try:\n yield self.call(u'aiwc.ready', args.key)\n except Exception as e:\n self.printConsole(\"Error: {}\".format(e))\n else:\n self.printConsole(\"I am ready for the game!\")\n\n # set the left and right wheel velocities of robot with id 'id'\n # 'max_velocity' scales the velocities up to the point where at least one of wheel is operating at max velocity\n def set_wheel_velocity(self, id, left_wheel, right_wheel, max_velocity):\n multiplier = 1\n\n # wheel velocities need to be scaled so that none of wheels exceed the maximum velocity available\n # otherwise, the velocity above the limit will be set to the max velocity by the simulation program\n # if that happens, the velocity ratio between left and right wheels will be changed that the robot may not execute\n # turning actions correctly.\n if (abs(left_wheel) > self.max_linear_velocity[id] or abs(right_wheel) > self.max_linear_velocity[\n id] or max_velocity):\n if (abs(left_wheel) > abs(right_wheel)):\n multiplier = self.max_linear_velocity[id] / abs(left_wheel)\n else:\n multiplier = self.max_linear_velocity[id] / abs(right_wheel)\n\n self.wheels[2 * id] = left_wheel * multiplier\n self.wheels[2 * id + 1] = right_wheel * multiplier\n\n # let the robot with id 'id' move to a target position (x, y)\n # the trajectory to reach the target position is determined by several different parameters\n def set_target_position(self, id, x, y, scale, mult_lin, mult_ang, max_velocity):\n damping = 0.35\n ka = 0\n sign = 1\n\n # calculate how far the target position is from the robot\n dx = x - self.cur_posture[id][X]\n dy = y - self.cur_posture[id][Y]\n d_e = math.sqrt(math.pow(dx, 2) + math.pow(dy, 2))\n\n # calculate how much the direction is off\n desired_th = (math.pi / 2) if (dx == 0 and dy == 0) else math.atan2(dy, dx)\n d_th = desired_th - self.cur_posture[id][TH]\n while (d_th > math.pi):\n d_th -= 2 * math.pi\n while (d_th < -math.pi):\n d_th += 2 * math.pi\n\n # based on how far the target position is, set a parameter that\n # decides how much importance should be put into changing directions\n # farther the target is, less need to change directions fastly\n if (d_e > 1):\n ka = 17 / 90\n elif (d_e > 0.5):\n ka = 19 / 90\n elif 
(d_e > 0.3):\n ka = 21 / 90\n elif (d_e > 0.2):\n ka = 23 / 90\n else:\n ka = 25 / 90\n\n # if the target position is at rear of the robot, drive backward instead\n if (d_th > helper.d2r(95)):\n d_th -= math.pi\n sign = -1\n elif (d_th < helper.d2r(-95)):\n d_th += math.pi\n sign = -1\n\n # if the direction is off by more than 85 degrees,\n # make a turn first instead of start moving toward the target\n if (abs(d_th) > helper.d2r(85)):\n self.set_wheel_velocity(id, -mult_ang * d_th, mult_ang * d_th, False)\n # otherwise\n else:\n # scale the angular velocity further down if the direction is off by less than 40 degrees\n if (d_e < 5 and abs(d_th) < helper.d2r(40)):\n ka = 0.1\n ka *= 4\n\n # set the wheel velocity\n # 'sign' determines the direction [forward, backward]\n # 'scale' scales the overall velocity at which the robot is driving\n # 'mult_lin' scales the linear velocity at which the robot is driving\n # larger distance 'd_e' scales the base linear velocity higher\n # 'damping' slows the linear velocity down\n # 'mult_ang' and 'ka' scales the angular velocity at which the robot is driving\n # larger angular difference 'd_th' scales the base angular velocity higher\n # if 'max_velocity' is true, the overall velocity is scaled to the point\n # where at least one wheel is operating at maximum velocity\n self.set_wheel_velocity(id,\n sign * scale * (mult_lin * (\n 1 / (1 + math.exp(-3 * d_e)) - damping) - mult_ang * ka * d_th),\n sign * scale * (mult_lin * (\n 1 / (1 + math.exp(-3 * d_e)) - damping) + mult_ang * ka * d_th),\n max_velocity)\n\n # copy coordinates from frames to different variables just for convenience\n def get_coord(self):\n self.cur_ball = self.received_frame.coordinates[BALL]\n self.cur_posture = self.received_frame.coordinates[MY_TEAM]\n self.cur_posture_op = self.received_frame.coordinates[OP_TEAM]\n self.prev_ball = self.previous_frame.coordinates[BALL]\n self.prev_posture = self.previous_frame.coordinates[MY_TEAM]\n self.prev_posture_op = self.previous_frame.coordinates[OP_TEAM]\n\n # find a defender and a forward closest to the ball\n def find_closest_robot(self):\n # find the closest defender\n min_idx = 0\n min_dist = 9999.99\n def_dist = 9999.99\n\n all_dist = []\n\n for i in [1, 2]:\n measured_dist = helper.dist(self.cur_ball[X], self.cur_posture[i][X], self.cur_ball[Y],\n self.cur_posture[i][Y])\n all_dist.append(measured_dist)\n if (measured_dist < min_dist):\n min_dist = measured_dist\n def_dist = min_dist\n min_idx = i\n\n self.def_idx = min_idx\n\n # find the closest forward\n min_idx = 0\n min_dist = 9999.99\n atk_dist = 9999.99\n\n for i in [3, 4]:\n measured_dist = helper.dist(self.cur_ball[X], self.cur_posture[i][X], self.cur_ball[Y],\n self.cur_posture[i][Y])\n all_dist.append(measured_dist)\n if (measured_dist < min_dist):\n min_dist = measured_dist\n atk_dist = min_dist\n min_idx = i\n\n self.atk_idx = min_idx\n\n # record the robot closer to the ball between the two too\n self.closest_order = np.argsort(all_dist) + 1\n\n # predict where the ball will be located after 'steps' steps\n def predict_ball_location(self, steps):\n dx = self.cur_ball[X] - self.prev_ball[X]\n dy = self.cur_ball[Y] - self.prev_ball[Y]\n return [self.cur_ball[X] + steps * dx, self.cur_ball[Y] + steps * dy]\n\n # let the robot face toward specific direction\n def face_specific_position(self, id, x, y):\n dx = x - self.cur_posture[id][X]\n dy = y - self.cur_posture[id][Y]\n\n desired_th = (math.pi / 2) if (dx == 0 and dy == 0) else math.atan2(dy, dx)\n\n self.angle(id, 
desired_th)\n\n # returns the angle toward a specific position from current robot posture\n def direction_angle(self, id, x, y):\n dx = x - self.cur_posture[id][X]\n dy = y - self.cur_posture[id][Y]\n\n return ((math.pi / 2) if (dx == 0 and dy == 0) else math.atan2(dy, dx))\n\n # turn to face 'desired_th' direction\n def angle(self, id, desired_th):\n mult_ang = 0.4\n\n d_th = desired_th - self.cur_posture[id][TH]\n d_th = helper.trim_radian(d_th)\n\n # the robot instead puts the direction rear if the angle difference is large\n if (d_th > helper.d2r(95)):\n d_th -= math.pi\n sign = -1\n elif (d_th < helper.d2r(-95)):\n d_th += math.pi\n sign = -1\n\n self.set_wheel_velocity(id, -mult_ang * d_th, mult_ang * d_th, False)\n\n # checks if a certain position is inside the penalty area of 'team'\n def in_penalty_area(self, obj, team):\n if (abs(obj[Y]) > self.penalty_area[Y] / 2):\n return False\n\n if (team == MY_TEAM):\n return (obj[X] < -self.field[X] / 2 + self.penalty_area[X])\n else:\n return (obj[X] > self.field[X] / 2 - self.penalty_area[X])\n\n # check if the ball is coming toward the robot\n def ball_coming_toward_robot(self, id):\n x_dir = abs(self.cur_posture[id][X] - self.prev_ball[X]) > abs(self.cur_posture[id][X] - self.cur_ball[X])\n y_dir = abs(self.cur_posture[id][Y] - self.prev_ball[Y]) > abs(self.cur_posture[id][Y] - self.cur_ball[Y])\n\n # ball is coming closer\n if (x_dir and y_dir):\n return True\n else:\n return False\n\n # check if the robot with id 'id' has a chance to shoot\n def shoot_chance(self, id):\n dx = self.cur_ball[X] - self.cur_posture[id][X]\n dy = self.cur_ball[Y] - self.cur_posture[id][Y]\n\n # if the ball is located further on left than the robot, it will be hard to shoot\n if (dx < 0):\n return False\n\n # if the robot->ball direction aligns with opponent's goal, the robot can shoot\n y = (self.field[X] / 2 - self.cur_ball[X]) * dy / dx + self.cur_posture[id][Y]\n if (abs(y) < self.goal[Y] / 2):\n return True\n else:\n return False\n\n @inlineCallbacks\n def on_event(self, f):\n\n @inlineCallbacks\n def set_wheel(self, robot_wheels):\n yield self.call(u'aiwc.set_speed', args.key, robot_wheels)\n return\n\n # a basic goalkeeper rulbased algorithm\n def goalkeeper(self, id):\n\n # self.set_target_position(id, 1, 1, 1.4, 5.0, 0.4, False)\n # return\n # default desired position\n x = (-self.field[X] / 2) + (self.robot_size[id] / 2) + 0.05\n y = max(min(self.cur_ball[Y], (self.goal[Y] / 2 - self.robot_size[id] / 2)),\n -self.goal[Y] / 2 + self.robot_size[id] / 2)\n\n # if the robot is inside the goal, try to get out\n if (self.cur_posture[id][X] < -self.field[X] / 2):\n if (self.cur_posture[id][Y] < 0):\n self.set_target_position(id, x, self.cur_posture[id][Y] + 0.2, 1.4, 5.0, 0.4, False)\n else:\n self.set_target_position(id, x, self.cur_posture[id][Y] - 0.2, 1.4, 5.0, 0.4, False)\n # if the goalkeeper is outside the penalty area\n elif (not self.in_penalty_area(self.cur_posture[id], MY_TEAM)):\n # return to the desired position\n self.set_target_position(id, x, y, 1.4, 5.0, 0.4, True)\n # if the goalkeeper is inside the penalty area\n else:\n # if the ball is inside the penalty area\n if (self.in_penalty_area(self.cur_ball, MY_TEAM)):\n # if the ball is behind the goalkeeper\n if (self.cur_ball[X] < self.cur_posture[id][X]):\n # if the ball is not blocking the goalkeeper's path\n if (abs(self.cur_ball[Y] - self.cur_posture[id][Y]) > 2 * self.robot_size[id]):\n # try to get ahead of the ball\n self.set_target_position(id, self.cur_ball[X] - 
self.robot_size[id], self.cur_posture[id][Y], 1.4, 5.0,\n 0.4, False)\n else:\n # just give up and try not to make a suicidal goal\n self.angle(id, math.pi / 2)\n # if the ball is ahead of the goalkeeper\n else:\n desired_th = self.direction_angle(id, self.cur_ball[X], self.cur_ball[Y])\n rad_diff = helper.trim_radian(desired_th - self.cur_posture[id][TH])\n # if the robot direction is too away from the ball direction\n if (rad_diff > math.pi / 3):\n # give up kicking the ball and block the goalpost\n self.set_target_position(id, x, y, 1.4, 5.0, 0.4, False)\n else:\n # try to kick the ball away from the goal\n self.set_target_position(id, self.cur_ball[X], self.cur_ball[Y], 1.4, 3.0, 0.8, True)\n # if the ball is not in the penalty area\n else:\n # if the ball is within alert range and y position is not too different\n if (self.cur_ball[X] < -self.field[X] / 2 + 1.5 * self.penalty_area[X] and abs(\n self.cur_ball[Y]) < 1.5 * self.penalty_area[Y] / 2 and abs(\n self.cur_ball[Y] - self.cur_posture[id][Y]) < 0.2):\n self.face_specific_position(id, self.cur_ball[X], self.cur_ball[Y])\n # otherwise\n else:\n self.set_target_position(id, x, y, 1.4, 5.0, 0.4, True)\n\n # a basic defender rulebased algorithm\n def defender(self, id):\n self.set_target_position(id, -6, 5, 1.4, 5.0, 0.4, False)\n return\n # # if the robot is inside the goal, try to get out\n # if (self.cur_posture[id][X] < -self.field[X] / 2):\n # if (self.cur_posture[id][Y] < 0):\n # self.set_target_position(id, -0.7 * self.field[X] / 2, self.cur_posture[id][Y] + 0.2, 1.4, 3.5, 0.6, False)\n # else:\n # self.set_target_position(id, -0.7 * self.field[X] / 2, self.cur_posture[id][Y] - 0.2, 1.4, 3.5, 0.6, False)\n # return\n # # the defender may try to shoot if condition meets\n # if (id == self.def_idx and self.shoot_chance(id) and self.cur_ball[X] < 0.3 * self.field[X] / 2):\n # self.set_target_position(id, self.cur_ball[X], self.cur_ball[Y], 1.4, 5.0, 0.4, True)\n # return\n\n # # if this defender is closer to the ball than the other defender\n # if (id == self.def_idx):\n # # ball is on our side\n # if (self.cur_ball[X] < 0):\n # # if the robot can push the ball toward opponent's side, do it\n # if (self.cur_posture[id][X] < self.cur_ball[X] - self.ball_radius):\n # self.set_target_position(id, self.cur_ball[X], self.cur_ball[Y], 1.4, 5.0, 0.4, True)\n # else:\n # # otherwise go behind the ball\n # if (abs(self.cur_ball[Y] - self.cur_posture[id][Y]) > 0.3):\n # self.set_target_position(id, max(self.cur_ball[X] - 0.5, -self.field[X] / 2 + self.robot_size[id] / 2),\n # self.cur_ball[Y], 1.4, 3.5, 0.6, False)\n # else:\n # self.set_target_position(id, max(self.cur_ball[X] - 0.5, -self.field[X] / 2 + self.robot_size[id] / 2),\n # self.cur_posture[id][Y], 1.4, 3.5, 0.6, False)\n # else:\n # self.set_target_position(id, -0.7 * self.field[X] / 2, self.cur_ball[Y], 1.4, 3.5, 0.4, False)\n # # if this defender is not closer to the ball than the other defender\n # else:\n # # ball is on our side\n # if (self.cur_ball[X] < 0):\n # # ball is on our left\n # if (self.cur_ball[Y] > self.goal[Y] / 2 + 0.15):\n # self.set_target_position(id,\n # max(self.cur_ball[X] - 0.5, -self.field[X] / 2 + self.robot_size[id] / 2 + 0.1),\n # self.goal[Y] / 2 + 0.15, 1.4, 3.5, 0.4, False)\n # # ball is on our right\n # elif (self.cur_ball[Y] < -self.goal[Y] / 2 - 0.15):\n # self.set_target_position(id,\n # max(self.cur_ball[X] - 0.5, -self.field[X] / 2 + self.robot_size[id] / 2 + 0.1),\n # -self.goal[Y] / 2 - 0.15, 1.4, 3.5, 0.4, False)\n # # ball is in 
center\n # else:\n # self.set_target_position(id,\n # max(self.cur_ball[X] - 0.5, -self.field[X] / 2 + self.robot_size[id] / 2 + 0.1),\n # self.cur_ball[Y], 1.4, 3.5, 0.4, False)\n # else:\n # # ball is on right side\n # if (self.cur_ball[Y] < 0):\n # self.set_target_position(id, -0.7 * self.field[X] / 2,\n # min(self.cur_ball[Y] + 0.5, self.field[Y] / 2 - self.robot_size[id] / 2), 1.4,\n # 3.5, 0.4, False)\n # # ball is on left side\n # else:\n # self.set_target_position(id, -0.7 * self.field[X] / 2,\n # max(self.cur_ball[Y] - 0.5, -self.field[Y] / 2 + self.robot_size[id] / 2), 1.4,\n # 3.5, 0.4, False)\n\n # a basic forward rulebased algorithm\n def forward(self, id):\n self.set_target_position(id, -5, 5, 1.4, 3.5, 0.6, False)\n return\n # if the robot is blocking the ball's path toward opponent side\n # if (self.cur_ball[X] > -0.3 * self.field[X] / 2 and self.cur_ball[X] < 0.3 * self.field[X] / 2 and\n # self.cur_posture[id][X] > self.cur_ball[X] + 0.1 and abs(\n # self.cur_posture[id][Y] - self.cur_ball[Y]) < 0.3):\n # if (self.cur_ball[Y] < 0):\n # self.set_target_position(id, self.cur_posture[id][X] - 0.25, self.cur_ball[Y] + 0.75, 1.4, 3.0, 0.8, False)\n # else:\n # self.set_target_position(id, self.cur_posture[id][X] - 0.25, self.cur_ball[Y] - 0.75, 1.4, 3.0, 0.8, False)\n # return\n\n # # if the robot can shoot from current position\n # if (id == self.atk_idx and self.shoot_chance(id)):\n # pred_ball = self.predict_ball_location(2)\n # self.set_target_position(id, pred_ball[X], pred_ball[Y], 1.4, 5.0, 0.4, True)\n # return\n\n # # if the ball is coming toward the robot, seek for shoot chance\n # if (id == self.atk_idx and self.ball_coming_toward_robot(id)):\n # dx = self.cur_ball[X] - self.prev_ball[X]\n # dy = self.cur_ball[Y] - self.prev_ball[Y]\n # pred_x = (self.cur_posture[id][Y] - self.cur_ball[Y]) * dx / dy + self.cur_ball[X]\n # steps = (self.cur_posture[id][Y] - self.cur_ball[Y]) / dy\n\n # # if the ball will be located in front of the robot\n # if (pred_x > self.cur_posture[id][X]):\n # pred_dist = pred_x - self.cur_posture[id][X]\n # # if the predicted ball location is close enough\n # if (pred_dist > 0.1 and pred_dist < 0.3 and steps < 10):\n # # find the direction towards the opponent goal and look toward it\n # goal_angle = self.direction_angle(id, self.field[X] / 2, 0)\n # self.angle(id, goal_angle)\n # return\n\n # # if this forward is closer to the ball than the other forward\n # if (id == self.atk_idx):\n # if (self.cur_ball[X] > -0.3 * self.field[X] / 2):\n # # if the robot can push the ball toward opponent's side, do it\n # if (self.cur_posture[id][X] < self.cur_ball[X] - self.ball_radius):\n # self.set_target_position(id, self.cur_ball[X], self.cur_ball[Y], 1.4, 5.0, 0.4, True)\n # else:\n # # otherwise go behind the ball\n # if (abs(self.cur_ball[Y] - self.cur_posture[id][Y]) > 0.3):\n # self.set_target_position(id, self.cur_ball[X] - 0.2, self.cur_ball[Y], 1.4, 3.5, 0.6, False)\n # else:\n # self.set_target_position(id, self.cur_ball[X] - 0.2, self.cur_posture[id][Y], 1.4, 3.5, 0.6, False)\n # else:\n # self.set_target_position(id, -0.1 * self.field[X] / 2, self.cur_ball[Y], 1.4, 3.5, 0.4, False)\n # # if this forward is not closer to the ball than the other forward\n # else:\n # if (self.cur_ball[X] > -0.3 * self.field[X] / 2):\n # # ball is on our right\n # if (self.cur_ball[Y] < 0):\n # self.set_target_position(id, self.cur_ball[X] - 0.25, self.goal[Y] / 2, 1.4, 3.5, 0.4, False)\n # # ball is on our left\n # else:\n # self.set_target_position(id, 
self.cur_ball[X] - 0.25, -self.goal[Y] / 2, 1.4, 3.5, 0.4, False)\n # else:\n # # ball is on right side\n # if (self.cur_ball[Y] < 0):\n # self.set_target_position(id, -0.1 * self.field[X] / 2,\n # min(-self.cur_ball[Y] - 0.5, self.field[Y] / 2 - self.robot_size[id] / 2), 1.4,\n # 3.5, 0.4, False)\n # # ball is on left side\n # else:\n # self.set_target_position(id, -0.1 * self.field[X] / 2,\n # max(-self.cur_ball[Y] + 0.5, -self.field[Y] / 2 + self.robot_size[id] / 2), 1.4,\n # 3.5, 0.4, False)\n\n #가장 빠른 공격수 공격\n def attack(self, id):\n self.face_specific_position(id, self.cur_ball[X], self.cur_ball[Y])\n self.set_target_position(id, self.cur_ball[X], self.cur_ball[Y], 1.4, 5.0, 0.4, False)\n # self.atk_idx may try to shoot if condition meets\n if (self.shoot_chance(id) and self.cur_ball[X] < 0.3 * self.field[X] / 2):\n self.set_target_position(id, self.cur_ball[X], self.cur_ball[Y], 1.4, 5.0, 0.4, True)\n\n #자살골용 주변 분산\n def go_away(self, id):\n self.set_target_position(id, 3, 2.5, 1.4, 5.0, 0.4, True)\n return\n\n # initiate empty frame\n if (self.end_of_frame):\n self.received_frame = Frame()\n self.end_of_frame = False\n received_subimages = []\n\n if 'time' in f:\n self.received_frame.time = f['time']\n if 'score' in f:\n self.received_frame.score = f['score']\n if 'reset_reason' in f:\n self.received_frame.reset_reason = f['reset_reason']\n if 'game_state' in f:\n self.received_frame.game_state = f['game_state']\n if 'ball_ownership' in f:\n self.received_frame.ball_ownership = f['ball_ownership']\n if 'half_passed' in f:\n self.received_frame.half_passed = f['half_passed']\n if 'subimages' in f:\n self.received_frame.subimages = f['subimages']\n for s in self.received_frame.subimages:\n received_subimages.append(SubImage(s['x'],\n s['y'],\n s['w'],\n s['h'],\n s['base64'].encode('utf8')))\n self.image.update_image(received_subimages)\n if 'coordinates' in f:\n self.received_frame.coordinates = f['coordinates']\n if 'EOF' in f:\n self.end_of_frame = f['EOF']\n\n if (self.end_of_frame):\n # to get the image at the end of each frame use the variable:\n # self.image.ImageBuffer\n\n if (self.received_frame.reset_reason != NONE):\n self.previous_frame = self.received_frame\n\n self.get_coord()\n self.find_closest_robot()\n\n if (self.received_frame.reset_reason == EPISODE_END):\n # EPISODE_END is sent instead of GAME_END when 'repeat' option is set to 'true'\n # to mark the end of episode\n # you can reinitialize the parameters, count the number of episodes done, etc. 
here\n\n # this example does not do anything at episode end\n pass\n\n if (self.received_frame.reset_reason == HALFTIME):\n # halftime is met - from next frame, self.received_frame.half_passed will be set to True\n # although the simulation switches sides,\n # coordinates and images given to your AI soccer algorithm will stay the same\n # that your team is red and located on left side whether it is 1st half or 2nd half\n\n # this example does not do anything at halftime\n pass\n\n ##############################################################################\n if (self.received_frame.game_state == STATE_DEFAULT):\n # robot functions in STATE_DEFAULT\n\n #go_away(self,0)\n\n # 골키퍼 다른구역에 보내기\n #self.set_target_position(0, -2, 2.5, 1.4, 5.0, 0.4, True)\n\n # 골키퍼역할 제대로하기\n goalkeeper(self, 0)\n\n # 수비(1,2) 공격(3,4) 명령\n # defender(self, 1)\n # defender(self, 2)\n # forward(self, 3)\n # forward(self, 4)\n\n # 블루팀의 공격(1,공을 찾아 드리블 2.슛찬스가 났을시 슈팅시도)\n # attack(self,4)\n # attack(self,1)\n # attack(self,2)\n # attack(self,3)\n\n # 선수들 특정 영역으로 보내기\n # self.set_target_position(1, 3, 3, 1.4, 5.0, 0.4, True)\n # self.set_target_position(2, 3, 3, 1.4, 5.0, 0.4, True)\n # self.set_target_position(3, 3, 3, 1.4, 5.0, 0.4, True)\n # self.set_target_position(4, 3, 3, 1.4, 5.0, 0.4, True)\n\n self.printConsole(\"blue team : STATE_DEFAULT\")\n\n ##특정 위치로 모든 로봇을 옮기기\n #goalkeeper(self, 0)\n\n set_wheel(self, self.wheels)\n return\n ##############################################################################\n elif (self.received_frame.game_state == STATE_KICKOFF):\n # if the ball belongs to my team, initiate kickoff\n # if (self.received_frame.ball_ownership):\n # self.set_target_position(4, 0, 0, 1.4, 3.0, 0.4, False)\n\n # defender(self, 1)\n # defender(self, 2)\n # forward(self, 3)\n # forward(self, 4)\n\n set_wheel(self, self.wheels)\n ##############################################################################\n elif (self.received_frame.game_state == STATE_GOALKICK):\n # if the ball belongs to my team,\n # drive the goalkeeper to kick the ball\n if (self.received_frame.ball_ownership):\n self.set_wheel_velocity(0, self.max_linear_velocity[0], self.max_linear_velocity[0], True)\n\n set_wheel(self, self.wheels)\n ##############################################################################\n elif (self.received_frame.game_state == STATE_CORNERKICK):\n # just play as simple as possible\n# goalkeeper(self, 0)\n# defender(self, 1)\n# defender(self, 2)\n forward(self, 3)\n forward(self, 4)\n\n set_wheel(self, self.wheels)\n ##############################################################################\n elif (self.received_frame.game_state == STATE_PENALTYKICK):\n # if the ball belongs to my team,\n # drive the forward to kick the ball\n if (self.received_frame.ball_ownership):\n self.set_wheel_velocity(4, self.max_linear_velocity[0], self.max_linear_velocity[0], True)\n\n set_wheel(self, self.wheels)\n ##############################################################################\n if (self.received_frame.reset_reason == GAME_END):\n # (virtual finish() in random_walk.cpp)\n # save your data\n with open(args.datapath + '/result.txt', 'w') as output:\n # output.write('yourvariables')\n output.close()\n # unsubscribe; reset or leave\n yield self.sub.unsubscribe()\n try:\n yield self.leave()\n except Exception as e:\n self.printConsole(\"Error: {}\".format(e))\n ##############################################################################\n\n self.end_of_frame = False\n self.previous_frame = 
self.received_frame\n\n def onDisconnect(self):\n if reactor.running:\n reactor.stop()\n\n\nif __name__ == '__main__':\n\n try:\n unicode\n except NameError:\n # Define 'unicode' for Python 3\n def unicode(s, *_):\n return s\n\n\n def to_unicode(s):\n return unicode(s, \"utf-8\")\n\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"server_ip\", type=to_unicode)\n parser.add_argument(\"port\", type=to_unicode)\n parser.add_argument(\"realm\", type=to_unicode)\n parser.add_argument(\"key\", type=to_unicode)\n parser.add_argument(\"datapath\", type=to_unicode)\n\n args = parser.parse_args()\n\n ai_sv = \"rs://\" + args.server_ip + \":\" + args.port\n ai_realm = args.realm\n\n # create a Wamp session object\n session = Component(ComponentConfig(ai_realm, {}))\n\n # initialize the msgpack serializer\n serializer = MsgPackSerializer()\n\n # use Wamp-over-rawsocket\n runner = ApplicationRunner(ai_sv, ai_realm, serializers=[serializer])\n\n runner.run(session, auto_reconnect=False)\n"
] |
[
[
"numpy.argsort",
"numpy.fromstring",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qibaoyuan/fairseq
|
[
"eabd07fdcfd5b007d05428e81a31b7f3fc5de959",
"eabd07fdcfd5b007d05428e81a31b7f3fc5de959",
"208295dfc76492748500f97a4f9a808d8053a184"
] |
[
"fairseq/models/roberta/hub_interface.py",
"fairseq/data/indexed_dataset.py",
"tests/test_binaries.py"
] |
[
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom fairseq.data import encoders\n\n\nclass RobertaHubInterface(nn.Module):\n \"\"\"A simple PyTorch Hub interface to RoBERTa.\n\n Usage: https://github.com/pytorch/fairseq/tree/master/examples/roberta\n \"\"\"\n\n def __init__(self, args, task, model):\n super().__init__()\n self.args = args\n self.task = task\n self.model = model\n\n self.bpe = encoders.build_bpe(args)\n\n # this is useful for determining the device\n self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float))\n\n @property\n def device(self):\n return self._float_tensor.device\n\n def encode(self, sentence: str, *addl_sentences) -> torch.LongTensor:\n bpe_sentence = '<s> ' + self.bpe.encode(sentence) + ' </s>'\n for s in addl_sentences:\n bpe_sentence += ' </s> ' + self.bpe.encode(s)\n tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=True)\n return tokens.long()\n\n def extract_features(self, tokens: torch.LongTensor, return_all_hiddens=False) -> torch.Tensor:\n if tokens.dim() == 1:\n tokens = tokens.unsqueeze(0)\n if tokens.size(-1) > self.model.max_positions():\n raise ValueError('tokens exceeds maximum length: {} > {}'.format(\n tokens.size(-1), self.model.max_positions()\n ))\n features, extra = self.model(\n tokens.to(device=self.device),\n features_only=True,\n return_all_hiddens=return_all_hiddens,\n )\n if return_all_hiddens:\n # convert from T x B x C -> B x T x C\n inner_states = extra['inner_states']\n return [inner_state.transpose(0, 1) for inner_state in inner_states]\n else:\n return features # just the last layer's features\n\n def register_classification_head(\n self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs\n ):\n self.model.register_classification_head(\n name, num_classes=num_classes, embedding_size=embedding_size, **kwargs\n )\n\n def predict(self, head: str, tokens: torch.LongTensor):\n features = self.extract_features(tokens)\n logits = self.model.classification_heads[head](features)\n return F.log_softmax(logits, dim=-1)\n",
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nfrom functools import lru_cache\nimport os\nimport shutil\nimport struct\n\nimport numpy as np\nimport torch\n\nfrom . import FairseqDataset\n\n\ndef __best_fitting_dtype(vocab_size=None):\n if vocab_size is not None and vocab_size < 65500:\n return np.uint16\n else:\n return np.int32\n\n\ndef get_available_dataset_impl():\n return ['raw', 'lazy', 'cached', 'mmap']\n\n\ndef infer_dataset_impl(path):\n if IndexedRawTextDataset.exists(path):\n return 'raw'\n elif IndexedDataset.exists(path):\n with open(index_file_path(path), 'rb') as f:\n magic = f.read(8)\n if magic == IndexedDataset._HDR_MAGIC:\n return 'cached'\n elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:\n return 'mmap'\n else:\n return None\n else:\n return None\n\n\ndef make_builder(out_file, impl, vocab_size=None):\n if impl == 'mmap':\n return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))\n else:\n return IndexedDatasetBuilder(out_file)\n\n\ndef make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):\n if impl == 'raw' and IndexedRawTextDataset.exists(path):\n assert dictionary is not None\n return IndexedRawTextDataset(path, dictionary)\n elif impl == 'lazy' and IndexedDataset.exists(path):\n return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)\n elif impl == 'cached' and IndexedDataset.exists(path):\n return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)\n elif impl == 'mmap' and MMapIndexedDataset.exists(path):\n return MMapIndexedDataset(path)\n return None\n\n\ndef dataset_exists(path, impl):\n if impl == 'raw':\n return IndexedRawTextDataset.exists(path)\n elif impl == 'mmap':\n return MMapIndexedDataset.exists(path)\n else:\n return IndexedDataset.exists(path)\n\n\ndef read_longs(f, n):\n a = np.empty(n, dtype=np.int64)\n f.readinto(a)\n return a\n\n\ndef write_longs(f, a):\n f.write(np.array(a, dtype=np.int64))\n\n\ndtypes = {\n 1: np.uint8,\n 2: np.int8,\n 3: np.int16,\n 4: np.int32,\n 5: np.int64,\n 6: np.float,\n 7: np.double,\n 8: np.uint16\n}\n\n\ndef code(dtype):\n for k in dtypes.keys():\n if dtypes[k] == dtype:\n return k\n raise ValueError(dtype)\n\n\ndef index_file_path(prefix_path):\n return prefix_path + '.idx'\n\n\ndef data_file_path(prefix_path):\n return prefix_path + '.bin'\n\n\nclass IndexedDataset(FairseqDataset):\n \"\"\"Loader for TorchNet IndexedDataset\"\"\"\n _HDR_MAGIC = b'TNTIDX\\x00\\x00'\n\n def __init__(self, path, fix_lua_indexing=False):\n super().__init__()\n self.path = path\n self.fix_lua_indexing = fix_lua_indexing\n self.data_file = None\n self.read_index(path)\n\n def read_index(self, path):\n with open(index_file_path(path), 'rb') as f:\n magic = f.read(8)\n assert magic == self._HDR_MAGIC, (\n 'Index file doesn\\'t match expected format. 
'\n 'Make sure that --dataset-impl is configured properly.'\n )\n version = f.read(8)\n assert struct.unpack('<Q', version) == (1,)\n code, self.element_size = struct.unpack('<QQ', f.read(16))\n self.dtype = dtypes[code]\n self._len, self.s = struct.unpack('<QQ', f.read(16))\n self.dim_offsets = read_longs(f, self._len + 1)\n self.data_offsets = read_longs(f, self._len + 1)\n self.sizes = read_longs(f, self.s)\n\n def read_data(self, path):\n self.data_file = open(data_file_path(path), 'rb', buffering=0)\n\n def check_index(self, i):\n if i < 0 or i >= self._len:\n raise IndexError('index out of range')\n\n def __del__(self):\n if self.data_file:\n self.data_file.close()\n\n @lru_cache(maxsize=8)\n def __getitem__(self, i):\n if not self.data_file:\n self.read_data(self.path)\n self.check_index(i)\n tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]\n a = np.empty(tensor_size, dtype=self.dtype)\n self.data_file.seek(self.data_offsets[i] * self.element_size)\n self.data_file.readinto(a)\n item = torch.from_numpy(a).long()\n if self.fix_lua_indexing:\n item -= 1 # subtract 1 for 0-based indexing\n return item\n\n def __len__(self):\n return self._len\n\n def num_tokens(self, index):\n return self.sizes[index]\n\n def size(self, index):\n return self.sizes[index]\n\n @staticmethod\n def exists(path):\n return (\n os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))\n )\n\n @property\n def supports_prefetch(self):\n return False # avoid prefetching to save memory\n\n\nclass IndexedCachedDataset(IndexedDataset):\n\n def __init__(self, path, fix_lua_indexing=False):\n super().__init__(path, fix_lua_indexing=fix_lua_indexing)\n self.cache = None\n self.cache_index = {}\n\n @property\n def supports_prefetch(self):\n return True\n\n def prefetch(self, indices):\n if all(i in self.cache_index for i in indices):\n return\n if not self.data_file:\n self.read_data(self.path)\n indices = sorted(set(indices))\n total_size = 0\n for i in indices:\n total_size += self.data_offsets[i + 1] - self.data_offsets[i]\n self.cache = np.empty(total_size, dtype=self.dtype)\n ptx = 0\n self.cache_index.clear()\n for i in indices:\n self.cache_index[i] = ptx\n size = self.data_offsets[i + 1] - self.data_offsets[i]\n a = self.cache[ptx: ptx + size]\n self.data_file.seek(self.data_offsets[i] * self.element_size)\n self.data_file.readinto(a)\n ptx += size\n if self.data_file:\n # close and delete data file after prefetch so we can pickle\n self.data_file.close()\n self.data_file = None\n\n @lru_cache(maxsize=8)\n def __getitem__(self, i):\n self.check_index(i)\n tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]\n a = np.empty(tensor_size, dtype=self.dtype)\n ptx = self.cache_index[i]\n np.copyto(a, self.cache[ptx: ptx + a.size])\n item = torch.from_numpy(a).long()\n if self.fix_lua_indexing:\n item -= 1 # subtract 1 for 0-based indexing\n return item\n\n\nclass IndexedRawTextDataset(FairseqDataset):\n \"\"\"Takes a text file as input and binarizes it in memory at instantiation.\n Original lines are also kept in memory\"\"\"\n\n def __init__(self, path, dictionary, append_eos=True, reverse_order=False):\n self.tokens_list = []\n self.lines = []\n self.sizes = []\n self.append_eos = append_eos\n self.reverse_order = reverse_order\n self.read_data(path, dictionary)\n self.size = len(self.tokens_list)\n\n def read_data(self, path, dictionary):\n with open(path, 'r', encoding='utf-8') as f:\n for line in f:\n self.lines.append(line.strip('\\n'))\n tokens = 
dictionary.encode_line(\n line, add_if_not_exist=False,\n append_eos=self.append_eos, reverse_order=self.reverse_order,\n ).long()\n self.tokens_list.append(tokens)\n self.sizes.append(len(tokens))\n self.sizes = np.array(self.sizes)\n\n def check_index(self, i):\n if i < 0 or i >= self.size:\n raise IndexError('index out of range')\n\n @lru_cache(maxsize=8)\n def __getitem__(self, i):\n self.check_index(i)\n return self.tokens_list[i]\n\n def get_original_text(self, i):\n self.check_index(i)\n return self.lines[i]\n\n def __del__(self):\n pass\n\n def __len__(self):\n return self.size\n\n def num_tokens(self, index):\n return self.sizes[index]\n\n def size(self, index):\n return self.sizes[index]\n\n @staticmethod\n def exists(path):\n return os.path.exists(path)\n\n\nclass IndexedDatasetBuilder(object):\n element_sizes = {\n np.uint8: 1,\n np.int8: 1,\n np.int16: 2,\n np.int32: 4,\n np.int64: 8,\n np.float: 4,\n np.double: 8\n }\n\n def __init__(self, out_file, dtype=np.int32):\n self.out_file = open(out_file, 'wb')\n self.dtype = dtype\n self.data_offsets = [0]\n self.dim_offsets = [0]\n self.sizes = []\n self.element_size = self.element_sizes[self.dtype]\n\n def add_item(self, tensor):\n # +1 for Lua compatibility\n bytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))\n self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)\n for s in tensor.size():\n self.sizes.append(s)\n self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))\n\n def merge_file_(self, another_file):\n index = IndexedDataset(another_file)\n assert index.dtype == self.dtype\n\n begin = self.data_offsets[-1]\n for offset in index.data_offsets[1:]:\n self.data_offsets.append(begin + offset)\n self.sizes.extend(index.sizes)\n begin = self.dim_offsets[-1]\n for dim_offset in index.dim_offsets[1:]:\n self.dim_offsets.append(begin + dim_offset)\n\n with open(data_file_path(another_file), 'rb') as f:\n while True:\n data = f.read(1024)\n if data:\n self.out_file.write(data)\n else:\n break\n\n def finalize(self, index_file):\n self.out_file.close()\n index = open(index_file, 'wb')\n index.write(b'TNTIDX\\x00\\x00')\n index.write(struct.pack('<Q', 1))\n index.write(struct.pack('<QQ', code(self.dtype), self.element_size))\n index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))\n write_longs(index, self.dim_offsets)\n write_longs(index, self.data_offsets)\n write_longs(index, self.sizes)\n index.close()\n\n\ndef _warmup_mmap_file(path):\n with open(path, 'rb') as stream:\n while stream.read(100 * 1024 * 1024):\n pass\n\n\nclass MMapIndexedDataset(torch.utils.data.Dataset):\n class Index(object):\n _HDR_MAGIC = b'MMIDIDX\\x00\\x00'\n\n @classmethod\n def writer(cls, path, dtype):\n class _Writer(object):\n def __enter__(self):\n self._file = open(path, 'wb')\n\n self._file.write(cls._HDR_MAGIC)\n self._file.write(struct.pack('<Q', 1))\n self._file.write(struct.pack('<B', code(dtype)))\n\n return self\n\n @staticmethod\n def _get_pointers(sizes):\n dtype_size = dtype().itemsize\n address = 0\n pointers = []\n\n for size in sizes:\n pointers.append(address)\n address += size * dtype_size\n\n return pointers\n\n def write(self, sizes):\n pointers = self._get_pointers(sizes)\n\n self._file.write(struct.pack('<Q', len(sizes)))\n\n sizes = np.array(sizes, dtype=np.int32)\n self._file.write(sizes.tobytes(order='C'))\n del sizes\n\n pointers = np.array(pointers, dtype=np.int64)\n self._file.write(pointers.tobytes(order='C'))\n del pointers\n\n def 
__exit__(self, exc_type, exc_val, exc_tb):\n self._file.close()\n\n return _Writer()\n\n def __init__(self, path):\n with open(path, 'rb') as stream:\n magic_test = stream.read(9)\n assert self._HDR_MAGIC == magic_test, (\n 'Index file doesn\\'t match expected format. '\n 'Make sure that --dataset-impl is configured properly.'\n )\n version = struct.unpack('<Q', stream.read(8))\n assert (1,) == version\n\n dtype_code, = struct.unpack('<B', stream.read(1))\n self._dtype = dtypes[dtype_code]\n self._dtype_size = self._dtype().itemsize\n\n self._len = struct.unpack('<Q', stream.read(8))[0]\n offset = stream.tell()\n\n _warmup_mmap_file(path)\n\n self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')\n self._bin_buffer = memoryview(self._bin_buffer_mmap)\n self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)\n self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len,\n offset=offset + self._sizes.nbytes)\n\n def __del__(self):\n self._bin_buffer_mmap._mmap.close()\n del self._bin_buffer_mmap\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def sizes(self):\n return self._sizes\n\n @lru_cache(maxsize=8)\n def __getitem__(self, i):\n return self._pointers[i], self._sizes[i]\n\n def __len__(self):\n return self._len\n\n def __init__(self, path):\n super().__init__()\n\n self._path = None\n self._index = None\n self._bin_buffer = None\n\n self._do_init(path)\n\n def __getstate__(self):\n return self._path\n\n def __setstate__(self, state):\n self._do_init(state)\n\n def _do_init(self, path):\n self._path = path\n self._index = self.Index(index_file_path(self._path))\n\n _warmup_mmap_file(data_file_path(self._path))\n self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')\n self._bin_buffer = memoryview(self._bin_buffer_mmap)\n\n def __del__(self):\n self._bin_buffer_mmap._mmap.close()\n del self._bin_buffer_mmap\n del self._index\n\n def __len__(self):\n return len(self._index)\n\n @lru_cache(maxsize=8)\n def __getitem__(self, i):\n ptr, size = self._index[i]\n np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)\n if self._index.dtype != np.int64:\n np_array = np_array.astype(np.int64)\n\n return torch.from_numpy(np_array)\n\n @property\n def sizes(self):\n return self._index.sizes\n\n @property\n def supports_prefetch(self):\n return False\n\n @staticmethod\n def exists(path):\n return (\n os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))\n )\n\n\nclass MMapIndexedDatasetBuilder(object):\n def __init__(self, out_file, dtype=np.int64):\n self._data_file = open(out_file, 'wb')\n self._dtype = dtype\n self._sizes = []\n\n def add_item(self, tensor):\n np_array = np.array(tensor.numpy(), dtype=self._dtype)\n self._data_file.write(np_array.tobytes(order='C'))\n self._sizes.append(np_array.size)\n\n def merge_file_(self, another_file):\n # Concatenate index\n index = MMapIndexedDataset.Index(index_file_path(another_file))\n assert index.dtype == self._dtype\n\n for size in index.sizes:\n self._sizes.append(size)\n\n # Concatenate data\n with open(data_file_path(another_file), 'rb') as f:\n shutil.copyfileobj(f, self._data_file)\n\n def finalize(self, index_file):\n self._data_file.close()\n\n with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:\n index.write(self._sizes)\n",
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport contextlib\nfrom io import StringIO\nimport os\nimport random\nimport sys\nimport tempfile\nimport unittest\n\nimport torch\n\nfrom fairseq import options\n\nimport preprocess\nimport train\nimport generate\nimport interactive\nimport eval_lm\n\n\nclass TestTranslation(unittest.TestCase):\n\n def test_fconv(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_fconv') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir)\n train_translation_model(data_dir, 'fconv_iwslt_de_en')\n generate_main(data_dir)\n\n def test_raw(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_fconv_raw') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir, ['--dataset-impl', 'raw'])\n train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--dataset-impl', 'raw'])\n generate_main(data_dir, ['--dataset-impl', 'raw'])\n\n def test_fp16(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_fp16') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir)\n train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--fp16'])\n generate_main(data_dir)\n\n def test_memory_efficient_fp16(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_memory_efficient_fp16') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir)\n train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--memory-efficient-fp16'])\n generate_main(data_dir)\n\n def test_update_freq(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_update_freq') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir)\n train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--update-freq', '3'])\n generate_main(data_dir)\n\n def test_max_positions(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_max_positions') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir)\n with self.assertRaises(Exception) as context:\n train_translation_model(\n data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5'],\n )\n self.assertTrue(\n 'skip this example with --skip-invalid-size-inputs-valid-test' in str(context.exception)\n )\n train_translation_model(\n data_dir, 'fconv_iwslt_de_en',\n ['--max-target-positions', '5', '--skip-invalid-size-inputs-valid-test'],\n )\n with self.assertRaises(Exception) as context:\n generate_main(data_dir)\n generate_main(data_dir, ['--skip-invalid-size-inputs-valid-test'])\n\n def test_generation(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_sampling') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir)\n train_translation_model(data_dir, 'fconv_iwslt_de_en')\n generate_main(data_dir, [\n '--sampling',\n '--temperature', '2',\n '--beam', '2',\n '--nbest', '2',\n ])\n generate_main(data_dir, [\n '--sampling',\n '--sampling-topk', '3',\n '--beam', '2',\n '--nbest', '2',\n ])\n generate_main(data_dir, [\n '--sampling',\n '--sampling-topp', '0.2',\n 
'--beam', '2',\n '--nbest', '2',\n ])\n generate_main(data_dir, ['--prefix-size', '2'])\n\n def test_lstm(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_lstm') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir)\n train_translation_model(data_dir, 'lstm_wiseman_iwslt_de_en', [\n '--encoder-layers', '2',\n '--decoder-layers', '2',\n '--encoder-embed-dim', '8',\n '--decoder-embed-dim', '8',\n '--decoder-out-embed-dim', '8',\n ])\n generate_main(data_dir)\n\n def test_lstm_bidirectional(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_lstm_bidirectional') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir)\n train_translation_model(data_dir, 'lstm', [\n '--encoder-layers', '2',\n '--encoder-bidirectional',\n '--encoder-hidden-size', '16',\n '--encoder-embed-dim', '8',\n '--decoder-embed-dim', '8',\n '--decoder-out-embed-dim', '8',\n '--decoder-layers', '2',\n ])\n generate_main(data_dir)\n\n def test_transformer(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_transformer') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir)\n train_translation_model(data_dir, 'transformer_iwslt_de_en', [\n '--encoder-layers', '2',\n '--decoder-layers', '2',\n '--encoder-embed-dim', '8',\n '--decoder-embed-dim', '8',\n ])\n generate_main(data_dir)\n\n def test_lightconv(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_lightconv') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir)\n train_translation_model(data_dir, 'lightconv_iwslt_de_en', [\n '--encoder-conv-type', 'lightweight',\n '--decoder-conv-type', 'lightweight',\n '--encoder-embed-dim', '8',\n '--decoder-embed-dim', '8',\n ])\n generate_main(data_dir)\n\n def test_dynamicconv(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_dynamicconv') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir)\n train_translation_model(data_dir, 'lightconv_iwslt_de_en', [\n '--encoder-conv-type', 'dynamic',\n '--decoder-conv-type', 'dynamic',\n '--encoder-embed-dim', '8',\n '--decoder-embed-dim', '8',\n ])\n generate_main(data_dir)\n\n def test_mixture_of_experts(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_moe') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir)\n train_translation_model(data_dir, 'transformer_iwslt_de_en', [\n '--task', 'translation_moe',\n '--method', 'hMoElp',\n '--mean-pool-gating-network',\n '--num-experts', '3',\n '--encoder-layers', '2',\n '--decoder-layers', '2',\n '--encoder-embed-dim', '8',\n '--decoder-embed-dim', '8',\n ])\n generate_main(data_dir, [\n '--task', 'translation_moe',\n '--method', 'hMoElp',\n '--mean-pool-gating-network',\n '--num-experts', '3',\n '--gen-expert', '0'\n ])\n\n\nclass TestStories(unittest.TestCase):\n\n def test_fconv_self_att_wp(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_fconv_self_att_wp') as data_dir:\n create_dummy_data(data_dir)\n preprocess_translation_data(data_dir)\n config = [\n '--encoder-layers', '[(128, 3)] * 2',\n '--decoder-layers', '[(128, 3)] * 2',\n '--decoder-attention', 'True',\n '--encoder-attention', 'False',\n '--gated-attention', 'True',\n '--self-attention', 'True',\n 
'--project-input', 'True',\n '--encoder-embed-dim', '8',\n '--decoder-embed-dim', '8',\n '--decoder-out-embed-dim', '8',\n '--multihead-self-attention-nheads', '2'\n ]\n train_translation_model(data_dir, 'fconv_self_att_wp', config)\n generate_main(data_dir)\n\n # fusion model\n os.rename(os.path.join(data_dir, 'checkpoint_last.pt'), os.path.join(data_dir, 'pretrained.pt'))\n config.extend([\n '--pretrained', 'True',\n '--pretrained-checkpoint', os.path.join(data_dir, 'pretrained.pt'),\n '--save-dir', os.path.join(data_dir, 'fusion_model'),\n ])\n train_translation_model(data_dir, 'fconv_self_att_wp', config)\n\n\nclass TestLanguageModeling(unittest.TestCase):\n\n def test_fconv_lm(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_fconv_lm') as data_dir:\n create_dummy_data(data_dir)\n preprocess_lm_data(data_dir)\n train_language_model(data_dir, 'fconv_lm', [\n '--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]',\n '--decoder-embed-dim', '280',\n '--optimizer', 'nag',\n '--lr', '0.1',\n ])\n eval_lm_main(data_dir)\n\n def test_transformer_lm(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_transformer_lm') as data_dir:\n create_dummy_data(data_dir)\n preprocess_lm_data(data_dir)\n train_language_model(data_dir, 'transformer_lm', ['--add-bos-token'])\n eval_lm_main(data_dir)\n\n\nclass TestMaskedLanguageModel(unittest.TestCase):\n\n def test_legacy_masked_lm(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory(\"test_legacy_mlm\") as data_dir:\n create_dummy_data(data_dir)\n preprocess_lm_data(data_dir)\n train_legacy_masked_language_model(data_dir, \"masked_lm\")\n\n def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory(\"test_mlm\") as data_dir:\n create_dummy_data(data_dir)\n preprocess_lm_data(data_dir)\n train_legacy_masked_language_model(\n data_dir,\n arch=\"masked_lm\",\n extra_args=('--encoder-learned-pos',) if learned_pos_emb else ()\n )\n with tempfile.TemporaryDirectory(\n \"test_mlm_translation\"\n ) as translation_dir:\n create_dummy_data(translation_dir)\n preprocess_translation_data(\n translation_dir, extra_flags=[\"--joined-dictionary\"]\n )\n # Train transformer with data_dir/checkpoint_last.pt\n train_translation_model(\n translation_dir,\n arch=\"transformer_from_pretrained_xlm\",\n extra_flags=[\n \"--decoder-layers\",\n \"1\",\n \"--decoder-embed-dim\",\n \"32\",\n \"--decoder-attention-heads\",\n \"1\",\n \"--decoder-ffn-embed-dim\",\n \"32\",\n \"--encoder-layers\",\n \"1\",\n \"--encoder-embed-dim\",\n \"32\",\n \"--encoder-attention-heads\",\n \"1\",\n \"--encoder-ffn-embed-dim\",\n \"32\",\n \"--pretrained-xlm-checkpoint\",\n \"{}/checkpoint_last.pt\".format(data_dir),\n \"--activation-fn\",\n \"gelu\",\n \"--max-source-positions\",\n \"500\",\n \"--max-target-positions\",\n \"500\",\n ] + (\n [\"--encoder-learned-pos\", \"--decoder-learned-pos\"]\n if learned_pos_emb else []\n ) + (['--init-encoder-only'] if encoder_only else []),\n task=\"translation_from_pretrained_xlm\",\n )\n\n def test_pretrained_masked_lm_for_translation_learned_pos_emb(self):\n self._test_pretrained_masked_lm_for_translation(True, False)\n\n def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self):\n self._test_pretrained_masked_lm_for_translation(False, False)\n\n def test_pretrained_masked_lm_for_translation_encoder_only(self):\n 
self._test_pretrained_masked_lm_for_translation(True, True)\n\n\ndef train_legacy_masked_language_model(data_dir, arch, extra_args=()):\n train_parser = options.get_training_parser()\n # TODO: langs should be in and out right?\n train_args = options.parse_args_and_arch(\n train_parser,\n [\n \"--task\",\n \"cross_lingual_lm\",\n data_dir,\n \"--arch\",\n arch,\n # Optimizer args\n \"--optimizer\",\n \"adam\",\n \"--lr-scheduler\",\n \"reduce_lr_on_plateau\",\n \"--lr-shrink\",\n \"0.5\",\n \"--lr\",\n \"0.0001\",\n \"--min-lr\",\n \"1e-09\",\n # dropout, attention args\n \"--dropout\",\n \"0.1\",\n \"--attention-dropout\",\n \"0.1\",\n # MLM args\n \"--criterion\",\n \"legacy_masked_lm_loss\",\n \"--masked-lm-only\",\n \"--monolingual-langs\",\n \"in,out\",\n \"--num-segment\",\n \"5\",\n # Transformer args: use a small transformer model for fast training\n \"--encoder-layers\",\n \"1\",\n \"--encoder-embed-dim\",\n \"32\",\n \"--encoder-attention-heads\",\n \"1\",\n \"--encoder-ffn-embed-dim\",\n \"32\",\n # Other training args\n \"--max-tokens\",\n \"500\",\n \"--tokens-per-sample\",\n \"500\",\n \"--save-dir\",\n data_dir,\n \"--max-epoch\",\n \"1\",\n \"--no-progress-bar\",\n \"--distributed-world-size\",\n \"1\",\n \"--dataset-impl\",\n \"raw\",\n ] + list(extra_args),\n )\n train.main(train_args)\n\n\nclass TestCommonOptions(unittest.TestCase):\n\n def test_optimizers(self):\n with contextlib.redirect_stdout(StringIO()):\n with tempfile.TemporaryDirectory('test_optimizers') as data_dir:\n # Use just a bit of data and tiny model to keep this test runtime reasonable\n create_dummy_data(data_dir, num_examples=10, maxlen=5)\n preprocess_translation_data(data_dir)\n optimizers = ['adafactor', 'adam', 'nag', 'adagrad', 'sgd', 'adadelta']\n last_checkpoint = os.path.join(data_dir, 'checkpoint_last.pt')\n for optimizer in optimizers:\n if os.path.exists(last_checkpoint):\n os.remove(last_checkpoint)\n train_translation_model(data_dir, 'lstm', [\n '--required-batch-size-multiple', '1',\n '--encoder-layers', '1',\n '--encoder-hidden-size', '32',\n '--decoder-layers', '1',\n '--optimizer', optimizer,\n ])\n generate_main(data_dir)\n\n\ndef create_dummy_data(data_dir, num_examples=1000, maxlen=20):\n\n def _create_dummy_data(filename):\n data = torch.rand(num_examples * maxlen)\n data = 97 + torch.floor(26 * data).int()\n with open(os.path.join(data_dir, filename), 'w') as h:\n offset = 0\n for _ in range(num_examples):\n ex_len = random.randint(1, maxlen)\n ex_str = ' '.join(map(chr, data[offset:offset+ex_len]))\n print(ex_str, file=h)\n offset += ex_len\n\n _create_dummy_data('train.in')\n _create_dummy_data('train.out')\n _create_dummy_data('valid.in')\n _create_dummy_data('valid.out')\n _create_dummy_data('test.in')\n _create_dummy_data('test.out')\n\n\ndef preprocess_translation_data(data_dir, extra_flags=None):\n preprocess_parser = options.get_preprocessing_parser()\n preprocess_args = preprocess_parser.parse_args(\n [\n '--source-lang', 'in',\n '--target-lang', 'out',\n '--trainpref', os.path.join(data_dir, 'train'),\n '--validpref', os.path.join(data_dir, 'valid'),\n '--testpref', os.path.join(data_dir, 'test'),\n '--thresholdtgt', '0',\n '--thresholdsrc', '0',\n '--destdir', data_dir,\n ] + (extra_flags or []),\n )\n preprocess.main(preprocess_args)\n\n\ndef train_translation_model(data_dir, arch, extra_flags=None, task='translation'):\n train_parser = options.get_training_parser()\n train_args = options.parse_args_and_arch(\n train_parser,\n [\n '--task', task,\n data_dir,\n 
'--save-dir', data_dir,\n '--arch', arch,\n '--lr', '0.05',\n '--max-tokens', '500',\n '--max-epoch', '1',\n '--no-progress-bar',\n '--distributed-world-size', '1',\n '--source-lang', 'in',\n '--target-lang', 'out',\n ] + (extra_flags or []),\n )\n train.main(train_args)\n\n\ndef generate_main(data_dir, extra_flags=None):\n generate_parser = options.get_generation_parser()\n generate_args = options.parse_args_and_arch(\n generate_parser,\n [\n data_dir,\n '--path', os.path.join(data_dir, 'checkpoint_last.pt'),\n '--beam', '3',\n '--batch-size', '64',\n '--max-len-b', '5',\n '--gen-subset', 'valid',\n '--no-progress-bar',\n '--print-alignment',\n ] + (extra_flags or []),\n )\n\n # evaluate model in batch mode\n generate.main(generate_args)\n\n # evaluate model interactively\n generate_args.buffer_size = 0\n generate_args.input = '-'\n generate_args.max_sentences = None\n orig_stdin = sys.stdin\n sys.stdin = StringIO('h e l l o\\n')\n interactive.main(generate_args)\n sys.stdin = orig_stdin\n\n\ndef preprocess_lm_data(data_dir):\n preprocess_parser = options.get_preprocessing_parser()\n preprocess_args = preprocess_parser.parse_args([\n '--only-source',\n '--trainpref', os.path.join(data_dir, 'train.out'),\n '--validpref', os.path.join(data_dir, 'valid.out'),\n '--testpref', os.path.join(data_dir, 'test.out'),\n '--destdir', data_dir,\n ])\n preprocess.main(preprocess_args)\n\n\ndef train_language_model(data_dir, arch, extra_flags=None):\n train_parser = options.get_training_parser()\n train_args = options.parse_args_and_arch(\n train_parser,\n [\n '--task', 'language_modeling',\n data_dir,\n '--arch', arch,\n '--optimizer', 'adam',\n '--lr', '0.0001',\n '--criterion', 'adaptive_loss',\n '--adaptive-softmax-cutoff', '5,10,15',\n '--max-tokens', '500',\n '--tokens-per-sample', '500',\n '--save-dir', data_dir,\n '--max-epoch', '1',\n '--no-progress-bar',\n '--distributed-world-size', '1',\n '--ddp-backend', 'no_c10d',\n ] + (extra_flags or []),\n )\n train.main(train_args)\n\n\ndef eval_lm_main(data_dir):\n eval_lm_parser = options.get_eval_lm_parser()\n eval_lm_args = options.parse_args_and_arch(\n eval_lm_parser,\n [\n data_dir,\n '--path', os.path.join(data_dir, 'checkpoint_last.pt'),\n '--no-progress-bar',\n ],\n )\n eval_lm.main(eval_lm_args)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"torch.nn.functional.log_softmax",
"torch.tensor"
],
[
"numpy.memmap",
"torch.from_numpy",
"numpy.frombuffer",
"numpy.copyto",
"numpy.array",
"numpy.empty"
],
[
"torch.floor",
"torch.rand"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
madhushree14/statsmodels
|
[
"04f00006a7aeb1c93d6894caa420698400da6c33",
"04f00006a7aeb1c93d6894caa420698400da6c33",
"04f00006a7aeb1c93d6894caa420698400da6c33",
"04f00006a7aeb1c93d6894caa420698400da6c33",
"04f00006a7aeb1c93d6894caa420698400da6c33",
"04f00006a7aeb1c93d6894caa420698400da6c33",
"04f00006a7aeb1c93d6894caa420698400da6c33"
] |
[
"statsmodels/examples/ex_lowess.py",
"docs/source/plots/graphics_plot_fit_ex.py",
"statsmodels/stats/nonparametric.py",
"examples/python/statespace_local_linear_trend.py",
"statsmodels/examples/ex_kernel_semilinear_dgp.py",
"examples/python/robust_models_0.py",
"examples/run_all.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 31 15:26:06 2011\n\nAuthor: Chris Jordan Squire\n\nextracted from test suite by josef-pktd\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nimport statsmodels.nonparametric.tests.results\n\n# this is just to check direct import\nimport statsmodels.nonparametric.smoothers_lowess\nstatsmodels.nonparametric.smoothers_lowess.lowess\n\nlowess = sm.nonparametric.lowess\n\nx = np.arange(20.)\n\n#standard normal noise\nnoise = np.array([-0.76741118, -0.30754369,\n 0.39950921, -0.46352422, -1.67081778,\n 0.6595567 , 0.66367639, -2.04388585,\n 0.8123281 , 1.45977518,\n 1.21428038, 1.29296866, 0.78028477,\n -0.2402853 , -0.21721302,\n 0.24549405, 0.25987014, -0.90709034,\n -1.45688216, -0.31780505])\ny = x + noise\n\nexpected_lowess = np.array([[ 0. , -0.58337912],\n [ 1. , 0.61951246],\n [ 2. , 1.82221628],\n [ 3. , 3.02536876],\n [ 4. , 4.22667951],\n [ 5. , 5.42387723],\n [ 6. , 6.60834945],\n [ 7. , 7.7797691 ],\n [ 8. , 8.91824348],\n [ 9. , 9.94997506],\n [ 10. , 10.89697569],\n [ 11. , 11.78746276],\n [ 12. , 12.62356492],\n [ 13. , 13.41538492],\n [ 14. , 14.15745254],\n [ 15. , 14.92343948],\n [ 16. , 15.70019862],\n [ 17. , 16.48167846],\n [ 18. , 17.26380699],\n [ 19. , 18.0466769 ]])\n\nactual_lowess = lowess(y, x)\nprint(actual_lowess)\nprint(np.max(np.abs(actual_lowess-expected_lowess)))\n\nplt.plot(y, 'o')\nplt.plot(actual_lowess[:,1])\nplt.plot(expected_lowess[:,1])\n\nrpath = os.path.split(statsmodels.nonparametric.tests.results.__file__)[0]\nrfile = os.path.join(rpath, 'test_lowess_frac.csv')\ntest_data = np.genfromtxt(open(rfile, 'rb'),\n delimiter = ',', names = True)\nexpected_lowess_23 = np.array([test_data['x'], test_data['out_2_3']]).T\nexpected_lowess_15 = np.array([test_data['x'], test_data['out_1_5']]).T\n\nactual_lowess_23 = lowess(test_data['y'], test_data['x'] ,frac = 2./3)\nactual_lowess_15 = lowess(test_data['y'], test_data['x'] ,frac = 1./5)\n\n#plt.show()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n\nCreated on Monday April 1st 2013\n\nAuthor: Padarn Wilson\n\n\"\"\"\n\n# Load the Statewide Crime data set and perform linear regression with\n# 'poverty' and 'hs_grad' as variables and 'murder' as the response\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport statsmodels.api as sm\n\ndata = sm.datasets.statecrime.load_pandas().data\nmurder = data['murder']\nX = data[['poverty', 'hs_grad']].copy()\nX['constant'] = 1\n\ny = murder\nmodel = sm.OLS(y, X)\nresults = model.fit()\n\n# Create a plot just for the variable 'Poverty':\n\nfig, ax = plt.subplots()\nfig = sm.graphics.plot_fit(results, 0, ax=ax)\nax.set_ylabel(\"Murder Rate\")\nax.set_xlabel(\"Poverty Level\")\nax.set_title(\"Linear Regression\")\n\nplt.show()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nRank based methods for inferential statistics\n\nCreated on Sat Aug 15 10:18:53 2020\n\nAuthor: Josef Perktold\nLicense: BSD-3\n\n\"\"\"\n\n\nimport numpy as np\n\nfrom scipy import stats\nfrom scipy.stats import rankdata\n\nfrom statsmodels.stats.base import HolderTuple\nfrom statsmodels.stats.weightstats import (\n _zconfint_generic, _tconfint_generic, _zstat_generic, _tstat_generic)\n\n\ndef rankdata_2samp(x1, x2):\n \"\"\"Compute midranks for two samples\n\n Parameters\n ----------\n x1, x2 : array_like\n Original data for two samples that will be converted to midranks.\n\n Returns\n -------\n rank1 : ndarray\n Midranks of the first sample in the pooled sample.\n rank2 : ndarray\n Midranks of the second sample in the pooled sample.\n ranki1 : ndarray\n Internal midranks of the first sample.\n ranki2 : ndarray\n Internal midranks of the second sample.\n\n \"\"\"\n x1 = np.asarray(x1)\n x2 = np.asarray(x2)\n\n nobs1 = len(x1)\n nobs2 = len(x2)\n if nobs1 == 0 or nobs2 == 0:\n raise ValueError(\"one sample has zero length\")\n\n x_combined = np.concatenate((x1, x2))\n if x_combined.ndim > 1:\n rank = np.apply_along_axis(rankdata, 0, x_combined)\n else:\n rank = rankdata(x_combined) # no axis in older scipy\n rank1 = rank[:nobs1]\n rank2 = rank[nobs1:]\n if x_combined.ndim > 1:\n ranki1 = np.apply_along_axis(rankdata, 0, x1)\n ranki2 = np.apply_along_axis(rankdata, 0, x2)\n else:\n ranki1 = rankdata(x1)\n ranki2 = rankdata(x2)\n return rank1, rank2, ranki1, ranki2\n\n\nclass RankCompareResult(HolderTuple):\n \"\"\"Results for rank comparison\n\n This is a subclass of HolderTuple that includes results from intermediate\n computations, as well as methods for hypothesis tests, confidence intervals\n ans summary.\n \"\"\"\n\n def conf_int(self, value=None, alpha=0.05, alternative=\"two-sided\"):\n \"\"\"\n Confidence interval for probability that sample 1 has larger values\n\n Confidence interval is for the shifted probability\n\n P(x1 > x2) + 0.5 * P(x1 = x2) - value\n\n Parameters\n ----------\n value : float\n Value, default 0, shifts the confidence interval,\n e.g. ``value=0.5`` centers the confidence interval at zero.\n alpha : float\n Significance level for the confidence interval, coverage is\n ``1-alpha``\n alternative : str\n The alternative hypothesis, H1, has to be one of the following\n\n * 'two-sided' : H1: ``prob - value`` not equal to 0.\n * 'larger' : H1: ``prob - value > 0``\n * 'smaller' : H1: ``prob - value < 0``\n\n Returns\n -------\n lower : float or ndarray\n Lower confidence limit. This is -inf for the one-sided alternative\n \"smaller\".\n upper : float or ndarray\n Upper confidence limit. 
This is inf for the one-sided alternative\n \"larger\".\n\n \"\"\"\n\n p0 = value\n if p0 is None:\n p0 = 0\n diff = self.prob1 - p0\n std_diff = np.sqrt(self.var / self.nobs)\n\n if self.use_t is False:\n return _zconfint_generic(diff, std_diff, alpha, alternative)\n else:\n return _tconfint_generic(diff, std_diff, self.df, alpha,\n alternative)\n\n def test_prob_superior(self, value=0.5, alternative=\"two-sided\"):\n \"\"\"test for superiority probability\n\n H0: P(x1 > x2) + 0.5 * P(x1 = x2) = value\n\n The alternative is that the probability is either not equal, larger\n or smaller than the null-value depending on the chosen alternative.\n\n Parameters\n ----------\n value : float\n Value of the probability under the Null hypothesis.\n alternative : str\n The alternative hypothesis, H1, has to be one of the following\n\n * 'two-sided' : H1: ``prob - value`` not equal to 0.\n * 'larger' : H1: ``prob - value > 0``\n * 'smaller' : H1: ``prob - value < 0``\n\n Returns\n -------\n res : HolderTuple\n HolderTuple instance with the following main attributes\n\n statistic : float\n Test statistic for z- or t-test\n pvalue : float\n Pvalue of the test based on either normal or t distribution.\n\n \"\"\"\n\n p0 = value # alias\n # diff = self.prob1 - p0 # for reporting, not used in computation\n # TODO: use var_prob\n std_diff = np.sqrt(self.var / self.nobs)\n\n # TODO: return HolderTuple\n # corresponds to a one-sample test and either p0 or diff could be used\n if not self.use_t:\n stat, pv = _zstat_generic(self.prob1, p0, std_diff, alternative,\n diff=0)\n distr = \"normal\"\n else:\n stat, pv = _tstat_generic(self.prob1, p0, std_diff, self.df,\n alternative, diff=0)\n distr = \"t\"\n\n res = HolderTuple(statistic=stat,\n pvalue=pv,\n df=self.df,\n distribution=distr\n )\n return res\n\n def tost_prob_superior(self, low, upp):\n '''test of stochastic (non-)equivalence of p = P(x1 > x2)\n\n Null hypothesis: p < low or p > upp\n Alternative hypothesis: low < p < upp\n\n where p is the probability that a random draw from the population of\n the first sample has a larger value than a random draw from the\n population of the second sample, specifically\n\n p = P(x1 > x2) + 0.5 * P(x1 = x2)\n\n If the pvalue is smaller than a threshold, say 0.05, then we reject the\n hypothesis that the probability p that distribution 1 is stochastically\n superior to distribution 2 is outside of the interval given by\n thresholds low and upp.\n\n Parameters\n ----------\n low, upp : float\n equivalence interval low < mean < upp\n\n Returns\n -------\n res : HolderTuple\n HolderTuple instance with the following main attributes\n\n pvalue : float\n Pvalue of the equivalence test given by the larger pvalue of\n the two one-sided tests.\n statistic : float\n Test statistic of the one-sided test that has the larger\n pvalue.\n results_larger : HolderTuple\n Results instanc with test statistic, pvalue and degrees of\n freedom for lower threshold test.\n results_smaller : HolderTuple\n Results instanc with test statistic, pvalue and degrees of\n freedom for upper threshold test.\n\n '''\n\n t1 = self.test_prob_superior(low, alternative='larger')\n t2 = self.test_prob_superior(upp, alternative='smaller')\n\n # idx_max = 0 if t1.pvalue < t2.pvalue else 1\n idx_max = np.asarray(t1.pvalue < t2.pvalue, int)\n title = \"Equivalence test for Prob(x1 > x2) + 0.5 Prob(x1 = x2) \"\n res = HolderTuple(statistic=np.choose(idx_max,\n [t1.statistic, t2.statistic]),\n # pvalue=[t1.pvalue, t2.pvalue][idx_max], # python\n # use np.choose 
for vectorized selection\n pvalue=np.choose(idx_max, [t1.pvalue, t2.pvalue]),\n results_larger=t1,\n results_smaller=t2,\n title=title\n )\n return res\n\n def confint_lintransf(self, const=-1, slope=2, alpha=0.05,\n alternative=\"two-sided\"):\n \"\"\"confidence interval of a linear transformation of prob1\n\n This computes the confidence interval for\n\n d = const + slope * prob1\n\n Default values correspond to Somers' d.\n\n Parameters\n ----------\n const, slope : float\n Constant and slope for linear (affine) transformation.\n alpha : float\n Significance level for the confidence interval, coverage is\n ``1-alpha``\n alternative : str\n The alternative hypothesis, H1, has to be one of the following\n\n * 'two-sided' : H1: ``prob - value`` not equal to 0.\n * 'larger' : H1: ``prob - value > 0``\n * 'smaller' : H1: ``prob - value < 0``\n\n Returns\n -------\n lower : float or ndarray\n Lower confidence limit. This is -inf for the one-sided alternative\n \"smaller\".\n upper : float or ndarray\n Upper confidence limit. This is inf for the one-sided alternative\n \"larger\".\n\n \"\"\"\n\n low_p, upp_p = self.conf_int(alpha=alpha, alternative=alternative)\n low = const + slope * low_p\n upp = const + slope * upp_p\n if slope < 0:\n low, upp = upp, low\n return low, upp\n\n def effectsize_normal(self, prob=None):\n \"\"\"\n Cohen's d, standardized mean difference under normality assumption.\n\n This computes the standardized mean difference, Cohen's d, effect size\n that is equivalent to the rank based probability ``p`` of being\n stochastically larger if we assume that the data is normally\n distributed, given by\n\n :math: `d = F^{-1}(p) * \\\\sqrt{2}`\n\n where :math:`F^{-1}` is the inverse of the cdf of the normal\n distribution.\n\n Parameters\n ----------\n prob : float in (0, 1)\n Probability to be converted to Cohen's d effect size.\n If prob is None, then the ``prob1`` attribute is used.\n\n Returns\n -------\n equivalent Cohen's d effect size under normality assumption.\n\n \"\"\"\n if prob is None:\n prob = self.prob1\n return stats.norm.ppf(prob) * np.sqrt(2)\n\n def summary(self, alpha=0.05, xname=None):\n \"\"\"summary table for probability that random draw x1 is larger than x2\n\n Parameters\n ----------\n alpha : float\n Significance level for confidence intervals. 
Coverage is 1 - alpha\n xname : None or list of str\n If None, then each row has a name column with generic names.\n If xname is a list of strings, then it will be included as part\n of those names.\n\n Returns\n -------\n SimpleTable instance with methods to convert to different output\n formats.\n \"\"\"\n\n yname = \"None\"\n effect = np.atleast_1d(self.prob1)\n if self.pvalue is None:\n statistic, pvalue = self.test_prob_superior()\n else:\n pvalue = self.pvalue\n statistic = self.statistic\n pvalues = np.atleast_1d(pvalue)\n ci = np.atleast_2d(self.conf_int(alpha))\n if ci.shape[0] > 1:\n ci = ci.T\n use_t = self.use_t\n sd = np.atleast_1d(np.sqrt(self.var_prob))\n statistic = np.atleast_1d(statistic)\n if xname is None:\n xname = ['c%d' % ii for ii in range(len(effect))]\n\n xname2 = ['prob(x1>x2) %s' % ii for ii in xname]\n\n title = \"Probability sample 1 is stochastically larger\"\n from statsmodels.iolib.summary import summary_params\n\n summ = summary_params((self, effect, sd, statistic,\n pvalues, ci),\n yname=yname, xname=xname2, use_t=use_t,\n title=title, alpha=alpha)\n return summ\n\n\ndef rank_compare_2indep(x1, x2, use_t=True):\n \"\"\"\n Statistics and tests for the probability that x1 has larger values than x2.\n\n p is the probability that a random draw from the population of\n the first sample has a larger value than a random draw from the\n population of the second sample, specifically\n\n p = P(x1 > x2) + 0.5 * P(x1 = x2)\n\n This is a measure underlying Wilcoxon-Mann-Whitney's U test,\n Fligner-Policello test and Brunner-Munzel test, and\n Inference is based on the asymptotic distribution of the Brunner-Munzel\n test. The half probability for ties corresponds to the use of midranks\n and make it valid for discrete variables.\n\n The Null hypothesis for stochastic equality is p = 0.5, which corresponds\n to the Brunner-Munzel test.\n\n Parameters\n ----------\n x1, x2 : array_like\n Array of samples, should be one-dimensional.\n use_t : poolean\n If use_t is true, the t distribution with Welch-Satterthwaite type\n degrees of freedom is used for p-value and confidence interval.\n If use_t is false, then the normal distribution is used.\n\n Returns\n -------\n res : RankCompareResult\n\n statistic : float\n The Brunner-Munzer W statistic.\n pvalue : float\n p-value assuming an t distribution. One-sided or\n two-sided, depending on the choice of `alternative` and `use_t`.\n\n See Also\n --------\n scipy.stats.brunnermunzel : Brunner-Munzel test for stochastic equality\n scipy.stats.mannwhitneyu : Mann-Whitney rank test on two samples.\n\n Notes\n -----\n Wilcoxon-Mann-Whitney assumes equal variance or equal distribution under\n the Null hypothesis. Fligner-Policello test allows for unequal variances\n but assumes continuous distribution, i.e. no ties.\n Brunner-Munzel extend the test to allow for unequal variance and discrete\n or ordered categorical random variables.\n\n Brunner and Munzel recommended to estimate the p-value by t-distribution\n when the size of data is 50 or less. 
If the size is lower than 10, it would\n be better to use permuted Brunner Munzel test (see [2]_) for the test\n of stochastic equality.\n\n This measure has been introduced in the literature under many different\n names relying on a variety of assumptions.\n In psychology, McGraw and Wong (1992) introduced it as Common Language\n effect size for the continuous, normal distribution case,\n Vargha and Delaney (2000) [3]_ extended it to the nonparameteric\n continuous distribution case as in Fligner-Policello.\n\n WMW and related tests can only be interpreted as test of medians or tests\n of central location only under very restrictive additional assumptions\n such as both distribution are identical under the equality null hypothesis\n (assumed by Mann-Whitney) or both distributions are symmetric (shown by\n Fligner-Policello). If the distribution of the two samples can differ in\n an arbitrary way, then the equality Null hypothesis corresponds to p=0.5\n against an alternative p != 0.5. see for example Conroy (2012) [4]_ and\n Divine et al (2018) [5]_ .\n\n Note: Brunner-Munzel and related literature define the probability that x1\n is stochastically smaller than x2, while here we use stochastically larger.\n This equivalent to switching x1 and x2 in the two sample case.\n\n References\n ----------\n .. [1] Brunner, E. and Munzel, U. \"The nonparametric Benhrens-Fisher\n problem: Asymptotic theory and a small-sample approximation\".\n Biometrical Journal. Vol. 42(2000): 17-25.\n .. [2] Neubert, K. and Brunner, E. \"A studentized permutation test for the\n non-parametric Behrens-Fisher problem\". Computational Statistics and\n Data Analysis. Vol. 51(2007): 5192-5204.\n .. [3] Vargha, András, and Harold D. Delaney. 2000. “A Critique and\n Improvement of the CL Common Language Effect Size Statistics of\n McGraw and Wong.” Journal of Educational and Behavioral Statistics\n 25 (2): 101–32. https://doi.org/10.3102/10769986025002101.\n .. [4] Conroy, Ronán M. 2012. “What Hypotheses Do ‘Nonparametric’ Two-Group\n Tests Actually Test?” The Stata Journal: Promoting Communications on\n Statistics and Stata 12 (2): 182–90.\n https://doi.org/10.1177/1536867X1201200202.\n .. [5] Divine, George W., H. James Norton, Anna E. Barón, and Elizabeth\n Juarez-Colunga. 2018. 
“The Wilcoxon–Mann–Whitney Procedure Fails as\n a Test of Medians.” The American Statistician 72 (3): 278–86.\n https://doi.org/10.1080/00031305.2017.1305291.\n\n \"\"\"\n x1 = np.asarray(x1)\n x2 = np.asarray(x2)\n\n nobs1 = len(x1)\n nobs2 = len(x2)\n nobs = nobs1 + nobs2\n if nobs1 == 0 or nobs2 == 0:\n raise ValueError(\"one sample has zero length\")\n\n rank1, rank2, ranki1, ranki2 = rankdata_2samp(x1, x2)\n\n meanr1 = np.mean(rank1, axis=0)\n meanr2 = np.mean(rank2, axis=0)\n meanri1 = np.mean(ranki1, axis=0)\n meanri2 = np.mean(ranki2, axis=0)\n\n S1 = np.sum(np.power(rank1 - ranki1 - meanr1 + meanri1, 2.0), axis=0)\n S1 /= nobs1 - 1\n S2 = np.sum(np.power(rank2 - ranki2 - meanr2 + meanri2, 2.0), axis=0)\n S2 /= nobs2 - 1\n\n wbfn = nobs1 * nobs2 * (meanr1 - meanr2)\n wbfn /= (nobs1 + nobs2) * np.sqrt(nobs1 * S1 + nobs2 * S2)\n\n # Here we only use alternative == \"two-sided\"\n if use_t:\n df_numer = np.power(nobs1 * S1 + nobs2 * S2, 2.0)\n df_denom = np.power(nobs1 * S1, 2.0) / (nobs1 - 1)\n df_denom += np.power(nobs2 * S2, 2.0) / (nobs2 - 1)\n df = df_numer / df_denom\n pvalue = 2 * stats.t.sf(np.abs(wbfn), df)\n else:\n pvalue = 2 * stats.norm.sf(np.abs(wbfn))\n df = None\n\n # other info\n var1 = S1 / (nobs - nobs1)**2\n var2 = S2 / (nobs - nobs2)**2\n var_prob = (var1 / nobs1 + var2 / nobs2)\n var = nobs * (var1 / nobs1 + var2 / nobs2)\n prob1 = (meanr1 - (nobs1 + 1) / 2) / nobs2\n prob2 = (meanr2 - (nobs2 + 1) / 2) / nobs1\n\n return RankCompareResult(statistic=wbfn, pvalue=pvalue, s1=S1, s2=S2,\n var1=var1, var2=var2, var=var,\n var_prob=var_prob,\n nobs1=nobs1, nobs2=nobs2, nobs=nobs,\n mean1=meanr1, mean2=meanr2,\n prob1=prob1, prob2=prob2,\n somersd1=prob1 * 2 - 1, somersd2=prob2 * 2 - 1,\n df=df, use_t=use_t\n )\n\n\ndef rank_compare_2ordinal(count1, count2, ddof=1, use_t=True):\n \"\"\"stochastically larger probability for 2 independend ordinal samples\n\n This is a special case of `rank_compare_2indep` when the data are given as\n counts of two independent ordinal, i.e. ordered multinomial, samples.\n\n The statistic of interest is the probability that a random draw from the\n population of the first sample has a larger value than a random draw from\n the population of the second sample, specifically\n\n p = P(x1 > x2) + 0.5 * P(x1 = x2)\n\n Parameters\n ----------\n count1 : array_like\n Counts of the first sample, categories are assumed to be ordered.\n count2 : array_like\n Counts of the second sample, number of categories and ordering needs\n to be the same as for sample 1\n ddof : scalar\n Degrees of freedom correction for variance estimation. 
The default\n ddof=1 corresponds to `rank_compare_2indep`.\n use_t : bool\n If use_t is true, the t distribution with Welch-Satterthwaite type\n degrees of freedom is used for p-value and confidence interval.\n If use_t is false, then the normal distribution is used.\n\n Returns\n -------\n res : RankCompareResult\n This includes methods for hypothesis tests and confidence intervals\n for the probability that sample 1 is stochastically larger than\n sample 2.\n\n Notes\n -----\n The implementation is based on the appendix of Munzel and Hauschke (2003)\n with the addition of ``ddof`` so that the results match the general\n function `rank_compare_2indep`.\n\n \"\"\"\n\n count1 = np.asarray(count1)\n count2 = np.asarray(count2)\n nobs1, nobs2 = count1.sum(), count2.sum()\n freq1 = count1 / nobs1\n freq2 = count2 / nobs2\n cdf1 = np.concatenate(([0], freq1)).cumsum(axis=0)\n cdf2 = np.concatenate(([0], freq2)).cumsum(axis=0)\n\n # mid rank cdf\n cdfm1 = (cdf1[1:] + cdf1[:-1]) / 2\n cdfm2 = (cdf2[1:] + cdf2[:-1]) / 2\n prob1 = (cdfm2 * freq1).sum()\n prob2 = (cdfm1 * freq2).sum()\n\n var1 = (cdfm2**2 * freq1).sum() - prob1**2\n var2 = (cdfm1**2 * freq2).sum() - prob2**2\n\n var_prob = (var1 / (nobs1 - ddof) + var2 / (nobs2 - ddof))\n nobs = nobs1 + nobs2\n var = nobs * var_prob\n vn1 = var1 * nobs2 * nobs1 / (nobs1 - ddof)\n vn2 = var2 * nobs1 * nobs2 / (nobs2 - ddof)\n df = (vn1 + vn2)**2 / (vn1**2 / (nobs1 - 1) + vn2**2 / (nobs2 - 1))\n res = RankCompareResult(statistic=None, pvalue=None, s1=None, s2=None,\n var1=var1, var2=var2, var=var,\n var_prob=var_prob,\n nobs1=nobs1, nobs2=nobs2, nobs=nobs,\n mean1=None, mean2=None,\n prob1=prob1, prob2=prob2,\n somersd1=prob1 * 2 - 1, somersd2=prob2 * 2 - 1,\n df=df, use_t=use_t\n )\n\n return res\n\n\ndef prob_larger_continuous(distr1, distr2):\n \"\"\"probability indicating that distr1 is stochastically larger than distr2\n\n This computes\n\n p = P(x1 > x2)\n\n for two continuous distributions, where `distr1` and `distr2` are the\n distributions of random variables x1 and x2 respectively.\n\n Parameters\n ----------\n distr1, distr2 : distributions\n Two instances of scipy.stats.distributions. The required methods are\n cdf of the second distribution and expect of the first distribution.\n\n Returns\n -------\n p : probability x1 is larger than x2\n\n\n Notes\n -----\n This is a one-liner that is added mainly as reference.\n\n Examples\n --------\n >>> from scipy import stats\n >>> prob_larger_continuous(stats.norm, stats.t(5))\n 0.4999999999999999\n\n # which is the same as\n >>> stats.norm.expect(stats.t(5).cdf)\n 0.4999999999999999\n\n # distribution 1 with smaller mean (loc) than distribution 2\n >>> prob_larger_continuous(stats.norm, stats.norm(loc=1))\n 0.23975006109347669\n\n \"\"\"\n\n return distr1.expect(distr2.cdf)\n\n\ndef cohensd2problarger(d):\n \"\"\"convert Cohen's d effect size to stochastically-larger-probability\n\n This assumes observations are normally distributed.\n\n Computed as\n\n p = Prob(x1 > x2) = F(d / sqrt(2))\n\n where `F` is cdf of normal distribution. Cohen's d is defined as\n\n d = (mean1 - mean2) / std\n\n where ``std`` is the pooled within standard deviation.\n\n Parameters\n ----------\n d : float or array_like\n Cohen's d effect size for difference mean1 - mean2.\n\n Returns\n -------\n prob : float or ndarray\n Prob(x1 > x2)\n \"\"\"\n\n return stats.norm.cdf(d / np.sqrt(2))\n",
"# coding: utf-8\n\n# DO NOT EDIT\n# Autogenerated from the notebook statespace_local_linear_trend.ipynb.\n# Edit the notebook and then sync the output with this file.\n#\n# flake8: noqa\n# DO NOT EDIT\n\n# # State space modeling: Local Linear Trends\n\n# This notebook describes how to extend the statsmodels statespace classes\n# to create and estimate a custom model. Here we develop a local linear\n# trend model.\n#\n# The Local Linear Trend model has the form (see Durbin and Koopman 2012,\n# Chapter 3.2 for all notation and details):\n#\n# $$\n# \\begin{align}\n# y_t & = \\mu_t + \\varepsilon_t \\qquad & \\varepsilon_t \\sim\n# N(0, \\sigma_\\varepsilon^2) \\\\\n# \\mu_{t+1} & = \\mu_t + \\nu_t + \\xi_t & \\xi_t \\sim N(0, \\sigma_\\xi^2) \\\\\n# \\nu_{t+1} & = \\nu_t + \\zeta_t & \\zeta_t \\sim N(0, \\sigma_\\zeta^2)\n# \\end{align}\n# $$\n#\n# It is easy to see that this can be cast into state space form as:\n#\n# $$\n# \\begin{align}\n# y_t & = \\begin{pmatrix} 1 & 0 \\end{pmatrix} \\begin{pmatrix} \\mu_t \\\\\n# \\nu_t \\end{pmatrix} + \\varepsilon_t \\\\\n# \\begin{pmatrix} \\mu_{t+1} \\\\ \\nu_{t+1} \\end{pmatrix} & = \\begin{bmatrix}\n# 1 & 1 \\\\ 0 & 1 \\end{bmatrix} \\begin{pmatrix} \\mu_t \\\\ \\nu_t \\end{pmatrix}\n# + \\begin{pmatrix} \\xi_t \\\\ \\zeta_t \\end{pmatrix}\n# \\end{align}\n# $$\n#\n# Notice that much of the state space representation is composed of known\n# values; in fact the only parts in which parameters to be estimated appear\n# are in the variance / covariance matrices:\n#\n# $$\n# \\begin{align}\n# H_t & = \\begin{bmatrix} \\sigma_\\varepsilon^2 \\end{bmatrix} \\\\\n# Q_t & = \\begin{bmatrix} \\sigma_\\xi^2 & 0 \\\\ 0 & \\sigma_\\zeta^2\n# \\end{bmatrix}\n# \\end{align}\n# $$\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import norm\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\n\n# To take advantage of the existing infrastructure, including Kalman\n# filtering and maximum likelihood estimation, we create a new class which\n# extends from `statsmodels.tsa.statespace.MLEModel`. There are a number of\n# things that must be specified:\n#\n# 1. **k_states**, **k_posdef**: These two parameters must be provided to\n# the base classes in initialization. The inform the statespace model about\n# the size of, respectively, the state vector, above $\\begin{pmatrix} \\mu_t\n# & \\nu_t \\end{pmatrix}'$, and the state error vector, above\n# $\\begin{pmatrix} \\xi_t & \\zeta_t \\end{pmatrix}'$. Note that the dimension\n# of the endogenous vector does not have to be specified, since it can be\n# inferred from the `endog` array.\n# 2. **update**: The method `update`, with argument `params`, must be\n# specified (it is used when `fit()` is called to calculate the MLE). It\n# takes the parameters and fills them into the appropriate state space\n# matrices. For example, below, the `params` vector contains variance\n# parameters $\\begin{pmatrix} \\sigma_\\varepsilon^2 & \\sigma_\\xi^2 &\n# \\sigma_\\zeta^2\\end{pmatrix}$, and the `update` method must place them in\n# the observation and state covariance matrices. More generally, the\n# parameter vector might be mapped into many different places in all of the\n# statespace matrices.\n# 3. **statespace matrices**: by default, all state space matrices\n# (`obs_intercept, design, obs_cov, state_intercept, transition, selection,\n# state_cov`) are set to zeros. 
Values that are fixed (like the ones in the\n# design and transition matrices here) can be set in initialization, whereas\n# values that vary with the parameters should be set in the `update` method.\n# Note that it is easy to forget to set the selection matrix, which is often\n# just the identity matrix (as it is here), but not setting it will lead to\n# a very different model (one where there is not a stochastic component to\n# the transition equation).\n# 4. **start params**: start parameters must be set, even if it is just a\n# vector of zeros, although often good start parameters can be found from\n# the data. Maximum likelihood estimation by gradient methods (as employed\n# here) can be sensitive to the starting parameters, so it is important to\n# select good ones if possible. Here it does not matter too much (although\n# as variances, they should't be set zero).\n# 5. **initialization**: in addition to defined state space matrices, all\n# state space models must be initialized with the mean and variance for the\n# initial distribution of the state vector. If the distribution is known,\n# `initialize_known(initial_state, initial_state_cov)` can be called, or if\n# the model is stationary (e.g. an ARMA model), `initialize_stationary` can\n# be used. Otherwise, `initialize_approximate_diffuse` is a reasonable\n# generic initialization (exact diffuse initialization is not yet\n# available). Since the local linear trend model is not stationary (it is\n# composed of random walks) and since the distribution is not generally\n# known, we use `initialize_approximate_diffuse` below.\n#\n# The above are the minimum necessary for a successful model. There are\n# also a number of things that do not have to be set, but which may be\n# helpful or important for some applications:\n#\n# 1. **transform / untransform**: when `fit` is called, the optimizer in\n# the background will use gradient methods to select the parameters that\n# maximize the likelihood function. By default it uses unbounded\n# optimization, which means that it may select any parameter value. In many\n# cases, that is not the desired behavior; variances, for example, cannot be\n# negative. To get around this, the `transform` method takes the\n# unconstrained vector of parameters provided by the optimizer and returns a\n# constrained vector of parameters used in likelihood evaluation.\n# `untransform` provides the reverse operation.\n# 2. **param_names**: this internal method can be used to set names for\n# the estimated parameters so that e.g. the summary provides meaningful\n# names. 
If not present, parameters are named `param0`, `param1`, etc.\n\"\"\"\nUnivariate Local Linear Trend Model\n\"\"\"\n\n\nclass LocalLinearTrend(sm.tsa.statespace.MLEModel):\n def __init__(self, endog):\n # Model order\n k_states = k_posdef = 2\n\n # Initialize the statespace\n super(LocalLinearTrend, self).__init__(\n endog,\n k_states=k_states,\n k_posdef=k_posdef,\n initialization='approximate_diffuse',\n loglikelihood_burn=k_states)\n\n # Initialize the matrices\n self.ssm['design'] = np.array([1, 0])\n self.ssm['transition'] = np.array([[1, 1], [0, 1]])\n self.ssm['selection'] = np.eye(k_states)\n\n # Cache some indices\n self._state_cov_idx = ('state_cov', ) + np.diag_indices(k_posdef)\n\n @property\n def param_names(self):\n return ['sigma2.measurement', 'sigma2.level', 'sigma2.trend']\n\n @property\n def start_params(self):\n return [np.std(self.endog)] * 3\n\n def transform_params(self, unconstrained):\n return unconstrained**2\n\n def untransform_params(self, constrained):\n return constrained**0.5\n\n def update(self, params, *args, **kwargs):\n params = super(LocalLinearTrend, self).update(params, *args, **kwargs)\n\n # Observation covariance\n self.ssm['obs_cov', 0, 0] = params[0]\n\n # State covariance\n self.ssm[self._state_cov_idx] = params[1:]\n\n\n# Using this simple model, we can estimate the parameters from a local\n# linear trend model. The following example is from Commandeur and Koopman\n# (2007), section 3.4., modeling motor vehicle fatalities in Finland.\n\nimport requests\nfrom io import BytesIO\nfrom zipfile import ZipFile\n\n# Download the dataset\nck = requests.get(\n 'http://staff.feweb.vu.nl/koopman/projects/ckbook/OxCodeAll.zip').content\nzipped = ZipFile(BytesIO(ck))\ndf = pd.read_table(\n BytesIO(\n zipped.read('OxCodeIntroStateSpaceBook/Chapter_2/NorwayFinland.txt')),\n skiprows=1,\n header=None,\n sep='\\s+',\n engine='python',\n names=['date', 'nf', 'ff'])\n\n# Since we defined the local linear trend model as extending from\n# `MLEModel`, the `fit()` method is immediately available, just as in other\n# statsmodels maximum likelihood classes. Similarly, the returned results\n# class supports many of the same post-estimation results, like the\n# `summary` method.\n#\n\n# Load Dataset\ndf.index = pd.date_range(\n start='%d-01-01' % df.date[0], end='%d-01-01' % df.iloc[-1, 0], freq='AS')\n\n# Log transform\ndf['lff'] = np.log(df['ff'])\n\n# Setup the model\nmod = LocalLinearTrend(df['lff'])\n\n# Fit it using MLE (recall that we are fitting the three variance\n# parameters)\nres = mod.fit(disp=False)\nprint(res.summary())\n\n# Finally, we can do post-estimation prediction and forecasting. 
Notice\n# that the end period can be specified as a date.\n\n# Perform prediction and forecasting\npredict = res.get_prediction()\nforecast = res.get_forecast('2014')\n\nfig, ax = plt.subplots(figsize=(10, 4))\n\n# Plot the results\ndf['lff'].plot(ax=ax, style='k.', label='Observations')\npredict.predicted_mean.plot(ax=ax, label='One-step-ahead Prediction')\npredict_ci = predict.conf_int(alpha=0.05)\npredict_index = np.arange(len(predict_ci))\nax.fill_between(\n predict_index[2:],\n predict_ci.iloc[2:, 0],\n predict_ci.iloc[2:, 1],\n alpha=0.1)\n\nforecast.predicted_mean.plot(ax=ax, style='r', label='Forecast')\nforecast_ci = forecast.conf_int()\nforecast_index = np.arange(len(predict_ci), len(predict_ci) + len(forecast_ci))\nax.fill_between(\n forecast_index, forecast_ci.iloc[:, 0], forecast_ci.iloc[:, 1], alpha=0.1)\n\n# Cleanup the image\nax.set_ylim((4, 8))\nlegend = ax.legend(loc='lower left')\n\n# ### References\n#\n# Commandeur, Jacques J. F., and Siem Jan Koopman. 2007.\n# An Introduction to State Space Time Series Analysis.\n# Oxford ; New York: Oxford University Press.\n#\n# Durbin, James, and Siem Jan Koopman. 2012.\n# Time Series Analysis by State Space Methods: Second Edition.\n# Oxford University Press.\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n\nCreated on Sun Jan 06 09:50:54 2013\n\nAuthor: Josef Perktold\n\"\"\"\n\n\n\nif __name__ == '__main__':\n\n import numpy as np\n import matplotlib.pyplot as plt\n #from statsmodels.nonparametric.api import KernelReg\n import statsmodels.sandbox.nonparametric.kernel_extras as smke\n import statsmodels.sandbox.nonparametric.dgp_examples as dgp\n\n class UnivariateFunc1a(dgp.UnivariateFunc1):\n\n def het_scale(self, x):\n return 0.5\n\n seed = np.random.randint(999999)\n #seed = 430973\n #seed = 47829\n seed = 648456 #good seed for het_scale = 0.5\n print(seed)\n np.random.seed(seed)\n\n nobs, k_vars = 300, 3\n x = np.random.uniform(-2, 2, size=(nobs, k_vars))\n xb = x.sum(1) / 3 #beta = [1,1,1]\n\n k_vars_lin = 2\n x2 = np.random.uniform(-2, 2, size=(nobs, k_vars_lin))\n\n funcs = [#dgp.UnivariateFanGijbels1(),\n #dgp.UnivariateFanGijbels2(),\n #dgp.UnivariateFanGijbels1EU(),\n #dgp.UnivariateFanGijbels2(distr_x=stats.uniform(-2, 4))\n UnivariateFunc1a(x=xb)\n ]\n\n res = []\n fig = plt.figure()\n for i,func in enumerate(funcs):\n #f = func()\n f = func\n y = f.y + x2.sum(1)\n model = smke.SemiLinear(y, x2, x, 'ccc', k_vars_lin)\n mean, mfx = model.fit()\n ax = fig.add_subplot(1, 1, i+1)\n f.plot(ax=ax)\n xb_est = np.dot(model.exog, model.b)\n sortidx = np.argsort(xb_est) #f.x)\n ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, label='est. mean')\n# ax.plot(f.x, mean0, color='g', lw=2, label='est. mean')\n ax.legend(loc='upper left')\n res.append((model, mean, mfx))\n\n print('beta', model.b)\n print('scale - est', (y - (xb_est+mean)).std())\n print('scale - dgp realised, true', (y - (f.y_true + x2.sum(1))).std(), \\\n 2 * f.het_scale(1))\n fittedvalues = xb_est + mean\n resid = np.squeeze(model.endog) - fittedvalues\n print('corrcoef(fittedvalues, resid)', np.corrcoef(fittedvalues, resid)[0,1])\n print('variance of components, var and as fraction of var(y)')\n print('fitted values', fittedvalues.var(), fittedvalues.var() / y.var())\n print('linear ', xb_est.var(), xb_est.var() / y.var())\n print('nonparametric', mean.var(), mean.var() / y.var())\n print('residual ', resid.var(), resid.var() / y.var())\n print('\\ncovariance decomposition fraction of var(y)')\n print(np.cov(fittedvalues, resid) / model.endog.var(ddof=1))\n print('sum', (np.cov(fittedvalues, resid) / model.endog.var(ddof=1)).sum())\n print('\\ncovariance decomposition, xb, m, resid as fraction of var(y)')\n print(np.cov(np.column_stack((xb_est, mean, resid)), rowvar=False) / model.endog.var(ddof=1))\n\n fig.suptitle('Kernel Regression')\n fig.show()\n\n alpha = 0.7\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(f.x[sortidx], f.y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')\n ax.plot(f.x[sortidx], f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')\n ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. mean')\n ax.legend(loc='upper left')\n\n sortidx = np.argsort(xb_est + mean)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(f.x[sortidx], y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')\n ax.plot(f.x[sortidx], f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')\n ax.plot(f.x[sortidx], (xb_est + mean)[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. 
mean')\n ax.legend(loc='upper left')\n ax.set_title('Semilinear Model - observed and total fitted')\n\n fig = plt.figure()\n# ax = fig.add_subplot(1, 2, 1)\n# ax.plot(f.x, f.y, 'o', color='b', lw=2, alpha=alpha, label='observed')\n# ax.plot(f.x, f.y_true, 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')\n# ax.plot(f.x, mean, 'o', color='r', lw=2, alpha=alpha, label='est. mean')\n# ax.legend(loc='upper left')\n sortidx0 = np.argsort(xb)\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(f.y[sortidx0], 'o', color='b', lw=2, alpha=alpha, label='observed')\n ax.plot(f.y_true[sortidx0], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')\n ax.plot(mean[sortidx0], 'o', color='r', lw=2, alpha=alpha, label='est. mean')\n ax.legend(loc='upper left')\n ax.set_title('Single Index Model (sorted by true xb)')\n\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(y - xb_est, 'o', color='b', lw=2, alpha=alpha, label='observed')\n ax.plot(f.y_true, 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')\n ax.plot(mean, 'o', color='r', lw=2, alpha=alpha, label='est. mean')\n ax.legend(loc='upper left')\n ax.set_title('Single Index Model (nonparametric)')\n\n plt.figure()\n plt.plot(y, xb_est+mean, '.')\n plt.title('observed versus fitted values')\n\n plt.show()\n",
"# coding: utf-8\n\n# DO NOT EDIT\n# Autogenerated from the notebook robust_models_0.ipynb.\n# Edit the notebook and then sync the output with this file.\n#\n# flake8: noqa\n# DO NOT EDIT\n\n# # Robust Linear Models\n\nimport numpy as np\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nfrom statsmodels.sandbox.regression.predstd import wls_prediction_std\n\n# ## Estimation\n#\n# Load data:\n\ndata = sm.datasets.stackloss.load()\ndata.exog = sm.add_constant(data.exog)\n\n# Huber's T norm with the (default) median absolute deviation scaling\n\nhuber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT())\nhub_results = huber_t.fit()\nprint(hub_results.params)\nprint(hub_results.bse)\nprint(\n hub_results.summary(\n yname='y',\n xname=['var_%d' % i for i in range(len(hub_results.params))]))\n\n# Huber's T norm with 'H2' covariance matrix\n\nhub_results2 = huber_t.fit(cov=\"H2\")\nprint(hub_results2.params)\nprint(hub_results2.bse)\n\n# Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance\n# matrix\n\nandrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave())\nandrew_results = andrew_mod.fit(\n scale_est=sm.robust.scale.HuberScale(), cov=\"H3\")\nprint('Parameters: ', andrew_results.params)\n\n# See ``help(sm.RLM.fit)`` for more options and ``module sm.robust.scale``\n# for scale options\n#\n# ## Comparing OLS and RLM\n#\n# Artificial data with outliers:\n\nnsample = 50\nx1 = np.linspace(0, 20, nsample)\nX = np.column_stack((x1, (x1 - 5)**2))\nX = sm.add_constant(X)\nsig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger\nbeta = [5, 0.5, -0.0]\ny_true2 = np.dot(X, beta)\ny2 = y_true2 + sig * 1. * np.random.normal(size=nsample)\ny2[[39, 41, 43, 45, 48]] -= 5 # add some outliers (10% of nsample)\n\n# ### Example 1: quadratic function with linear truth\n#\n# Note that the quadratic term in OLS regression will capture outlier\n# effects.\n\nres = sm.OLS(y2, X).fit()\nprint(res.params)\nprint(res.bse)\nprint(res.predict())\n\n# Estimate RLM:\n\nresrlm = sm.RLM(y2, X).fit()\nprint(resrlm.params)\nprint(resrlm.bse)\n\n# Draw a plot to compare OLS estimates to the robust estimates:\n\nfig = plt.figure(figsize=(12, 8))\nax = fig.add_subplot(111)\nax.plot(x1, y2, 'o', label=\"data\")\nax.plot(x1, y_true2, 'b-', label=\"True\")\nprstd, iv_l, iv_u = wls_prediction_std(res)\nax.plot(x1, res.fittedvalues, 'r-', label=\"OLS\")\nax.plot(x1, iv_u, 'r--')\nax.plot(x1, iv_l, 'r--')\nax.plot(x1, resrlm.fittedvalues, 'g.-', label=\"RLM\")\nax.legend(loc=\"best\")\n\n# ### Example 2: linear function with linear truth\n#\n# Fit a new OLS model using only the linear term and the constant:\n\nX2 = X[:, [0, 1]]\nres2 = sm.OLS(y2, X2).fit()\nprint(res2.params)\nprint(res2.bse)\n\n# Estimate RLM:\n\nresrlm2 = sm.RLM(y2, X2).fit()\nprint(resrlm2.params)\nprint(resrlm2.bse)\n\n# Draw a plot to compare OLS estimates to the robust estimates:\n\nprstd, iv_l, iv_u = wls_prediction_std(res2)\n\nfig, ax = plt.subplots(figsize=(8, 6))\nax.plot(x1, y2, 'o', label=\"data\")\nax.plot(x1, y_true2, 'b-', label=\"True\")\nax.plot(x1, res2.fittedvalues, 'r-', label=\"OLS\")\nax.plot(x1, iv_u, 'r--')\nax.plot(x1, iv_l, 'r--')\nax.plot(x1, resrlm2.fittedvalues, 'g.-', label=\"RLM\")\nlegend = ax.legend(loc=\"best\")\n",
"\"\"\"\nRun all python examples to make sure they do not raise\n\"\"\"\nimport tempfile\n\nSHOW_PLOT = False\nBAD_FILES = ['robust_models_1']\n\n\ndef no_show(*args):\n pass\n\n\nif __name__ == '__main__':\n import glob\n import sys\n import matplotlib.pyplot as plt\n\n if not SHOW_PLOT:\n PLT_SHOW = plt.show\n plt.show = no_show\n\n SAVE_STDOUT = sys.stdout\n SAVE_STDERR = sys.stderr\n REDIRECT_STDOUT = tempfile.TemporaryFile('w')\n REDIRECT_STDERR = tempfile.TemporaryFile('w')\n\n EXAMPLE_FILES = glob.glob('python/*.py')\n for example in EXAMPLE_FILES:\n KNOWN_BAD_FILE = any([bf in example for bf in BAD_FILES])\n with open(example, 'r') as pyfile:\n code = pyfile.read()\n try:\n sys.stdout = REDIRECT_STDOUT\n sys.stderr = REDIRECT_STDERR\n exec(code)\n except Exception as e:\n sys.stderr = SAVE_STDERR\n print('FAIL: {0}'.format(example), file=sys.stderr)\n if KNOWN_BAD_FILE:\n print('This FAIL is expected', file=sys.stderr)\n else:\n print('The last error was: ', file=sys.stderr)\n print(e.__class__.__name__, file=sys.stderr)\n print(e, file=sys.stderr)\n else:\n sys.stdout = SAVE_STDOUT\n print('SUCCESS: {0}'.format(example))\n finally:\n plt.close('all')\n\n REDIRECT_STDOUT.close()\n REDIRECT_STDERR.close()\n sys.stdout = SAVE_STDOUT\n sys.stderr = SAVE_STDERR\n if not SHOW_PLOT:\n plt.show = PLT_SHOW\n"
] |
[
[
"matplotlib.pyplot.plot",
"numpy.arange",
"numpy.array",
"numpy.abs"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
],
[
"scipy.stats.norm.ppf",
"numpy.sqrt",
"scipy.stats.rankdata",
"numpy.power",
"numpy.asarray",
"numpy.abs",
"numpy.concatenate",
"numpy.atleast_1d",
"numpy.apply_along_axis",
"numpy.mean",
"numpy.choose"
],
[
"numpy.log",
"numpy.eye",
"matplotlib.pyplot.subplots",
"numpy.std",
"pandas.date_range",
"numpy.diag_indices",
"numpy.array"
],
[
"numpy.dot",
"numpy.random.seed",
"matplotlib.pyplot.title",
"numpy.squeeze",
"matplotlib.pyplot.plot",
"numpy.random.uniform",
"numpy.cov",
"numpy.random.randint",
"numpy.column_stack",
"numpy.corrcoef",
"numpy.argsort",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.dot",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.random.normal",
"numpy.column_stack",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.close"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mountain/self
|
[
"189e00e810d4d719fa6b37b400eef17d2521a64c"
] |
[
"src/gym_selfx/render/draw.py"
] |
[
"# -*- coding: utf-8 -*-\n#\n# Python version Copyright (c) 2015 John Stowers\n#\n# This software is provided 'as-is', without any express or implied\n# warranty. In no event will the authors be held liable for any damages\n# arising from the use of this software.\n# Permission is granted to anyone to use this software for any purpose,\n# including commercial applications, and to alter it and redistribute it\n# freely, subject to the following restrictions:\n# 1. The origin of this software must not be misrepresented; you must not\n# claim that you wrote the original software. If you use this software\n# in a product, an acknowledgment in the product documentation would be\n# appreciated but is not required.\n# 2. Altered source versions must be plainly marked as such, and must not be\n# misrepresented as being the original software.\n# 3. This notice may not be removed or altered from any source distribution.\n\nimport cv2\nimport random\nimport numpy as np\n\nfrom Box2D import (b2Color, b2DistanceJoint, b2MouseJoint, b2PulleyJoint)\nfrom Box2D.Box2D import (b2_staticBody as staticBody, b2_dynamicBody as dynamicBody, b2_kinematicBody as kinematicBody, b2PolygonShape as polygonShape,\n b2CircleShape as circleShape, b2LoopShape as loopShape, b2EdgeShape as edgeShape)\n\nimport matplotlib.pyplot as plt\n\n\ndef cvcolor(color):\n return int(255.0 * color[2]), int(255.0 * color[1]), int(255.0 * color[0])\n\n\ndef cvcoord(pos):\n return tuple(map(int, pos))\n\n\nclass OpencvDrawFuncs(object):\n\n def __init__(self, w, h, ppm, fill_polygon=True, flip_y=True):\n self._w = w\n self._h = h\n self._ppm = ppm\n self._colors = {\n staticBody: (255, 255, 255),\n dynamicBody: (255, 0, 0),\n kinematicBody: (127, 255, 230),\n }\n self._fill_polygon = fill_polygon\n self._flip_y = flip_y\n self.screen = np.zeros((self._h, self._w, 3), np.uint8)\n\n def install(self):\n polygonShape.draw = self._draw_polygon\n circleShape.draw = self._draw_circle\n loopShape.draw = self._draw_loop\n edgeShape.draw = self._draw_edge\n\n def draw_world(self, world):\n for body in world.bodies:\n for fixture in body.fixtures:\n fixture.shape.draw(body, fixture)\n\n for joint in world.joints:\n self._draw_joint(joint)\n\n def clear_screen(self, screen=None):\n if screen is None:\n self.screen.fill(0)\n else:\n self.screen = screen\n\n def _fix_vertices(self, vertices):\n if self._flip_y:\n return [(v[0], self._h - v[1]) for v in vertices]\n else:\n return [(v[0], v[1]) for v in vertices]\n\n def _draw_joint(self, joint):\n bodyA, bodyB = joint.bodyA, joint.bodyB\n xf1, xf2 = bodyA.transform, bodyB.transform\n x1, x2 = xf1.position, xf2.position\n p1, p2 = joint.anchorA, joint.anchorB\n color = b2Color(0.5, 0.8, 0.8)\n\n x1, x2, p1, p2 = self._fix_vertices((x1 * self._ppm, x2 * self._ppm,\n p1 * self._ppm, p2 * self._ppm))\n\n if isinstance(joint, b2DistanceJoint):\n cv2.line(self.screen, cvcoord(p1), cvcoord(p2), cvcolor(color), 1)\n elif isinstance(joint, b2PulleyJoint):\n s1, s2 = joint.groundAnchorA, joint.groundAnchorB\n s1, s2 = self._fix_vertices((s1 * self._ppm, s2 * self._ppm))\n cv2.line(self.screen, cvcoord(s1), cvcoord(p1), cvcolor(color), 1)\n cv2.line(self.screen, cvcoord(s2), cvcoord(p2), cvcolor(color), 1)\n cv2.line(self.screen, cvcoord(s1), cvcoord(s2), cvcolor(color), 1)\n elif isinstance(joint, b2MouseJoint):\n pass # don't draw it here\n else:\n cv2.line(self.screen, cvcoord(x1), cvcoord(p1), cvcolor(color), 1)\n cv2.line(self.screen, cvcoord(p1), cvcoord(p2), cvcolor(color), 1)\n cv2.line(self.screen, 
cvcoord(x2), cvcoord(p2), cvcolor(color), 1)\n\n def _draw_polygon(self, body, fixture):\n polygon = fixture.shape\n\n transform = body.transform\n vertices = self._fix_vertices([transform * v * self._ppm\n for v in polygon.vertices])\n\n pts = np.array(vertices, np.int32)\n pts = pts.reshape((-1, 1, 2))\n cv2.polylines(self.screen, [pts], True, self._colors[body.type])\n\n if self._fill_polygon:\n lightc = np.array(self._colors[body.type], dtype=int) * 0.5\n cv2.fillPoly(self.screen, [pts], lightc)\n\n def _draw_circle(self, body, fixture):\n circle = fixture.shape\n position = self._fix_vertices(\n [body.transform * circle.pos * self._ppm])[0]\n\n if self._fill_polygon:\n cv2.circle(self.screen, cvcoord(position), int(\n circle.radius * self._ppm), body.userData['color'], -1)\n else:\n cv2.circle(self.screen, cvcoord(position), int(\n circle.radius * self._ppm), body.userData['color'], 1)\n\n def _draw_edge(self, body, fixture):\n edge = fixture.shape\n v = [body.transform * edge.vertex1 * self._ppm,\n body.transform * edge.vertex2 * self._ppm]\n vertices = self._fix_vertices(v)\n cv2.line(self.screen, cvcoord(vertices[0]),\n cvcoord(vertices[1]), self._colors[body.type], 1)\n\n def _draw_loop(self, body, fixture):\n loop = fixture.shape\n transform = body.transform\n vertices = self._fix_vertices([transform * v * self._ppm\n for v in loop.vertices])\n v1 = vertices[-1]\n for v2 in vertices:\n cv2.line(self.screen, cvcoord(v1), cvcoord(v2),\n self._colors[body.type], 1)\n v1 = v2\n"
] |
[
[
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gunkaynar/cs464_hw2
|
[
"087cf7eab185644f5197891965f7a5ced93be447",
"087cf7eab185644f5197891965f7a5ced93be447"
] |
[
"q2/q2.py",
"q3/q3.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\ndf = pd.read_csv(\"question-2-features.csv\")\ndf2 = pd.read_csv(\"question-2-labels.csv\")\nx_train = df.to_numpy()\ny_train = df2.to_numpy()\n\ndef add_bias(x):\n if (len(x.shape)==1):\n x=x[:,np.newaxis]\n b=np.ones((x.shape[0],1))\n x=np.concatenate((b,x), axis=1)\n return x\n\ndef train(train_features, train_values):\n coefficients = np.dot(train_features.T, train_features)\n coefficients = np.linalg.inv(coefficients)\n coefficients = np.dot(coefficients, train_features.T)\n coefficients = np.dot(coefficients, train_values)\n return coefficients\n\n\ndef find_predictions(coefficients, features):\n predictions = 0\n x = features.T\n for i in range(coefficients.shape[0]):\n predictions += coefficients[i][0] * x[i]\n return predictions\n\n\n\ndef plot_curve(features, y_train, predictions):\n plt.plot(features[:,1],y_train,\"bo\",label='ground truth prices')\n plt.plot(features[:,1],predictions,\"ro\",label='predicted prices')\n plt.xlabel('lstat', color='#1C2833')\n plt.ylabel('price', color='#1C2833')\n plt.title('lstat vs price curve')\n plt.legend(loc='upper right')\n plt.savefig(\"plot1.png\")\n plt.show()\n\n\n\n\ndef find_mse(y_train, predictions):\n sum = 0\n for i in range(len(predictions)):\n dif = y_train[i][0] - predictions[i]\n sum += dif**2\n \n mse = sum / (i+1)\n return mse\n\nlstat = (add_bias(x_train[:,12]))\ncoefficients = train(lstat,y_train)\nprint(coefficients)\npredictions = find_predictions(coefficients,lstat)\nplot_curve(lstat,y_train,predictions)\n\nMSE = find_mse(y_train,predictions)\nprint(MSE)\n\n\nlstat2 = np.reshape(np.square(x_train[:,12]),(506,1))\nfeatures2 = np.append(lstat,lstat2,axis=1)\ncoefficients2 = train(features2,y_train)\nprint(coefficients2)\npredictions2 = find_predictions(coefficients2,features2)\nplot_curve(features2,y_train,predictions2)\nMSE2 = find_mse(y_train,predictions2)\nprint(MSE2)\n\n\n\n\n\n\n\n\n",
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\ndf = pd.read_csv(\"question-3-features-train.csv\")\nx_train = df.to_numpy()\ndf = pd.read_csv(\"question-3-labels-train.csv\")\ny_train = df.to_numpy()\ndf = pd.read_csv(\"question-3-features-test.csv\")\nx_test = df.to_numpy()\ndf = pd.read_csv(\"question-3-labels-test.csv\")\ny_test = df.to_numpy()\n\n\ndef normalize(x):\n min_ = np.min(x)\n max_ = np.max(x)\n range_ = max_ - min_\n\n return [(a - min_) / range_ for a in x]\n\n\n\ndef initialize_parameters(dimension):\n w = np.zeros((dimension,1),dtype=float)\n b = 0.0\n return w,b\n\ndef sigmoid(z):\n return 1/((1+np.exp(-z))+0.000001)\n\ndef scores(labels, predicted_labels):\n TP = 0\n TN = 0\n FP = 0\n FN = 0\n for n in range(0,len(labels)):\n if predicted_labels[n] == 1 and labels[n] == 1:\n TP +=1\n if predicted_labels[n] == 0 and labels[n] == 0:\n TN +=1\n if predicted_labels[n] == 1 and labels[n] == 0:\n FP +=1\n if predicted_labels[n] == 0 and labels[n] == 1:\n FN +=1\n accuracy = (TP+TN)/(TP+FP+FN+TN)\n precision = TP/(TP+FP)\n recall = TP/(TP+FN)\n npv = TN / (TN+FN)\n fpr = FP / (FP +TN)\n fdr = FP / (FP + TP)\n f1 = 2*(recall * precision) / (recall + precision)\n f2 = (5 * precision * recall) / (4 * precision + recall)\n print(\"accuracy = %.3f \" % accuracy)\n print(\"precision = %.3f \" % precision)\n print(\"recall = %.3f \" % recall)\n print(\"npv = %.3f \" % npv)\n print(\"fpr = %.3f \" % fpr)\n print(\"fdr = %.3f \" % fdr)\n print(\"f1 = %.3f \" % f1)\n print(\"f2 = %.3f \" % f2)\n\n return TP, TN, FP, FN\n\n\nm = np.shape(x_train)[0]\nm_test = np.shape(x_test)[0]\ndimension = np.shape(x_train)[1]\n\nx_train = np.array(normalize(x_train))\ny_predictions = []\nlearning_rate = [0.1,0.01,0.001,0.0001,0.00001]\n \nfor lr in learning_rate:\n w, b = initialize_parameters(dimension)\n print(\"learning rate is \" + str(lr))\n for i in range(1000):\n log_odds = np.array(np.dot(w.T,x_train.T) + b , dtype=np.float128)\n y_pred_proba = sigmoid(log_odds)\n log_likelihood = (1/m) * np.squeeze(np.dot(y_train.T,np.log(y_pred_proba.T)) + np.dot((1-y_train.T),np.log(1-y_pred_proba.T)))\n dw = np.dot(x_train.T,(y_train - y_pred_proba.T))\n db = np.sum(y_train - y_pred_proba.T)\n w = w + lr*dw\n b = b + lr*db\n if(i%100==0):\n print(\"log likelihood in \"+ str(i) +\" iterations is \" + str(log_likelihood))\n print(log_likelihood)\n y_predicted_test = np.zeros((m_test, 1))\n log_odds_test = np.dot(w.T,x_test.T) + b\n y_pred_proba_test = sigmoid(log_odds_test)\n for i in range(y_pred_proba_test.shape[1]):\n if y_pred_proba_test[0, i] > 0.5 :\n y_predicted_test[i,0] = 1\n else:\n y_predicted_test[i,0] = 0\n y_predictions.append(y_predicted_test)\n TP, TN, FP, FN = scores(y_test,y_predicted_test)\n print(\"TP \" +str(TP))\n print(\"TN \" +str(TN))\n print(\"FP \" +str(FP))\n print(\"FN \" +str(FN))\n\ny_predictions = np.array(y_predictions)\ny_predictions = np.reshape(y_predictions,(5,179))\n \n\n "
] |
[
[
"numpy.square",
"numpy.dot",
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.title",
"numpy.linalg.inv",
"matplotlib.pyplot.savefig",
"numpy.ones",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.append",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.dot",
"numpy.log",
"pandas.read_csv",
"numpy.min",
"numpy.reshape",
"numpy.max",
"numpy.shape",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jeandersonbc/pandas
|
[
"19a1072f1c96638025201c2e6a5805e749e9005c"
] |
[
"pandas/core/generic.py"
] |
[
"import collections\nfrom datetime import timedelta\nimport functools\nimport gc\nimport json\nimport operator\nimport pickle\nimport re\nfrom textwrap import dedent\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n FrozenSet,\n Hashable,\n List,\n Mapping,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n Union,\n)\nimport warnings\nimport weakref\n\nimport numpy as np\n\nfrom pandas._config import config\n\nfrom pandas._libs import Timestamp, iNaT, lib\nfrom pandas._typing import (\n Axis,\n FilePathOrBuffer,\n FrameOrSeries,\n JSONSerializable,\n Label,\n Level,\n Renamer,\n)\nfrom pandas.compat import set_function_name\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n doc,\n rewrite_axis_style_signature,\n)\nfrom pandas.util._validators import (\n validate_bool_kwarg,\n validate_fillna_kwargs,\n validate_percentile,\n)\n\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_object,\n ensure_str,\n is_bool,\n is_bool_dtype,\n is_datetime64_any_dtype,\n is_datetime64tz_dtype,\n is_dict_like,\n is_extension_array_dtype,\n is_float,\n is_integer,\n is_list_like,\n is_number,\n is_numeric_dtype,\n is_object_dtype,\n is_period_arraylike,\n is_re_compilable,\n is_scalar,\n is_timedelta64_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCSeries\nfrom pandas.core.dtypes.inference import is_hashable\nfrom pandas.core.dtypes.missing import isna, notna\n\nimport pandas as pd\nfrom pandas.core import missing, nanops\nimport pandas.core.algorithms as algos\nfrom pandas.core.base import PandasObject, SelectionMixin\nimport pandas.core.common as com\nfrom pandas.core.construction import create_series_with_explicit_dtype\nfrom pandas.core.indexes.api import (\n Index,\n InvalidIndexError,\n MultiIndex,\n RangeIndex,\n ensure_index,\n)\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexes.period import Period, PeriodIndex\nimport pandas.core.indexing as indexing\nfrom pandas.core.internals import BlockManager\nfrom pandas.core.missing import find_valid_index\nfrom pandas.core.ops import _align_method_FRAME\n\nfrom pandas.io.formats import format as fmt\nfrom pandas.io.formats.format import DataFrameFormatter, format_percentiles\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.tseries.frequencies import to_offset\n\nif TYPE_CHECKING:\n from pandas.core.resample import Resampler\n\n# goal is to be able to define the docs close to function, while still being\n# able to share\n_shared_docs: Dict[str, str] = dict()\n_shared_doc_kwargs = dict(\n axes=\"keywords for axes\",\n klass=\"Series/DataFrame\",\n axes_single_arg=\"int or labels for object\",\n args_transpose=\"axes to permute (int or label for object)\",\n optional_by=\"\"\"\n by : str or list of str\n Name or list of names to sort by\"\"\",\n)\n\n\ndef _single_replace(self, to_replace, method, inplace, limit):\n \"\"\"\n Replaces values in a Series using the fill method specified when no\n replacement value is given in the replace method\n \"\"\"\n if self.ndim != 1:\n raise TypeError(\n f\"cannot replace {to_replace} with method {method} on a \"\n f\"{type(self).__name__}\"\n )\n\n orig_dtype = self.dtype\n result = self if inplace else self.copy()\n fill_f = missing.get_fill_func(method)\n\n mask = missing.mask_missing(result.values, to_replace)\n values = 
fill_f(result.values, limit=limit, mask=mask)\n\n if values.dtype == orig_dtype and inplace:\n return\n\n result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)\n\n if inplace:\n self._update_inplace(result._data)\n return\n\n return result\n\n\nbool_t = bool # Need alias because NDFrame has def bool:\n\n\nclass NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):\n \"\"\"\n N-dimensional analogue of DataFrame. Store multi-dimensional in a\n size-mutable, labeled data structure\n\n Parameters\n ----------\n data : BlockManager\n axes : list\n copy : bool, default False\n \"\"\"\n\n _internal_names: List[str] = [\n \"_data\",\n \"_cacher\",\n \"_item_cache\",\n \"_cache\",\n \"_is_copy\",\n \"_subtyp\",\n \"_name\",\n \"_index\",\n \"_default_kind\",\n \"_default_fill_value\",\n \"_metadata\",\n \"__array_struct__\",\n \"__array_interface__\",\n ]\n _internal_names_set: Set[str] = set(_internal_names)\n _accessors: Set[str] = set()\n _deprecations: FrozenSet[str] = frozenset([\"get_values\"])\n _metadata: List[str] = []\n _is_copy = None\n _data: BlockManager\n _attrs: Dict[Optional[Hashable], Any]\n _typ: str\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n data: BlockManager,\n copy: bool = False,\n attrs: Optional[Mapping[Optional[Hashable], Any]] = None,\n ):\n # copy kwarg is retained for mypy compat, is not used\n\n object.__setattr__(self, \"_is_copy\", None)\n object.__setattr__(self, \"_data\", data)\n object.__setattr__(self, \"_item_cache\", {})\n if attrs is None:\n attrs = {}\n else:\n attrs = dict(attrs)\n object.__setattr__(self, \"_attrs\", attrs)\n\n @classmethod\n def _init_mgr(cls, mgr, axes=None, dtype=None, copy=False):\n \"\"\" passed a manager and a axes dict \"\"\"\n for a, axe in axes.items():\n if axe is not None:\n mgr = mgr.reindex_axis(\n axe, axis=cls._get_block_manager_axis(a), copy=False\n )\n\n # make a copy if explicitly requested\n if copy:\n mgr = mgr.copy()\n if dtype is not None:\n # avoid further copies if we can\n if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:\n mgr = mgr.astype(dtype=dtype)\n return mgr\n\n # ----------------------------------------------------------------------\n\n @property\n def attrs(self) -> Dict[Optional[Hashable], Any]:\n \"\"\"\n Dictionary of global attributes on this object.\n\n .. 
warning::\n\n attrs is experimental and may change without warning.\n \"\"\"\n if self._attrs is None:\n self._attrs = {}\n return self._attrs\n\n @attrs.setter\n def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:\n self._attrs = dict(value)\n\n @classmethod\n def _validate_dtype(cls, dtype):\n \"\"\" validate the passed dtype \"\"\"\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n # a compound dtype\n if dtype.kind == \"V\":\n raise NotImplementedError(\n \"compound dtypes are not implemented \"\n f\"in the {cls.__name__} constructor\"\n )\n\n return dtype\n\n # ----------------------------------------------------------------------\n # Construction\n\n @property\n def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:\n \"\"\"\n Used when a manipulation result has the same dimensions as the\n original.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_sliced(self):\n \"\"\"\n Used when a manipulation result has one lower dimension(s) as the\n original, such as DataFrame single columns slicing.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_expanddim(self):\n \"\"\"\n Used when a manipulation result has one higher dimension as the\n original, such as Series.to_frame()\n \"\"\"\n raise NotImplementedError\n\n # ----------------------------------------------------------------------\n # Axis\n _AXIS_ALIASES = {\"rows\": 0}\n _AXIS_IALIASES = {0: \"rows\"}\n _stat_axis_number = 0\n _stat_axis_name = \"index\"\n _ix = None\n _AXIS_ORDERS: List[str]\n _AXIS_NUMBERS: Dict[str, int]\n _AXIS_NAMES: Dict[int, str]\n _AXIS_REVERSED: bool\n _info_axis_number: int\n _info_axis_name: str\n _AXIS_LEN: int\n\n def _construct_axes_dict(self, axes=None, **kwargs):\n \"\"\"Return an axes dictionary for myself.\"\"\"\n d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}\n d.update(kwargs)\n return d\n\n @classmethod\n def _construct_axes_from_arguments(\n cls, args, kwargs, require_all: bool = False, sentinel=None\n ):\n \"\"\"\n Construct and returns axes if supplied in args/kwargs.\n\n If require_all, raise if all axis arguments are not supplied\n return a tuple of (axes, kwargs).\n\n sentinel specifies the default parameter when an axis is not\n supplied; useful to distinguish when a user explicitly passes None\n in scenarios where None has special meaning.\n \"\"\"\n # construct the args\n args = list(args)\n for a in cls._AXIS_ORDERS:\n\n # look for a argument by position\n if a not in kwargs:\n try:\n kwargs[a] = args.pop(0)\n except IndexError as err:\n if require_all:\n raise TypeError(\n \"not enough/duplicate arguments specified!\"\n ) from err\n\n axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}\n return axes, kwargs\n\n @classmethod\n def _get_axis_number(cls, axis):\n axis = cls._AXIS_ALIASES.get(axis, axis)\n if is_integer(axis):\n if axis in cls._AXIS_NAMES:\n return axis\n else:\n try:\n return cls._AXIS_NUMBERS[axis]\n except KeyError:\n pass\n raise ValueError(f\"No axis named {axis} for object type {cls}\")\n\n @classmethod\n def _get_axis_name(cls, axis):\n axis = cls._AXIS_ALIASES.get(axis, axis)\n if isinstance(axis, str):\n if axis in cls._AXIS_NUMBERS:\n return axis\n else:\n try:\n return cls._AXIS_NAMES[axis]\n except KeyError:\n pass\n raise ValueError(f\"No axis named {axis} for object type {cls}\")\n\n def _get_axis(self, axis):\n name = self._get_axis_name(axis)\n return getattr(self, name)\n\n @classmethod\n def _get_block_manager_axis(cls, axis):\n \"\"\"Map the axis 
to the block_manager axis.\"\"\"\n axis = cls._get_axis_number(axis)\n if cls._AXIS_REVERSED:\n m = cls._AXIS_LEN - 1\n return m - axis\n return axis\n\n def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:\n # index or columns\n axis_index = getattr(self, axis)\n d = dict()\n prefix = axis[0]\n\n for i, name in enumerate(axis_index.names):\n if name is not None:\n key = level = name\n else:\n # prefix with 'i' or 'c' depending on the input axis\n # e.g., you must do ilevel_0 for the 0th level of an unnamed\n # multiiindex\n key = f\"{prefix}level_{i}\"\n level = i\n\n level_values = axis_index.get_level_values(level)\n s = level_values.to_series()\n s.index = axis_index\n d[key] = s\n\n # put the index/columns itself in the dict\n if isinstance(axis_index, MultiIndex):\n dindex = axis_index\n else:\n dindex = axis_index.to_series()\n\n d[axis] = dindex\n return d\n\n def _get_index_resolvers(self) -> Dict[str, ABCSeries]:\n from pandas.core.computation.parsing import clean_column_name\n\n d: Dict[str, ABCSeries] = {}\n for axis_name in self._AXIS_ORDERS:\n d.update(self._get_axis_resolvers(axis_name))\n\n return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}\n\n def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:\n \"\"\"\n Return the special character free column resolvers of a dataframe.\n\n Column names with special characters are 'cleaned up' so that they can\n be referred to by backtick quoting.\n Used in :meth:`DataFrame.eval`.\n \"\"\"\n from pandas.core.computation.parsing import clean_column_name\n\n if isinstance(self, ABCSeries):\n return {clean_column_name(self.name): self}\n\n return {\n clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)\n }\n\n @property\n def _info_axis(self):\n return getattr(self, self._info_axis_name)\n\n @property\n def _stat_axis(self):\n return getattr(self, self._stat_axis_name)\n\n @property\n def shape(self) -> Tuple[int, ...]:\n \"\"\"\n Return a tuple of axis dimensions\n \"\"\"\n return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)\n\n @property\n def axes(self) -> List[Index]:\n \"\"\"\n Return index label(s) of the internal NDFrame\n \"\"\"\n # we do it this way because if we have reversed axes, then\n # the block manager shows then reversed\n return [self._get_axis(a) for a in self._AXIS_ORDERS]\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Return an int representing the number of axes / array dimensions.\n\n Return 1 if Series. Otherwise return 2 if DataFrame.\n\n See Also\n --------\n ndarray.ndim : Number of array dimensions.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.ndim\n 1\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.ndim\n 2\n \"\"\"\n return self._data.ndim\n\n @property\n def size(self) -> int:\n \"\"\"\n Return an int representing the number of elements in this object.\n\n Return the number of rows if Series. 
Otherwise return the number of\n rows times number of columns if DataFrame.\n\n See Also\n --------\n ndarray.size : Number of elements in the array.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.size\n 3\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.size\n 4\n \"\"\"\n return np.prod(self.shape)\n\n @property\n def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n @property\n def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):\n \"\"\"\n Assign desired index to given axis.\n\n Indexes for%(extended_summary_sub)s row labels can be changed by assigning\n a list-like or Index.\n\n .. versionchanged:: 0.21.0\n\n The signature is now `labels` and `axis`, consistent with\n the rest of pandas API. Previously, the `axis` and `labels`\n arguments were respectively the first and second positional\n arguments.\n\n Parameters\n ----------\n labels : list-like, Index\n The values for the new index.\n\n axis : %(axes_single_arg)s, default 0\n The axis to update. The value 0 identifies the rows%(axis_description_sub)s.\n\n inplace : bool, default False\n Whether to return a new %(klass)s instance.\n\n Returns\n -------\n renamed : %(klass)s or None\n An object of type %(klass)s if inplace=False, None otherwise.\n\n See Also\n --------\n %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.\n \"\"\"\n if inplace:\n setattr(self, self._get_axis_name(axis), labels)\n else:\n obj = self.copy()\n obj.set_axis(labels, axis=axis, inplace=True)\n return obj\n\n def _set_axis(self, axis: int, labels: Index) -> None:\n labels = ensure_index(labels)\n self._data.set_axis(axis, labels)\n self._clear_item_cache()\n\n def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:\n \"\"\"\n Interchange axes and swap values axes appropriately.\n\n Returns\n -------\n y : same as input\n \"\"\"\n i = self._get_axis_number(axis1)\n j = self._get_axis_number(axis2)\n\n if i == j:\n if copy:\n return self.copy()\n return self\n\n mapping = {i: j, j: i}\n\n new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))\n new_values = self.values.swapaxes(i, j)\n if copy:\n new_values = new_values.copy()\n\n return self._constructor(new_values, *new_axes).__finalize__(self)\n\n def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:\n \"\"\"\n Return DataFrame with requested index / column level(s) removed.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n level : int, str, or list-like\n If a string is given, must be the name of a level\n If list-like, elements must be names or positional indexes\n of levels.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis along which the level(s) is removed:\n\n * 0 or 'index': remove level(s) in column.\n * 1 or 'columns': remove level(s) in row.\n\n Returns\n -------\n DataFrame\n DataFrame with requested index / column level(s) removed.\n\n Examples\n --------\n >>> df = pd.DataFrame([\n ... [1, 2, 3, 4],\n ... [5, 6, 7, 8],\n ... [9, 10, 11, 12]\n ... ]).set_index([0, 1]).rename_axis(['a', 'b'])\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... ('c', 'e'), ('d', 'f')\n ... 
], names=['level_1', 'level_2'])\n\n >>> df\n level_1 c d\n level_2 e f\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n\n >>> df.droplevel('a')\n level_1 c d\n level_2 e f\n b\n 2 3 4\n 6 7 8\n 10 11 12\n\n >>> df.droplevel('level_2', axis=1)\n level_1 c d\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n \"\"\"\n labels = self._get_axis(axis)\n new_labels = labels.droplevel(level)\n result = self.set_axis(new_labels, axis=axis, inplace=False)\n return result\n\n def pop(self: FrameOrSeries, item) -> FrameOrSeries:\n \"\"\"\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n \"\"\"\n result = self[item]\n del self[item]\n try:\n result._reset_cacher()\n except AttributeError:\n pass\n\n return result\n\n def squeeze(self, axis=None):\n \"\"\"\n Squeeze 1 dimensional axis objects into scalars.\n\n Series or DataFrames with a single element are squeezed to a scalar.\n DataFrames with a single column or a single row are squeezed to a\n Series. Otherwise the object is unchanged.\n\n This method is most useful when you don't know if your\n object is a Series or DataFrame, but you do know it has just a single\n column. In that case you can safely call `squeeze` to ensure you have a\n Series.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default None\n A specific axis to squeeze. 
By default, all length-1 axes are\n squeezed.\n\n Returns\n -------\n DataFrame, Series, or scalar\n The projection after squeezing `axis` or all the axes.\n\n See Also\n --------\n Series.iloc : Integer-location based indexing for selecting scalars.\n DataFrame.iloc : Integer-location based indexing for selecting Series.\n Series.to_frame : Inverse of DataFrame.squeeze for a\n single-column DataFrame.\n\n Examples\n --------\n >>> primes = pd.Series([2, 3, 5, 7])\n\n Slicing might produce a Series with a single value:\n\n >>> even_primes = primes[primes % 2 == 0]\n >>> even_primes\n 0 2\n dtype: int64\n\n >>> even_primes.squeeze()\n 2\n\n Squeezing objects with more than one value in every axis does nothing:\n\n >>> odd_primes = primes[primes % 2 == 1]\n >>> odd_primes\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n >>> odd_primes.squeeze()\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n Squeezing is even more effective when used with DataFrames.\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n >>> df\n a b\n 0 1 2\n 1 3 4\n\n Slicing a single column will produce a DataFrame with the columns\n having only one value:\n\n >>> df_a = df[['a']]\n >>> df_a\n a\n 0 1\n 1 3\n\n So the columns can be squeezed down, resulting in a Series:\n\n >>> df_a.squeeze('columns')\n 0 1\n 1 3\n Name: a, dtype: int64\n\n Slicing a single row from a single column will produce a single\n scalar DataFrame:\n\n >>> df_0a = df.loc[df.index < 1, ['a']]\n >>> df_0a\n a\n 0 1\n\n Squeezing the rows produces a single scalar Series:\n\n >>> df_0a.squeeze('rows')\n a 1\n Name: 0, dtype: int64\n\n Squeezing all axes will project directly into a scalar:\n\n >>> df_0a.squeeze()\n 1\n \"\"\"\n axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)\n return self.iloc[\n tuple(\n 0 if i in axis and len(a) == 1 else slice(None)\n for i, a in enumerate(self.axes)\n )\n ]\n\n # ----------------------------------------------------------------------\n # Rename\n\n def rename(\n self: FrameOrSeries,\n mapper: Optional[Renamer] = None,\n *,\n index: Optional[Renamer] = None,\n columns: Optional[Renamer] = None,\n axis: Optional[Axis] = None,\n copy: bool = True,\n inplace: bool = False,\n level: Optional[Level] = None,\n errors: str = \"ignore\",\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Alter axes input function or functions. Function / dict values must be\n unique (1-to-1). Labels not contained in a dict / Series will be left\n as-is. Extra labels listed don't throw an error. Alternatively, change\n ``Series.name`` with a scalar value (Series only).\n\n Parameters\n ----------\n %(axes)s : scalar, list-like, dict-like or function, optional\n Scalar or list-like will alter the ``Series.name`` attribute,\n and raise on DataFrame.\n dict-like or functions are transformations to apply to\n that axis' values\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Whether to return a new %(klass)s. 
If True then value of copy is\n ignored.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified\n level.\n errors : {'ignore', 'raise'}, default 'ignore'\n If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,\n or `columns` contains labels that are not present in the Index\n being transformed.\n If 'ignore', existing keys will be renamed and extra keys will be\n ignored.\n\n Returns\n -------\n renamed : %(klass)s (new object)\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis and\n \"errors='raise'\".\n\n See Also\n --------\n NDFrame.rename_axis\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.rename(\"my_name\") # scalar, changes Series.name\n 0 1\n 1 2\n 2 3\n Name: my_name, dtype: int64\n >>> s.rename(lambda x: x ** 2) # function, changes labels\n 0 1\n 1 2\n 4 3\n dtype: int64\n >>> s.rename({1: 3, 2: 5}) # mapping, changes labels\n 0 1\n 3 2\n 5 3\n dtype: int64\n\n Since ``DataFrame`` doesn't have a ``.name`` attribute,\n only mapping-type arguments are allowed.\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n >>> df.rename(2)\n Traceback (most recent call last):\n ...\n TypeError: 'int' object is not callable\n\n ``DataFrame.rename`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"B\": \"c\"})\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"C\": \"c\"})\n a B\n 0 1 4\n 1 2 5\n 2 3 6\n\n Using axis-style parameters\n\n >>> df.rename(str.lower, axis='columns')\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename({1: 2, 2: 4}, axis='index')\n A B\n 0 1 4\n 2 2 5\n 4 3 6\n\n See the :ref:`user guide <basics.rename>` for more.\n \"\"\"\n if mapper is None and index is None and columns is None:\n raise TypeError(\"must pass an index to rename\")\n\n if index is not None or columns is not None:\n if axis is not None:\n raise TypeError(\n \"Cannot specify both 'axis' and any of 'index' or 'columns'\"\n )\n elif mapper is not None:\n raise TypeError(\n \"Cannot specify both 'mapper' and any of 'index' or 'columns'\"\n )\n else:\n # use the mapper argument\n if axis and self._get_axis_number(axis) == 1:\n columns = mapper\n else:\n index = mapper\n\n result = self if inplace else self.copy(deep=copy)\n\n for axis_no, replacements in enumerate((index, columns)):\n if replacements is None:\n continue\n\n ax = self._get_axis(axis_no)\n baxis = self._get_block_manager_axis(axis_no)\n f = com.get_rename_function(replacements)\n\n if level is not None:\n level = ax._get_level_number(level)\n\n # GH 13473\n if not callable(replacements):\n indexer = ax.get_indexer_for(replacements)\n if errors == \"raise\" and len(indexer[indexer == -1]):\n missing_labels = [\n label\n for index, label in enumerate(replacements)\n if indexer[index] == -1\n ]\n raise KeyError(f\"{missing_labels} not found in axis\")\n\n result._data = result._data.rename_axis(\n f, axis=baxis, copy=copy, level=level\n )\n result._clear_item_cache()\n\n if inplace:\n self._update_inplace(result._data)\n return None\n else:\n return result.__finalize__(self)\n\n @rewrite_axis_style_signature(\"mapper\", [(\"copy\", True), (\"inplace\", False)])\n def rename_axis(self, mapper=lib.no_default, **kwargs):\n \"\"\"\n 
Set the name of the axis for the index or columns.\n\n Parameters\n ----------\n mapper : scalar, list-like, optional\n Value to set the axis name attribute.\n index, columns : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to that axis' values.\n\n Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index``\n and/or ``columns``.\n\n .. versionchanged:: 0.24.0\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to rename.\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Series\n or DataFrame.\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Notes\n -----\n ``DataFrame.rename_axis`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n The first calling convention will only modify the names of\n the index and/or the names of the Index object that is the columns.\n In this case, the parameter ``copy`` is ignored.\n\n The second calling convention will modify the names of the\n the corresponding index if mapper is a list or a scalar.\n However, if mapper is dict-like or a function, it will use the\n deprecated behavior of modifying the axis *labels*.\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([\"dog\", \"cat\", \"monkey\"])\n >>> s\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n >>> s.rename_axis(\"animal\")\n animal\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n\n **DataFrame**\n\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2],\n ... \"num_arms\": [0, 0, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs num_arms\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"animal\")\n >>> df\n num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"limbs\", axis=\"columns\")\n >>> df\n limbs num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n\n **MultiIndex**\n\n >>> df.index = pd.MultiIndex.from_product([['mammal'],\n ... ['dog', 'cat', 'monkey']],\n ... 
names=['type', 'name'])\n >>> df\n limbs num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(index={'type': 'class'})\n limbs num_legs num_arms\n class name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(columns=str.upper)\n LIMBS num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n \"\"\"\n axes, kwargs = self._construct_axes_from_arguments(\n (), kwargs, sentinel=lib.no_default\n )\n copy = kwargs.pop(\"copy\", True)\n inplace = kwargs.pop(\"inplace\", False)\n axis = kwargs.pop(\"axis\", 0)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n if kwargs:\n raise TypeError(\n \"rename_axis() got an unexpected keyword \"\n f'argument \"{list(kwargs.keys())[0]}\"'\n )\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if mapper is not lib.no_default:\n # Use v0.23 behavior if a scalar or list\n non_mapper = is_scalar(mapper) or (\n is_list_like(mapper) and not is_dict_like(mapper)\n )\n if non_mapper:\n return self._set_axis_name(mapper, axis=axis, inplace=inplace)\n else:\n raise ValueError(\"Use `.rename` to alter labels with a mapper.\")\n else:\n # Use new behavior. Means that index and/or columns\n # is specified\n result = self if inplace else self.copy(deep=copy)\n\n for axis in range(self._AXIS_LEN):\n v = axes.get(self._AXIS_NAMES[axis])\n if v is lib.no_default:\n continue\n non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))\n if non_mapper:\n newnames = v\n else:\n f = com.get_rename_function(v)\n curnames = self._get_axis(axis).names\n newnames = [f(name) for name in curnames]\n result._set_axis_name(newnames, axis=axis, inplace=True)\n if not inplace:\n return result\n\n def _set_axis_name(self, name, axis=0, inplace=False):\n \"\"\"\n Set the name(s) of the axis.\n\n Parameters\n ----------\n name : str or list of str\n Name(s) to set.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to set the label. The value 0 or 'index' specifies index,\n and the value 1 or 'columns' specifies columns.\n inplace : bool, default False\n If `True`, do operation inplace and return None.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or `None` if `inplace` is `True`.\n\n See Also\n --------\n DataFrame.rename : Alter the axis labels of :class:`DataFrame`.\n Series.rename : Alter the index labels or set the index name\n of :class:`Series`.\n Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs\n dog 4\n cat 4\n monkey 2\n >>> df._set_axis_name(\"animal\")\n num_legs\n animal\n dog 4\n cat 4\n monkey 2\n >>> df.index = pd.MultiIndex.from_product(\n ... 
[[\"mammal\"], ['dog', 'cat', 'monkey']])\n >>> df._set_axis_name([\"type\", \"name\"])\n legs\n type name\n mammal dog 4\n cat 4\n monkey 2\n \"\"\"\n axis = self._get_axis_number(axis)\n idx = self._get_axis(axis).set_names(name)\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n renamed = self if inplace else self.copy()\n renamed.set_axis(idx, axis=axis, inplace=True)\n if not inplace:\n return renamed\n\n # ----------------------------------------------------------------------\n # Comparison Methods\n\n def _indexed_same(self, other) -> bool:\n return all(\n self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS\n )\n\n def equals(self, other):\n \"\"\"\n Test whether two objects contain the same elements.\n\n This function allows two Series or DataFrames to be compared against\n each other to see if they have the same shape and elements. NaNs in\n the same location are considered equal. The column headers do not\n need to have the same type, but the elements within the columns must\n be the same dtype.\n\n Parameters\n ----------\n other : Series or DataFrame\n The other Series or DataFrame to be compared with the first.\n\n Returns\n -------\n bool\n True if all elements are the same in both objects, False\n otherwise.\n\n See Also\n --------\n Series.eq : Compare two Series objects of the same length\n and return a Series where each element is True if the element\n in each Series is equal, False otherwise.\n DataFrame.eq : Compare two DataFrame objects of the same shape and\n return a DataFrame where each element is True if the respective\n element in each DataFrame is equal, False otherwise.\n testing.assert_series_equal : Raises an AssertionError if left and\n right are not equal. Provides an easy interface to ignore\n inequality in dtypes, indexes and precision among others.\n testing.assert_frame_equal : Like assert_series_equal, but targets\n DataFrames.\n numpy.array_equal : Return True if two arrays have the same shape\n and elements, False otherwise.\n\n Notes\n -----\n This function requires that the elements have the same dtype as their\n respective elements in the other Series or DataFrame. 
However, the\n column labels do not need to have the same type, as long as they are\n still considered equal.\n\n Examples\n --------\n >>> df = pd.DataFrame({1: [10], 2: [20]})\n >>> df\n 1 2\n 0 10 20\n\n DataFrames df and exactly_equal have the same types and values for\n their elements and column labels, which will return True.\n\n >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})\n >>> exactly_equal\n 1 2\n 0 10 20\n >>> df.equals(exactly_equal)\n True\n\n DataFrames df and different_column_type have the same element\n types and values, but have different types for the column labels,\n which will still return True.\n\n >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})\n >>> different_column_type\n 1.0 2.0\n 0 10 20\n >>> df.equals(different_column_type)\n True\n\n DataFrames df and different_data_type have different types for the\n same values for their elements, and will return False even though\n their column labels are the same values and types.\n\n >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})\n >>> different_data_type\n 1 2\n 0 10.0 20.0\n >>> df.equals(different_data_type)\n False\n \"\"\"\n if not isinstance(other, self._constructor):\n return False\n return self._data.equals(other._data)\n\n # -------------------------------------------------------------------------\n # Unary Methods\n\n def __neg__(self):\n values = com.values_from_object(self)\n if is_bool_dtype(values):\n arr = operator.inv(values)\n elif (\n is_numeric_dtype(values)\n or is_timedelta64_dtype(values)\n or is_object_dtype(values)\n ):\n arr = operator.neg(values)\n else:\n raise TypeError(f\"Unary negative expects numeric dtype, not {values.dtype}\")\n return self.__array_wrap__(arr)\n\n def __pos__(self):\n values = com.values_from_object(self)\n if is_bool_dtype(values) or is_period_arraylike(values):\n arr = values\n elif (\n is_numeric_dtype(values)\n or is_timedelta64_dtype(values)\n or is_object_dtype(values)\n ):\n arr = operator.pos(values)\n else:\n raise TypeError(f\"Unary plus expects numeric dtype, not {values.dtype}\")\n return self.__array_wrap__(arr)\n\n def __invert__(self):\n if not self.size:\n # inv fails with 0 len\n return self\n\n new_data = self._data.apply(operator.invert)\n result = self._constructor(new_data).__finalize__(self)\n return result\n\n def __nonzero__(self):\n raise ValueError(\n f\"The truth value of a {type(self).__name__} is ambiguous. \"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\"\n )\n\n __bool__ = __nonzero__\n\n def bool(self):\n \"\"\"\n Return the bool of a single element PandasObject.\n\n This must be a boolean scalar value, either True or False. Raise a\n ValueError if the PandasObject does not have exactly 1 element, or that\n element is not boolean\n\n Returns\n -------\n bool\n Same single boolean value converted to bool type.\n \"\"\"\n v = self.squeeze()\n if isinstance(v, (bool, np.bool_)):\n return bool(v)\n elif is_scalar(v):\n raise ValueError(\n \"bool cannot act on a non-boolean single element \"\n f\"{type(self).__name__}\"\n )\n\n self.__nonzero__()\n\n def __abs__(self: FrameOrSeries) -> FrameOrSeries:\n return self.abs()\n\n def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:\n return self.round(decimals)\n\n # -------------------------------------------------------------------------\n # Label or Level Combination Helpers\n #\n # A collection of helper methods for DataFrame/Series operations that\n # accept a combination of column/index labels and levels. 
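# A minimal sketch (public API only, assuming just pandas) of how the unary
# dispatch above behaves: numeric data goes through operator.neg/operator.pos,
# while ~ applies operator.invert block-wise and bool() only accepts a single
# boolean element.
import pandas as pd

s = pd.Series([1, -2, 3])
flags = pd.Series([True, False, True])

-s                         # numeric dtype -> operator.neg: [-1, 2, -3]
+s                         # numeric dtype -> operator.pos: [1, -2, 3]
~flags                     # __invert__ -> block-wise operator.invert: [False, True, False]
pd.Series([True]).bool()   # exactly one boolean element -> True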
All such\n # operations should utilize/extend these methods when possible so that we\n # have consistent precedence and validation logic throughout the library.\n\n def _is_level_reference(self, key, axis=0):\n \"\"\"\n Test whether a key is a level reference for a given axis.\n\n To be considered a level reference, `key` must be a string that:\n - (axis=0): Matches the name of an index level and does NOT match\n a column label.\n - (axis=1): Matches the name of a column level and does NOT match\n an index label.\n\n Parameters\n ----------\n key : str\n Potential level name for the given axis\n axis : int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_level : bool\n \"\"\"\n axis = self._get_axis_number(axis)\n\n return (\n key is not None\n and is_hashable(key)\n and key in self.axes[axis].names\n and not self._is_label_reference(key, axis=axis)\n )\n\n def _is_label_reference(self, key, axis=0) -> bool_t:\n \"\"\"\n Test whether a key is a label reference for a given axis.\n\n To be considered a label reference, `key` must be a string that:\n - (axis=0): Matches a column label\n - (axis=1): Matches an index label\n\n Parameters\n ----------\n key: str\n Potential label name\n axis: int, default 0\n Axis perpendicular to the axis that labels are associated with\n (0 means search for column labels, 1 means search for index labels)\n\n Returns\n -------\n is_label: bool\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)\n\n return (\n key is not None\n and is_hashable(key)\n and any(key in self.axes[ax] for ax in other_axes)\n )\n\n def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:\n \"\"\"\n Test whether a key is a label or level reference for a given axis.\n\n To be considered either a label or a level reference, `key` must be a\n string that:\n - (axis=0): Matches a column label or an index level\n - (axis=1): Matches an index label or a column level\n\n Parameters\n ----------\n key: str\n Potential label or level name\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_label_or_level: bool\n \"\"\"\n return self._is_level_reference(key, axis=axis) or self._is_label_reference(\n key, axis=axis\n )\n\n def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:\n \"\"\"\n Check whether `key` is ambiguous.\n\n By ambiguous, we mean that it matches both a level of the input\n `axis` and a label of the other axis.\n\n Parameters\n ----------\n key: str or object\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns).\n\n Raises\n ------\n ValueError: `key` is ambiguous\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)\n\n if (\n key is not None\n and is_hashable(key)\n and key in self.axes[axis].names\n and any(key in self.axes[ax] for ax in other_axes)\n ):\n\n # Build an informative and grammatical warning\n level_article, level_type = (\n (\"an\", \"index\") if axis == 0 else (\"a\", \"column\")\n )\n\n label_article, label_type = (\n (\"a\", \"column\") if axis == 0 else (\"an\", \"index\")\n )\n\n msg = (\n f\"'{key}' is both {level_article} {level_type} level and \"\n f\"{label_article} {label_type} label, which is ambiguous.\"\n )\n raise ValueError(msg)\n\n def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:\n \"\"\"\n 
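# A small sketch of the label/level distinction these helpers enforce, using
# only public API (assumes just pandas). A name that is both a column label
# and an index level is rejected via _check_label_or_level_ambiguity, e.g.
# when grouping:
import pandas as pd

df = pd.DataFrame({"a": [1, 1, 2], "b": [10, 20, 30]})
df.index = pd.Index([1, 2, 3], name="a")   # index level named like column "a"

df.groupby("b").sum()      # "b" is only a column label -> unambiguous
try:
    df.groupby("a").sum()  # "a" is both an index level and a column label
except ValueError as err:
    print(err)             # "'a' is both an index level and a column label, which is ambiguous."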
Return a 1-D array of values associated with `key`, a label or level\n from the given `axis`.\n\n Retrieval logic:\n - (axis=0): Return column values if `key` matches a column label.\n Otherwise return index level values if `key` matches an index\n level.\n - (axis=1): Return row values if `key` matches an index label.\n Otherwise return column level values if 'key' matches a column\n level\n\n Parameters\n ----------\n key: str\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n values: np.ndarray\n\n Raises\n ------\n KeyError\n if `key` matches neither a label nor a level\n ValueError\n if `key` matches multiple labels\n FutureWarning\n if `key` is ambiguous. This will become an ambiguity error in a\n future version\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]\n\n if self._is_label_reference(key, axis=axis):\n self._check_label_or_level_ambiguity(key, axis=axis)\n values = self.xs(key, axis=other_axes[0])._values\n elif self._is_level_reference(key, axis=axis):\n values = self.axes[axis].get_level_values(key)._values\n else:\n raise KeyError(key)\n\n # Check for duplicates\n if values.ndim > 1:\n\n if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):\n multi_message = (\n \"\\n\"\n \"For a multi-index, the label must be a \"\n \"tuple with elements corresponding to each level.\"\n )\n else:\n multi_message = \"\"\n\n label_axis_name = \"column\" if axis == 0 else \"index\"\n raise ValueError(\n (\n f\"The {label_axis_name} label '{key}' \"\n f\"is not unique.{multi_message}\"\n )\n )\n\n return values\n\n def _drop_labels_or_levels(self, keys, axis: int = 0):\n \"\"\"\n Drop labels and/or levels for the given `axis`.\n\n For each key in `keys`:\n - (axis=0): If key matches a column label then drop the column.\n Otherwise if key matches an index level then drop the level.\n - (axis=1): If key matches an index label then drop the row.\n Otherwise if key matches a column level then drop the level.\n\n Parameters\n ----------\n keys: str or list of str\n labels or levels to drop\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n dropped: DataFrame\n\n Raises\n ------\n ValueError\n if any `keys` match neither a label nor a level\n \"\"\"\n axis = self._get_axis_number(axis)\n\n # Validate keys\n keys = com.maybe_make_list(keys)\n invalid_keys = [\n k for k in keys if not self._is_label_or_level_reference(k, axis=axis)\n ]\n\n if invalid_keys:\n raise ValueError(\n (\n \"The following keys are not valid labels or \"\n f\"levels for axis {axis}: {invalid_keys}\"\n )\n )\n\n # Compute levels and labels to drop\n levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]\n\n labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]\n\n # Perform copy upfront and then use inplace operations below.\n # This ensures that we always perform exactly one copy.\n # ``copy`` and/or ``inplace`` options could be added in the future.\n dropped = self.copy()\n\n if axis == 0:\n # Handle dropping index levels\n if levels_to_drop:\n dropped.reset_index(levels_to_drop, drop=True, inplace=True)\n\n # Handle dropping columns labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=1, inplace=True)\n else:\n # Handle dropping column levels\n if levels_to_drop:\n if isinstance(dropped.columns, MultiIndex):\n # Drop the specified 
levels from the MultiIndex\n dropped.columns = dropped.columns.droplevel(levels_to_drop)\n else:\n # Drop the last level of Index by replacing with\n # a RangeIndex\n dropped.columns = RangeIndex(dropped.columns.size)\n\n # Handle dropping index labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=0, inplace=True)\n\n return dropped\n\n # ----------------------------------------------------------------------\n # Iteration\n\n def __hash__(self):\n raise TypeError(\n f\"{repr(type(self).__name__)} objects are mutable, \"\n f\"thus they cannot be hashed\"\n )\n\n def __iter__(self):\n \"\"\"\n Iterate over info axis.\n\n Returns\n -------\n iterator\n Info axis as iterator.\n \"\"\"\n return iter(self._info_axis)\n\n # can we get a better explanation of this?\n def keys(self):\n \"\"\"\n Get the 'info axis' (see Indexing for more).\n\n This is index for Series, columns for DataFrame.\n\n Returns\n -------\n Index\n Info axis.\n \"\"\"\n return self._info_axis\n\n def items(self):\n \"\"\"\n Iterate over (label, values) on info axis\n\n This is index for Series and columns for DataFrame.\n\n Returns\n -------\n Generator\n \"\"\"\n for h in self._info_axis:\n yield h, self[h]\n\n @Appender(items.__doc__)\n def iteritems(self):\n return self.items()\n\n def __len__(self) -> int:\n \"\"\"Returns length of info axis\"\"\"\n return len(self._info_axis)\n\n def __contains__(self, key) -> bool_t:\n \"\"\"True if the key is in the info axis\"\"\"\n return key in self._info_axis\n\n @property\n def empty(self) -> bool_t:\n \"\"\"\n Indicator whether DataFrame is empty.\n\n True if DataFrame is entirely empty (no items), meaning any of the\n axes are of length 0.\n\n Returns\n -------\n bool\n If DataFrame is empty, return True, if not return False.\n\n See Also\n --------\n Series.dropna : Return series without null values.\n DataFrame.dropna : Return DataFrame with labels on given axis omitted\n where (all or any) data are missing.\n\n Notes\n -----\n If DataFrame contains only NaNs, it is still not considered empty. See\n the example below.\n\n Examples\n --------\n An example of an actual empty DataFrame. Notice the index is empty:\n\n >>> df_empty = pd.DataFrame({'A' : []})\n >>> df_empty\n Empty DataFrame\n Columns: [A]\n Index: []\n >>> df_empty.empty\n True\n\n If we only have NaNs in our DataFrame, it is not considered empty! We\n will need to drop the NaNs to make the DataFrame empty:\n\n >>> df = pd.DataFrame({'A' : [np.nan]})\n >>> df\n A\n 0 NaN\n >>> df.empty\n False\n >>> df.dropna().empty\n True\n \"\"\"\n return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)\n\n # ----------------------------------------------------------------------\n # Array Interface\n\n # This is also set in IndexOpsMixin\n # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented\n __array_priority__ = 1000\n\n def __array__(self, dtype=None) -> np.ndarray:\n return com.values_from_object(self)\n\n def __array_wrap__(self, result, context=None):\n result = lib.item_from_zerodim(result)\n if is_scalar(result):\n # e.g. 
we get here with np.ptp(series)\n # ptp also requires the item_from_zerodim\n return result\n d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)\n return self._constructor(result, **d).__finalize__(self)\n\n # ideally we would define this to avoid the getattr checks, but\n # is slower\n # @property\n # def __array_interface__(self):\n # \"\"\" provide numpy array interface method \"\"\"\n # values = self.values\n # return dict(typestr=values.dtype.str,shape=values.shape,data=values)\n\n # ----------------------------------------------------------------------\n # Picklability\n\n def __getstate__(self) -> Dict[str, Any]:\n meta = {k: getattr(self, k, None) for k in self._metadata}\n return dict(\n _data=self._data,\n _typ=self._typ,\n _metadata=self._metadata,\n attrs=self.attrs,\n **meta,\n )\n\n def __setstate__(self, state):\n\n if isinstance(state, BlockManager):\n self._data = state\n elif isinstance(state, dict):\n typ = state.get(\"_typ\")\n if typ is not None:\n attrs = state.get(\"_attrs\", {})\n object.__setattr__(self, \"_attrs\", attrs)\n\n # set in the order of internal names\n # to avoid definitional recursion\n # e.g. say fill_value needing _data to be\n # defined\n meta = set(self._internal_names + self._metadata)\n for k in list(meta):\n if k in state:\n v = state[k]\n object.__setattr__(self, k, v)\n\n for k, v in state.items():\n if k not in meta:\n object.__setattr__(self, k, v)\n\n else:\n raise NotImplementedError(\"Pre-0.12 pickles are no longer supported\")\n elif len(state) == 2:\n raise NotImplementedError(\"Pre-0.12 pickles are no longer supported\")\n\n self._item_cache = {}\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def __repr__(self) -> str:\n # string representation based upon iterating over self\n # (since, by definition, `PandasContainers` are iterable)\n prepr = f\"[{','.join(map(pprint_thing, self))}]\"\n return f\"{type(self).__name__}({prepr})\"\n\n def _repr_latex_(self):\n \"\"\"\n Returns a LaTeX representation for a particular object.\n Mainly for use with nbconvert (jupyter notebook conversion to pdf).\n \"\"\"\n if config.get_option(\"display.latex.repr\"):\n return self.to_latex()\n else:\n return None\n\n def _repr_data_resource_(self):\n \"\"\"\n Not a real Jupyter special repr method, but we use the same\n naming convention.\n \"\"\"\n if config.get_option(\"display.html.table_schema\"):\n data = self.head(config.get_option(\"display.max_rows\"))\n payload = json.loads(\n data.to_json(orient=\"table\"), object_pairs_hook=collections.OrderedDict\n )\n return payload\n\n # ----------------------------------------------------------------------\n # I/O Methods\n\n _shared_docs[\n \"to_markdown\"\n ] = \"\"\"\n Print %(klass)s in Markdown-friendly format.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n buf : str, Path or StringIO-like, optional, default None\n Buffer to write to. If None, the output is returned as a string.\n mode : str, optional\n Mode in which file is opened.\n **kwargs\n These parameters will be passed to `tabulate`.\n\n Returns\n -------\n str\n %(klass)s in Markdown-friendly format.\n \"\"\"\n\n _shared_docs[\n \"to_excel\"\n ] = \"\"\"\n Write %(klass)s to an Excel sheet.\n\n To write a single %(klass)s to an Excel .xlsx file it is only necessary to\n specify a target file name. 
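# A short sketch of what the __getstate__/__setstate__ hooks defined above
# provide: plain pickle round-trips a DataFrame (assumes only the stdlib and
# pandas).
import pickle
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
restored = pickle.loads(pickle.dumps(df))

assert restored.equals(df)                    # values and dtypes survive
assert list(restored.columns) == ["a", "b"]   # axes survive as well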
To write to multiple sheets it is necessary to\n create an `ExcelWriter` object with a target file name, and specify a sheet\n in the file to write to.\n\n Multiple sheets may be written to by specifying unique `sheet_name`.\n With all data written to the file it is necessary to save the changes.\n Note that creating an `ExcelWriter` object with a file name that already\n exists will result in the contents of the existing file being erased.\n\n Parameters\n ----------\n excel_writer : str or ExcelWriter object\n File path or existing ExcelWriter.\n sheet_name : str, default 'Sheet1'\n Name of sheet which will contain DataFrame.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, optional\n Format string for floating point numbers. For example\n ``float_format=\"%%.2f\"`` will format 0.1234 to 0.12.\n columns : sequence or list of str, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of string is given it is\n assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, optional\n Column label for index column(s) if desired. If not specified, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : int, default 0\n Upper left cell row to dump data frame.\n startcol : int, default 0\n Upper left cell column to dump data frame.\n engine : str, optional\n Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this\n via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n merge_cells : bool, default True\n Write MultiIndex and Hierarchical Rows as merged cells.\n encoding : str, optional\n Encoding of the resulting excel file. Only necessary for xlwt,\n other writers support unicode natively.\n inf_rep : str, default 'inf'\n Representation for infinity (there is no native representation for\n infinity in Excel).\n verbose : bool, default True\n Display more information in the error logs.\n freeze_panes : tuple of int (length 2), optional\n Specifies the one-based bottommost row and rightmost column that\n is to be frozen.\n\n See Also\n --------\n to_csv : Write DataFrame to a comma-separated values (csv) file.\n ExcelWriter : Class for writing DataFrame objects into excel sheets.\n read_excel : Read an Excel file into a pandas DataFrame.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n\n Notes\n -----\n For compatibility with :meth:`~DataFrame.to_csv`,\n to_excel serializes lists and dicts to strings before writing.\n\n Once a workbook has been saved it is not possible write further data\n without rewriting the whole workbook.\n\n Examples\n --------\n\n Create, write to and save a workbook:\n\n >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n >>> df1.to_excel(\"output.xlsx\") # doctest: +SKIP\n\n To specify the sheet name:\n\n >>> df1.to_excel(\"output.xlsx\",\n ... sheet_name='Sheet_name_1') # doctest: +SKIP\n\n If you wish to write to more than one sheet in the workbook, it is\n necessary to specify an ExcelWriter object:\n\n >>> df2 = df1.copy()\n >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP\n ... df1.to_excel(writer, sheet_name='Sheet_name_1')\n ... 
df2.to_excel(writer, sheet_name='Sheet_name_2')\n\n ExcelWriter can also be used to append to an existing Excel file:\n\n >>> with pd.ExcelWriter('output.xlsx',\n ... mode='a') as writer: # doctest: +SKIP\n ... df.to_excel(writer, sheet_name='Sheet_name_3')\n\n To set the library that is used to write the Excel file,\n you can pass the `engine` keyword (the default engine is\n automatically chosen depending on the file extension):\n\n >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP\n \"\"\"\n\n @Appender(_shared_docs[\"to_excel\"] % dict(klass=\"object\"))\n def to_excel(\n self,\n excel_writer,\n sheet_name=\"Sheet1\",\n na_rep=\"\",\n float_format=None,\n columns=None,\n header=True,\n index=True,\n index_label=None,\n startrow=0,\n startcol=0,\n engine=None,\n merge_cells=True,\n encoding=None,\n inf_rep=\"inf\",\n verbose=True,\n freeze_panes=None,\n ) -> None:\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n from pandas.io.formats.excel import ExcelFormatter\n\n formatter = ExcelFormatter(\n df,\n na_rep=na_rep,\n cols=columns,\n header=header,\n float_format=float_format,\n index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep,\n )\n formatter.write(\n excel_writer,\n sheet_name=sheet_name,\n startrow=startrow,\n startcol=startcol,\n freeze_panes=freeze_panes,\n engine=engine,\n )\n\n def to_json(\n self,\n path_or_buf: Optional[FilePathOrBuffer] = None,\n orient: Optional[str] = None,\n date_format: Optional[str] = None,\n double_precision: int = 10,\n force_ascii: bool_t = True,\n date_unit: str = \"ms\",\n default_handler: Optional[Callable[[Any], JSONSerializable]] = None,\n lines: bool_t = False,\n compression: Optional[str] = \"infer\",\n index: bool_t = True,\n indent: Optional[int] = None,\n ) -> Optional[str]:\n \"\"\"\n Convert the object to a JSON string.\n\n Note NaN's and None will be converted to null and datetime objects\n will be converted to UNIX timestamps.\n\n Parameters\n ----------\n path_or_buf : str or file handle, optional\n File path or object. If not specified, the result is returned as\n a string.\n orient : str\n Indication of expected JSON string format.\n\n * Series:\n\n - default is 'index'\n - allowed values are: {'split','records','index','table'}.\n\n * DataFrame:\n\n - default is 'columns'\n - allowed values are: {'split', 'records', 'index', 'columns',\n 'values', 'table'}.\n\n * The format of the JSON string:\n\n - 'split' : dict like {'index' -> [index], 'columns' -> [columns],\n 'data' -> [values]}\n - 'records' : list like [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n - 'columns' : dict like {column -> {index -> value}}\n - 'values' : just the values array\n - 'table' : dict like {'schema': {schema}, 'data': {data}}\n\n Describing the data, where data component is like ``orient='records'``.\n\n .. versionchanged:: 0.20.0\n\n date_format : {None, 'epoch', 'iso'}\n Type of date conversion. 'epoch' = epoch milliseconds,\n 'iso' = ISO8601. The default depends on the `orient`. For\n ``orient='table'``, the default is 'iso'. For all other orients,\n the default is 'epoch'.\n double_precision : int, default 10\n The number of decimal places to use when encoding\n floating point values.\n force_ascii : bool, default True\n Force encoded string to be ASCII.\n date_unit : str, default 'ms' (milliseconds)\n The time unit to encode to, governs timestamp and ISO8601\n precision. 
One of 's', 'ms', 'us', 'ns' for second, millisecond,\n microsecond, and nanosecond respectively.\n default_handler : callable, default None\n Handler to call if object cannot otherwise be converted to a\n suitable format for JSON. Should receive a single argument which is\n the object to convert and return a serialisable object.\n lines : bool, default False\n If 'orient' is 'records' write out line delimited json format. Will\n throw ValueError if incorrect 'orient' since others are not list\n like.\n\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}\n\n A string representing the compression to use in the output file,\n only used when the first argument is a filename. By default, the\n compression is inferred from the filename.\n\n .. versionadded:: 0.21.0\n .. versionchanged:: 0.24.0\n 'infer' option added and set to default\n index : bool, default True\n Whether to include the index values in the JSON string. Not\n including the index (``index=False``) is only supported when\n orient is 'split' or 'table'.\n\n .. versionadded:: 0.23.0\n\n indent : int, optional\n Length of whitespace used to indent each record.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting json format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_json : Convert a JSON string to pandas object.\n\n Notes\n -----\n The behavior of ``indent=0`` varies from the stdlib, which does not\n indent the output but does insert newlines. Currently, ``indent=0``\n and the default ``indent=None`` are equivalent in pandas, though this\n may change in a future release.\n\n Examples\n --------\n >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n >>> df.to_json(orient='split')\n '{\"columns\":[\"col 1\",\"col 2\"],\n \"index\":[\"row 1\",\"row 2\"],\n \"data\":[[\"a\",\"b\"],[\"c\",\"d\"]]}'\n\n Encoding/decoding a Dataframe using ``'records'`` formatted JSON.\n Note that index labels are not preserved with this encoding.\n\n >>> df.to_json(orient='records')\n '[{\"col 1\":\"a\",\"col 2\":\"b\"},{\"col 1\":\"c\",\"col 2\":\"d\"}]'\n\n Encoding/decoding a Dataframe using ``'index'`` formatted JSON:\n\n >>> df.to_json(orient='index')\n '{\"row 1\":{\"col 1\":\"a\",\"col 2\":\"b\"},\"row 2\":{\"col 1\":\"c\",\"col 2\":\"d\"}}'\n\n Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:\n\n >>> df.to_json(orient='columns')\n '{\"col 1\":{\"row 1\":\"a\",\"row 2\":\"c\"},\"col 2\":{\"row 1\":\"b\",\"row 2\":\"d\"}}'\n\n Encoding/decoding a Dataframe using ``'values'`` formatted JSON:\n\n >>> df.to_json(orient='values')\n '[[\"a\",\"b\"],[\"c\",\"d\"]]'\n\n Encoding with Table Schema\n\n >>> df.to_json(orient='table')\n '{\"schema\": {\"fields\": [{\"name\": \"index\", \"type\": \"string\"},\n {\"name\": \"col 1\", \"type\": \"string\"},\n {\"name\": \"col 2\", \"type\": \"string\"}],\n \"primaryKey\": \"index\",\n \"pandas_version\": \"0.20.0\"},\n \"data\": [{\"index\": \"row 1\", \"col 1\": \"a\", \"col 2\": \"b\"},\n {\"index\": \"row 2\", \"col 1\": \"c\", \"col 2\": \"d\"}]}'\n \"\"\"\n from pandas.io import json\n\n if date_format is None and orient == \"table\":\n date_format = \"iso\"\n elif date_format is None:\n date_format = \"epoch\"\n\n config.is_nonnegative_int(indent)\n indent = indent or 0\n\n return json.to_json(\n path_or_buf=path_or_buf,\n obj=self,\n orient=orient,\n date_format=date_format,\n double_precision=double_precision,\n 
force_ascii=force_ascii,\n date_unit=date_unit,\n default_handler=default_handler,\n lines=lines,\n compression=compression,\n index=index,\n indent=indent,\n )\n\n def to_hdf(\n self,\n path_or_buf,\n key: str,\n mode: str = \"a\",\n complevel: Optional[int] = None,\n complib: Optional[str] = None,\n append: bool_t = False,\n format: Optional[str] = None,\n index: bool_t = True,\n min_itemsize: Optional[Union[int, Dict[str, int]]] = None,\n nan_rep=None,\n dropna: Optional[bool_t] = None,\n data_columns: Optional[List[str]] = None,\n errors: str = \"strict\",\n encoding: str = \"UTF-8\",\n ) -> None:\n \"\"\"\n Write the contained data to an HDF5 file using HDFStore.\n\n Hierarchical Data Format (HDF) is self-describing, allowing an\n application to interpret the structure and contents of a file with\n no outside information. One HDF file can hold a mix of related objects\n which can be accessed as a group or as individual objects.\n\n In order to add another DataFrame or Series to an existing HDF file\n please use append mode and a different a key.\n\n For more information see the :ref:`user guide <io.hdf5>`.\n\n Parameters\n ----------\n path_or_buf : str or pandas.HDFStore\n File path or HDFStore object.\n key : str\n Identifier for the group in the store.\n mode : {'a', 'w', 'r+'}, default 'a'\n Mode to open file:\n\n - 'w': write, a new file is created (an existing file with\n the same name would be deleted).\n - 'a': append, an existing file is opened for reading and\n writing, and if the file does not exist it is created.\n - 'r+': similar to 'a', but the file must already exist.\n complevel : {0-9}, optional\n Specifies a compression level for data.\n A value of 0 disables compression.\n complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'\n Specifies the compression library to be used.\n As of v0.20.2 these additional compressors for Blosc are supported\n (default if no compressor specified: 'blosc:blosclz'):\n {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',\n 'blosc:zlib', 'blosc:zstd'}.\n Specifying a compression library which is not available issues\n a ValueError.\n append : bool, default False\n For Table formats, append the input data to the existing.\n format : {'fixed', 'table', None}, default 'fixed'\n Possible values:\n\n - 'fixed': Fixed format. Fast writing/reading. Not-appendable,\n nor searchable.\n - 'table': Table format. Write as a PyTables Table structure\n which may perform worse but allow more flexible operations\n like searching / selecting subsets of the data.\n - If None, pd.get_option('io.hdf.default_format') is checked,\n followed by fallback to \"fixed\"\n errors : str, default 'strict'\n Specifies how encoding and decoding errors are to be handled.\n See the errors argument for :func:`open` for a full list\n of options.\n encoding : str, default \"UTF-8\"\n min_itemsize : dict or int, optional\n Map column names to minimum string sizes for columns.\n nan_rep : Any, optional\n How to represent null values as str.\n Not allowed with append=True.\n data_columns : list of columns or True, optional\n List of columns to create as indexed data columns for on-disk\n queries, or True to use all columns. By default only the axes\n of the object are indexed. 
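# A minimal runnable companion to the to_json examples above: the emitted
# string can be fed back through pandas.read_json with a matching orient.
import pandas as pd

df = pd.DataFrame([["a", "b"], ["c", "d"]],
                  index=["row 1", "row 2"],
                  columns=["col 1", "col 2"])

payload = df.to_json(orient="split")
assert pd.read_json(payload, orient="split").equals(df)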
See :ref:`io.hdf5-query-data-columns`.\n Applicable only to format='table'.\n\n See Also\n --------\n DataFrame.read_hdf : Read from HDF file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n DataFrame.to_sql : Write to a sql table.\n DataFrame.to_feather : Write out feather-format for DataFrames.\n DataFrame.to_csv : Write out to a csv file.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n ... index=['a', 'b', 'c'])\n >>> df.to_hdf('data.h5', key='df', mode='w')\n\n We can add another object to the same file:\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.to_hdf('data.h5', key='s')\n\n Reading from HDF file:\n\n >>> pd.read_hdf('data.h5', 'df')\n A B\n a 1 4\n b 2 5\n c 3 6\n >>> pd.read_hdf('data.h5', 's')\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n Deleting file with data:\n\n >>> import os\n >>> os.remove('data.h5')\n \"\"\"\n from pandas.io import pytables\n\n pytables.to_hdf(\n path_or_buf,\n key,\n self,\n mode=mode,\n complevel=complevel,\n complib=complib,\n append=append,\n format=format,\n index=index,\n min_itemsize=min_itemsize,\n nan_rep=nan_rep,\n dropna=dropna,\n data_columns=data_columns,\n errors=errors,\n encoding=encoding,\n )\n\n def to_sql(\n self,\n name: str,\n con,\n schema=None,\n if_exists: str = \"fail\",\n index: bool_t = True,\n index_label=None,\n chunksize=None,\n dtype=None,\n method=None,\n ) -> None:\n \"\"\"\n Write records stored in a DataFrame to a SQL database.\n\n Databases supported by SQLAlchemy [1]_ are supported. Tables can be\n newly created, appended to, or overwritten.\n\n Parameters\n ----------\n name : str\n Name of SQL table.\n con : sqlalchemy.engine.Engine or sqlite3.Connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library. Legacy support is provided for sqlite3.Connection objects. The user\n is responsible for engine disposal and connection closure for the SQLAlchemy\n connectable See `here \\\n <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.\n\n schema : str, optional\n Specify the schema (if database flavor supports this). If None, use\n default schema.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n How to behave if the table already exists.\n\n * fail: Raise a ValueError.\n * replace: Drop the table before inserting new values.\n * append: Insert new values to the existing table.\n\n index : bool, default True\n Write DataFrame index as a column. Uses `index_label` as the column\n name in the table.\n index_label : str or sequence, default None\n Column label for index column(s). If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n chunksize : int, optional\n Specify the number of rows in each batch to be written at a time.\n By default, all rows will be written at once.\n dtype : dict or scalar, optional\n Specifying the datatype for columns. If a dictionary is used, the\n keys should be the column names and the values should be the\n SQLAlchemy types or strings for the sqlite3 legacy mode. 
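# The Examples section below uses SQLAlchemy; as a minimal complement, a
# sketch of the sqlite3 "legacy mode" mentioned in the `con` description,
# which needs no optional dependency.
import sqlite3
import pandas as pd

con = sqlite3.connect(":memory:")
pd.DataFrame({"name": ["User 1", "User 2"]}).to_sql("users", con, index=False)
print(pd.read_sql_query("SELECT name FROM users", con))
con.close()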
If a\n scalar is provided, it will be applied to all columns.\n method : {None, 'multi', callable}, optional\n Controls the SQL insertion clause used:\n\n * None : Uses standard SQL ``INSERT`` clause (one per row).\n * 'multi': Pass multiple values in a single ``INSERT`` clause.\n * callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method <io.sql.method>`.\n\n .. versionadded:: 0.24.0\n\n Raises\n ------\n ValueError\n When the table already exists and `if_exists` is 'fail' (the\n default).\n\n See Also\n --------\n read_sql : Read a DataFrame from a table.\n\n Notes\n -----\n Timezone aware datetime columns will be written as\n ``Timestamp with timezone`` type with SQLAlchemy if supported by the\n database. Otherwise, the datetimes will be stored as timezone unaware\n timestamps local to the original timezone.\n\n .. versionadded:: 0.24.0\n\n References\n ----------\n .. [1] https://docs.sqlalchemy.org\n .. [2] https://www.python.org/dev/peps/pep-0249/\n\n Examples\n --------\n Create an in-memory SQLite database.\n\n >>> from sqlalchemy import create_engine\n >>> engine = create_engine('sqlite://', echo=False)\n\n Create a table from scratch with 3 rows.\n\n >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})\n >>> df\n name\n 0 User 1\n 1 User 2\n 2 User 3\n\n >>> df.to_sql('users', con=engine)\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]\n\n >>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})\n >>> df1.to_sql('users', con=engine, if_exists='append')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),\n (0, 'User 4'), (1, 'User 5')]\n\n Overwrite the table with just ``df1``.\n\n >>> df1.to_sql('users', con=engine, if_exists='replace',\n ... index_label='id')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 4'), (1, 'User 5')]\n\n Specify the dtype (especially useful for integers with missing values).\n Notice that while pandas is forced to store the data as floating point,\n the database supports nullable integers. When fetching the data with\n Python, we get back integer scalars.\n\n >>> df = pd.DataFrame({\"A\": [1, None, 2]})\n >>> df\n A\n 0 1.0\n 1 NaN\n 2 2.0\n\n >>> from sqlalchemy.types import Integer\n >>> df.to_sql('integers', con=engine, index=False,\n ... dtype={\"A\": Integer()})\n\n >>> engine.execute(\"SELECT * FROM integers\").fetchall()\n [(1,), (None,), (2,)]\n \"\"\"\n from pandas.io import sql\n\n sql.to_sql(\n self,\n name,\n con,\n schema=schema,\n if_exists=if_exists,\n index=index,\n index_label=index_label,\n chunksize=chunksize,\n dtype=dtype,\n method=method,\n )\n\n def to_pickle(\n self,\n path,\n compression: Optional[str] = \"infer\",\n protocol: int = pickle.HIGHEST_PROTOCOL,\n ) -> None:\n \"\"\"\n Pickle (serialize) object to file.\n\n Parameters\n ----------\n path : str\n File path where the pickled object will be stored.\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \\\n default 'infer'\n A string representing the compression to use in the output file. By\n default, infers from the file extension in specified path.\n protocol : int\n Int which indicates which protocol should be used by the pickler,\n default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible\n values are 0, 1, 2, 3, 4. 
A negative value for the protocol\n parameter is equivalent to setting its value to HIGHEST_PROTOCOL.\n\n .. [1] https://docs.python.org/3/library/pickle.html.\n .. versionadded:: 0.21.0.\n\n See Also\n --------\n read_pickle : Load pickled pandas object (or any object) from file.\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_sql : Write DataFrame to a SQL database.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Examples\n --------\n >>> original_df = pd.DataFrame({\"foo\": range(5), \"bar\": range(5, 10)})\n >>> original_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n >>> original_df.to_pickle(\"./dummy.pkl\")\n\n >>> unpickled_df = pd.read_pickle(\"./dummy.pkl\")\n >>> unpickled_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n\n >>> import os\n >>> os.remove(\"./dummy.pkl\")\n \"\"\"\n from pandas.io.pickle import to_pickle\n\n to_pickle(self, path, compression=compression, protocol=protocol)\n\n def to_clipboard(\n self, excel: bool_t = True, sep: Optional[str] = None, **kwargs\n ) -> None:\n r\"\"\"\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n Parameters\n ----------\n excel : bool, default True\n Produce output in a csv format for easy pasting into excel.\n\n - True, use the provided separator for csv pasting.\n - False, write a string representation of the object to the clipboard.\n\n sep : str, default ``'\\t'``\n Field delimiter.\n **kwargs\n These parameters will be passed to DataFrame.to_csv.\n\n See Also\n --------\n DataFrame.to_csv : Write a DataFrame to a comma-separated values\n (csv) file.\n read_clipboard : Read text from clipboard and pass to read_table.\n\n Notes\n -----\n Requirements for your platform.\n\n - Linux : `xclip`, or `xsel` (with `PyQt4` modules)\n - Windows : none\n - OS X : none\n\n Examples\n --------\n Copy the contents of a DataFrame to the clipboard.\n\n >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])\n >>> df.to_clipboard(sep=',')\n ... # Wrote the following to the system clipboard:\n ... # ,A,B,C\n ... # 0,1,2,3\n ... # 1,4,5,6\n\n We can omit the index by passing the keyword `index` and setting\n it to false.\n\n >>> df.to_clipboard(sep=',', index=False)\n ... # Wrote the following to the system clipboard:\n ... # A,B,C\n ... # 1,2,3\n ... # 4,5,6\n \"\"\"\n from pandas.io import clipboards\n\n clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)\n\n def to_xarray(self):\n \"\"\"\n Return an xarray object from the pandas object.\n\n Returns\n -------\n xarray.DataArray or xarray.Dataset\n Data in the pandas structure converted to Dataset if the object is\n a DataFrame, or a DataArray if the object is a Series.\n\n See Also\n --------\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Notes\n -----\n See the `xarray docs <https://xarray.pydata.org/en/stable/>`__\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),\n ... ('parrot', 'bird', 24.0, 2),\n ... ('lion', 'mammal', 80.5, 4),\n ... ('monkey', 'mammal', np.nan, 4)],\n ... columns=['name', 'class', 'max_speed',\n ... 
'num_legs'])\n >>> df\n name class max_speed num_legs\n 0 falcon bird 389.0 2\n 1 parrot bird 24.0 2\n 2 lion mammal 80.5 4\n 3 monkey mammal NaN 4\n\n >>> df.to_xarray()\n <xarray.Dataset>\n Dimensions: (index: 4)\n Coordinates:\n * index (index) int64 0 1 2 3\n Data variables:\n name (index) object 'falcon' 'parrot' 'lion' 'monkey'\n class (index) object 'bird' 'bird' 'mammal' 'mammal'\n max_speed (index) float64 389.0 24.0 80.5 nan\n num_legs (index) int64 2 2 4 4\n\n >>> df['max_speed'].to_xarray()\n <xarray.DataArray 'max_speed' (index: 4)>\n array([389. , 24. , 80.5, nan])\n Coordinates:\n * index (index) int64 0 1 2 3\n\n >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',\n ... '2018-01-02', '2018-01-02'])\n >>> df_multiindex = pd.DataFrame({'date': dates,\n ... 'animal': ['falcon', 'parrot',\n ... 'falcon', 'parrot'],\n ... 'speed': [350, 18, 361, 15]})\n >>> df_multiindex = df_multiindex.set_index(['date', 'animal'])\n\n >>> df_multiindex\n speed\n date animal\n 2018-01-01 falcon 350\n parrot 18\n 2018-01-02 falcon 361\n parrot 15\n\n >>> df_multiindex.to_xarray()\n <xarray.Dataset>\n Dimensions: (animal: 2, date: 2)\n Coordinates:\n * date (date) datetime64[ns] 2018-01-01 2018-01-02\n * animal (animal) object 'falcon' 'parrot'\n Data variables:\n speed (date, animal) int64 350 18 361 15\n \"\"\"\n xarray = import_optional_dependency(\"xarray\")\n\n if self.ndim == 1:\n return xarray.DataArray.from_series(self)\n else:\n return xarray.Dataset.from_dataframe(self)\n\n @Substitution(returns=fmt.return_docstring)\n def to_latex(\n self,\n buf=None,\n columns=None,\n col_space=None,\n header=True,\n index=True,\n na_rep=\"NaN\",\n formatters=None,\n float_format=None,\n sparsify=None,\n index_names=True,\n bold_rows=False,\n column_format=None,\n longtable=None,\n escape=None,\n encoding=None,\n decimal=\".\",\n multicolumn=None,\n multicolumn_format=None,\n multirow=None,\n caption=None,\n label=None,\n ):\n r\"\"\"\n Render object to a LaTeX tabular, longtable, or nested table/tabular.\n\n Requires ``\\usepackage{booktabs}``. The output can be copy/pasted\n into a main LaTeX document or read from an external file\n with ``\\input{table.tex}``.\n\n .. versionchanged:: 0.20.2\n Added to Series.\n\n .. versionchanged:: 1.0.0\n Added caption and label arguments.\n\n Parameters\n ----------\n buf : str, Path or StringIO-like, optional, default None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given,\n it is assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default 'NaN'\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns' elements by position or\n name. The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function or str, optional, default None\n Formatter for floating point numbers. For example\n ``float_format=\"%%.2f\"`` and ``float_format=\"{:0.2f}\".format`` will\n both result in 0.1234 being formatted as 0.12.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row. 
By default, the value will be\n read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in `LaTeX table format\n <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3\n columns. By default, 'l' will be used for all columns except\n columns of numbers, which default to 'r'.\n longtable : bool, optional\n By default, the value will be read from the pandas config\n module. Use a longtable environment instead of tabular. Requires\n adding a \\usepackage{longtable} to your LaTeX preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config\n module. When set to False prevents from escaping latex special\n characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'utf-8'.\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n multicolumn : bool, default True\n Use \\multicolumn to enhance MultiIndex columns.\n The default will be read from the config module.\n multicolumn_format : str, default 'l'\n The alignment for multicolumns, similar to `column_format`\n The default will be read from the config module.\n multirow : bool, default False\n Use \\multirow to enhance MultiIndex rows. Requires adding a\n \\usepackage{multirow} to your LaTeX preamble. Will print\n centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read\n from the pandas config module.\n caption : str, optional\n The LaTeX caption to be placed inside ``\\caption{}`` in the output.\n\n .. versionadded:: 1.0.0\n\n label : str, optional\n The LaTeX label to be placed inside ``\\label{}`` in the output.\n This is used with ``\\ref{}`` in the main ``.tex`` file.\n\n .. versionadded:: 1.0.0\n %(returns)s\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 
'weapon': ['sai', 'bo staff']})\n >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE\n \\begin{tabular}{lll}\n \\toprule\n name & mask & weapon \\\\\n \\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n \\bottomrule\n \\end{tabular}\n \"\"\"\n # Get defaults from the pandas config\n if self.ndim == 1:\n self = self.to_frame()\n if longtable is None:\n longtable = config.get_option(\"display.latex.longtable\")\n if escape is None:\n escape = config.get_option(\"display.latex.escape\")\n if multicolumn is None:\n multicolumn = config.get_option(\"display.latex.multicolumn\")\n if multicolumn_format is None:\n multicolumn_format = config.get_option(\"display.latex.multicolumn_format\")\n if multirow is None:\n multirow = config.get_option(\"display.latex.multirow\")\n\n formatter = DataFrameFormatter(\n self,\n columns=columns,\n col_space=col_space,\n na_rep=na_rep,\n header=header,\n index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n index_names=index_names,\n escape=escape,\n decimal=decimal,\n )\n return formatter.to_latex(\n buf=buf,\n column_format=column_format,\n longtable=longtable,\n encoding=encoding,\n multicolumn=multicolumn,\n multicolumn_format=multicolumn_format,\n multirow=multirow,\n caption=caption,\n label=label,\n )\n\n def to_csv(\n self,\n path_or_buf: Optional[FilePathOrBuffer] = None,\n sep: str = \",\",\n na_rep: str = \"\",\n float_format: Optional[str] = None,\n columns: Optional[Sequence[Label]] = None,\n header: Union[bool_t, List[str]] = True,\n index: bool_t = True,\n index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,\n mode: str = \"w\",\n encoding: Optional[str] = None,\n compression: Optional[Union[str, Mapping[str, str]]] = \"infer\",\n quoting: Optional[int] = None,\n quotechar: str = '\"',\n line_terminator: Optional[str] = None,\n chunksize: Optional[int] = None,\n date_format: Optional[str] = None,\n doublequote: bool_t = True,\n escapechar: Optional[str] = None,\n decimal: Optional[str] = \".\",\n ) -> Optional[str]:\n r\"\"\"\n Write object to a comma-separated values (csv) file.\n\n .. versionchanged:: 0.24.0\n The order of arguments for Series was changed.\n\n Parameters\n ----------\n path_or_buf : str or file handle, default None\n File path or object, if None is provided the result is returned as\n a string. If a file object is passed it should be opened with\n `newline=''`, disabling universal newlines.\n\n .. versionchanged:: 0.24.0\n\n Was previously named \"path\" for Series.\n\n sep : str, default ','\n String of length 1. Field delimiter for the output file.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, default None\n Format string for floating point numbers.\n columns : sequence, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given it is\n assumed to be aliases for the column names.\n\n .. versionchanged:: 0.24.0\n\n Previously defaulted to False for Series.\n\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, or False, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the object uses MultiIndex. If\n False do not print fields for index names. 
Use index_label=False\n for easier importing in R.\n mode : str\n Python write mode, default 'w'.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'utf-8'.\n compression : str or dict, default 'infer'\n If str, represents compression mode. If dict, value at 'method' is\n the compression mode. Compression mode may be any of the following\n possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If\n compression mode is 'infer' and `path_or_buf` is path-like, then\n detect compression mode from the following extensions: '.gz',\n '.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given\n and mode is 'zip' or inferred as 'zip', other entries passed as\n additional compression options.\n\n .. versionchanged:: 1.0.0\n\n May now be a dict with key 'method' as compression mode\n and other entries as additional compression options if\n compression mode is 'zip'.\n\n quoting : optional constant from csv module\n Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`\n then floats are converted to strings and thus csv.QUOTE_NONNUMERIC\n will treat them as non-numeric.\n quotechar : str, default '\\\"'\n String of length 1. Character used to quote fields.\n line_terminator : str, optional\n The newline character or character sequence to use in the output\n file. Defaults to `os.linesep`, which depends on the OS in which\n this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.).\n\n .. versionchanged:: 0.24.0\n chunksize : int or None\n Rows to write at a time.\n date_format : str, default None\n Format string for datetime objects.\n doublequote : bool, default True\n Control quoting of `quotechar` inside a field.\n escapechar : str, default None\n String of length 1. Character used to escape `sep` and `quotechar`\n when appropriate.\n decimal : str, default '.'\n Character recognized as decimal separator. E.g. use ',' for\n European data.\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting csv format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_csv : Load a CSV file into a DataFrame.\n to_excel : Write DataFrame to an Excel file.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']})\n >>> df.to_csv(index=False)\n 'name,mask,weapon\\nRaphael,red,sai\\nDonatello,purple,bo staff\\n'\n\n Create 'out.zip' containing 'out.csv'\n\n >>> compression_opts = dict(method='zip',\n ... archive_name='out.csv') # doctest: +SKIP\n >>> df.to_csv('out.zip', index=False,\n ... 
compression=compression_opts) # doctest: +SKIP\n \"\"\"\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n from pandas.io.formats.csvs import CSVFormatter\n\n formatter = CSVFormatter(\n df,\n path_or_buf,\n line_terminator=line_terminator,\n sep=sep,\n encoding=encoding,\n compression=compression,\n quoting=quoting,\n na_rep=na_rep,\n float_format=float_format,\n cols=columns,\n header=header,\n index=index,\n index_label=index_label,\n mode=mode,\n chunksize=chunksize,\n quotechar=quotechar,\n date_format=date_format,\n doublequote=doublequote,\n escapechar=escapechar,\n decimal=decimal,\n )\n formatter.save()\n\n if path_or_buf is None:\n return formatter.path_or_buf.getvalue()\n\n return None\n\n # ----------------------------------------------------------------------\n # Lookup Caching\n\n def _set_as_cached(self, item, cacher) -> None:\n \"\"\"\n Set the _cacher attribute on the calling object with a weakref to\n cacher.\n \"\"\"\n self._cacher = (item, weakref.ref(cacher))\n\n def _reset_cacher(self) -> None:\n \"\"\"\n Reset the cacher.\n \"\"\"\n if hasattr(self, \"_cacher\"):\n del self._cacher\n\n def _maybe_cache_changed(self, item, value) -> None:\n \"\"\"\n The object has called back to us saying maybe it has changed.\n \"\"\"\n self._data.set(item, value)\n\n @property\n def _is_cached(self) -> bool_t:\n \"\"\"Return boolean indicating if self is cached or not.\"\"\"\n return getattr(self, \"_cacher\", None) is not None\n\n def _get_cacher(self):\n \"\"\"return my cacher or None\"\"\"\n cacher = getattr(self, \"_cacher\", None)\n if cacher is not None:\n cacher = cacher[1]()\n return cacher\n\n def _maybe_update_cacher(\n self, clear: bool_t = False, verify_is_copy: bool_t = True\n ) -> None:\n \"\"\"\n See if we need to update our parent cacher if clear, then clear our\n cache.\n\n Parameters\n ----------\n clear : bool, default False\n Clear the item cache.\n verify_is_copy : bool, default True\n Provide is_copy checks.\n \"\"\"\n cacher = getattr(self, \"_cacher\", None)\n if cacher is not None:\n ref = cacher[1]()\n\n # we are trying to reference a dead referant, hence\n # a copy\n if ref is None:\n del self._cacher\n else:\n # Note: we need to call ref._maybe_cache_changed even in the\n # case where it will raise. (Uh, not clear why)\n try:\n ref._maybe_cache_changed(cacher[0], self)\n except AssertionError:\n # ref._data.setitem can raise\n # AssertionError because of shape mismatch\n pass\n\n if verify_is_copy:\n self._check_setitem_copy(stacklevel=5, t=\"referant\")\n\n if clear:\n self._clear_item_cache()\n\n def _clear_item_cache(self) -> None:\n self._item_cache.clear()\n\n # ----------------------------------------------------------------------\n # Indexing Methods\n\n def take(\n self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs\n ) -> FrameOrSeries:\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. 
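# A tiny round-trip sketch for to_csv defined above: with the default
# compression="infer", the compression mode is picked from the file extension.
import os
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
df.to_csv("out.csv.gz", index=False)   # gzip inferred from the ".gz" suffix
print(pd.read_csv("out.csv.gz"))       # read_csv infers the compression too
os.remove("out.csv.gz")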
``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n is_copy : bool\n Before pandas 1.0, ``is_copy=False`` can be specified to ensure\n that the return value is an actual copy. Starting with pandas 1.0,\n ``take`` always returns a copy, and the keyword is therefore\n deprecated.\n\n .. deprecated:: 1.0.0\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... index=[0, 2, 3, 1])\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n Take elements at positions 0 and 3 along the axis 0 (default).\n\n Note how the actual indices selected (0 and 1) do not correspond to\n our selected indices 0 and 3. That's because we are selecting the 0th\n and 3rd rows, not rows whose indices equal 0 and 3.\n\n >>> df.take([0, 3])\n name class max_speed\n 0 falcon bird 389.0\n 1 monkey mammal NaN\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n >>> df.take([1, 2], axis=1)\n class max_speed\n 0 bird 389.0\n 2 bird 24.0\n 3 mammal 80.5\n 1 mammal NaN\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> df.take([-1, -2])\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n \"\"\"\n if is_copy is not None:\n warnings.warn(\n \"is_copy is deprecated and will be removed in a future version. \"\n \"'take' always returns a copy, so there is no need to specify this.\",\n FutureWarning,\n stacklevel=2,\n )\n\n nv.validate_take(tuple(), kwargs)\n\n self._consolidate_inplace()\n\n new_data = self._data.take(\n indices, axis=self._get_block_manager_axis(axis), verify=True\n )\n return self._constructor(new_data).__finalize__(self)\n\n def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:\n \"\"\"\n Internal version of the `take` method that sets the `_is_copy`\n attribute to keep track of the parent dataframe (using in indexing\n for the SettingWithCopyWarning).\n\n See the docstring of `take` for full explanation of the parameters.\n \"\"\"\n result = self.take(indices=indices, axis=axis)\n # Maybe set copy if we didn't actually change the index.\n if not result._get_axis(axis).equals(self._get_axis(axis)):\n result._set_is_copy(self)\n return result\n\n def xs(self, key, axis=0, level=None, drop_level: bool_t = True):\n \"\"\"\n Return cross-section from the Series/DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis to retrieve cross-section on.\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. 
Levels can be referred by label or position.\n drop_level : bool, default True\n If False, returns object with same levels as self.\n\n Returns\n -------\n Series or DataFrame\n Cross-section from the original Series or DataFrame\n corresponding to the selected index levels.\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n DataFrame.iloc : Purely integer-location based indexing\n for selection by position.\n\n Notes\n -----\n `xs` can not be used to set values.\n\n MultiIndex Slicers is a generic way to get/set values on\n any level or levels.\n It is a superset of `xs` functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`.\n\n Examples\n --------\n >>> d = {'num_legs': [4, 4, 2, 2],\n ... 'num_wings': [0, 0, 2, 2],\n ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],\n ... 'animal': ['cat', 'dog', 'bat', 'penguin'],\n ... 'locomotion': ['walks', 'walks', 'flies', 'walks']}\n >>> df = pd.DataFrame(data=d)\n >>> df = df.set_index(['class', 'animal', 'locomotion'])\n >>> df\n num_legs num_wings\n class animal locomotion\n mammal cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n bird penguin walks 2 2\n\n Get values at specified index\n\n >>> df.xs('mammal')\n num_legs num_wings\n animal locomotion\n cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n\n Get values at several indexes\n\n >>> df.xs(('mammal', 'dog'))\n num_legs num_wings\n locomotion\n walks 4 0\n\n Get values at specified index and level\n\n >>> df.xs('cat', level=1)\n num_legs num_wings\n class locomotion\n mammal walks 4 0\n\n Get values at several indexes and levels\n\n >>> df.xs(('bird', 'walks'),\n ... level=[0, 'locomotion'])\n num_legs num_wings\n animal\n penguin 2 2\n\n Get values at specified column and axis\n\n >>> df.xs('num_wings', axis=1)\n class animal locomotion\n mammal cat walks 0\n dog walks 0\n bat flies 2\n bird penguin walks 2\n Name: num_wings, dtype: int64\n \"\"\"\n axis = self._get_axis_number(axis)\n labels = self._get_axis(axis)\n if level is not None:\n loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)\n\n # create the tuple of the indexer\n _indexer = [slice(None)] * self.ndim\n _indexer[axis] = loc\n indexer = tuple(_indexer)\n\n result = self.iloc[indexer]\n setattr(result, result._get_axis_name(axis), new_ax)\n return result\n\n if axis == 1:\n return self[key]\n\n self._consolidate_inplace()\n\n index = self.index\n if isinstance(index, MultiIndex):\n loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)\n else:\n loc = self.index.get_loc(key)\n\n if isinstance(loc, np.ndarray):\n if loc.dtype == np.bool_:\n (inds,) = loc.nonzero()\n return self._take_with_is_copy(inds, axis=axis)\n else:\n return self._take_with_is_copy(loc, axis=axis)\n\n if not is_scalar(loc):\n new_index = self.index[loc]\n\n if is_scalar(loc):\n # In this case loc should be an integer\n if self.ndim == 1:\n # if we encounter an array-like and we only have 1 dim\n # that means that their are list/ndarrays inside the Series!\n # so just return them (GH 6394)\n return self._values[loc]\n\n new_values = self._data.fast_xs(loc)\n\n result = self._constructor_sliced(\n new_values,\n index=self.columns,\n name=self.index[loc],\n dtype=new_values.dtype,\n )\n\n else:\n result = self.iloc[loc]\n result.index = new_index\n\n # this could be a view\n # but only in a single-dtyped view sliceable case\n result._set_is_copy(self, copy=not result._is_view)\n return result\n\n _xs: Callable = xs\n\n def __getitem__(self, 
item):\n raise AbstractMethodError(self)\n\n def _get_item_cache(self, item):\n \"\"\"Return the cached item, item represents a label indexer.\"\"\"\n cache = self._item_cache\n res = cache.get(item)\n if res is None:\n values = self._data.get(item)\n res = self._box_item_values(item, values)\n cache[item] = res\n res._set_as_cached(item, self)\n\n # for a chain\n res._is_copy = self._is_copy\n return res\n\n def _box_item_values(self, key, values):\n raise AbstractMethodError(self)\n\n def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:\n \"\"\"\n Construct a slice of this container.\n\n Slicing with this method is *always* positional.\n \"\"\"\n assert isinstance(slobj, slice), type(slobj)\n axis = self._get_block_manager_axis(axis)\n result = self._constructor(self._data.get_slice(slobj, axis=axis))\n result = result.__finalize__(self)\n\n # this could be a view\n # but only in a single-dtyped view sliceable case\n is_copy = axis != 0 or result._is_view\n result._set_is_copy(self, copy=is_copy)\n return result\n\n def _set_item(self, key, value) -> None:\n self._data.set(key, value)\n self._clear_item_cache()\n\n def _set_is_copy(self, ref, copy: bool_t = True) -> None:\n if not copy:\n self._is_copy = None\n else:\n assert ref is not None\n self._is_copy = weakref.ref(ref)\n\n def _check_is_chained_assignment_possible(self) -> bool_t:\n \"\"\"\n Check if we are a view, have a cacher, and are of mixed type.\n If so, then force a setitem_copy check.\n\n Should be called just near setting a value\n\n Will return a boolean if it we are a view and are cached, but a\n single-dtype meaning that the cacher should be updated following\n setting.\n \"\"\"\n if self._is_view and self._is_cached:\n ref = self._get_cacher()\n if ref is not None and ref._is_mixed_type:\n self._check_setitem_copy(stacklevel=4, t=\"referant\", force=True)\n return True\n elif self._is_copy:\n self._check_setitem_copy(stacklevel=4, t=\"referant\")\n return False\n\n def _check_setitem_copy(self, stacklevel=4, t=\"setting\", force=False):\n \"\"\"\n\n Parameters\n ----------\n stacklevel : int, default 4\n the level to show of the stack when the error is output\n t : str, the type of setting error\n force : bool, default False\n If True, then force showing an error.\n\n validate if we are doing a setitem on a chained copy.\n\n If you call this function, be sure to set the stacklevel such that the\n user will see the error *at the level of setting*\n\n It is technically possible to figure out that we are setting on\n a copy even WITH a multi-dtyped pandas object. In other words, some\n blocks may be views while other are not. Currently _is_view will ALWAYS\n return False for multi-blocks to avoid having to handle this case.\n\n df = DataFrame(np.arange(0,9), columns=['count'])\n df['group'] = 'b'\n\n # This technically need not raise SettingWithCopy if both are view\n # (which is not # generally guaranteed but is usually True. 
However,\n # this is in general not a good practice and we recommend using .loc.\n df.iloc[0:5]['group'] = 'a'\n\n \"\"\"\n # return early if the check is not needed\n if not (force or self._is_copy):\n return\n\n value = config.get_option(\"mode.chained_assignment\")\n if value is None:\n return\n\n # see if the copy is not actually referred; if so, then dissolve\n # the copy weakref\n if self._is_copy is not None and not isinstance(self._is_copy, str):\n r = self._is_copy()\n if not gc.get_referents(r) or r.shape == self.shape:\n self._is_copy = None\n return\n\n # a custom message\n if isinstance(self._is_copy, str):\n t = self._is_copy\n\n elif t == \"referant\":\n t = (\n \"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame\\n\\n\"\n \"See the caveats in the documentation: \"\n \"https://pandas.pydata.org/pandas-docs/stable/user_guide/\"\n \"indexing.html#returning-a-view-versus-a-copy\"\n )\n\n else:\n t = (\n \"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame.\\n\"\n \"Try using .loc[row_indexer,col_indexer] = value \"\n \"instead\\n\\nSee the caveats in the documentation: \"\n \"https://pandas.pydata.org/pandas-docs/stable/user_guide/\"\n \"indexing.html#returning-a-view-versus-a-copy\"\n )\n\n if value == \"raise\":\n raise com.SettingWithCopyError(t)\n elif value == \"warn\":\n warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)\n\n def __delitem__(self, key) -> None:\n \"\"\"\n Delete item\n \"\"\"\n deleted = False\n\n maybe_shortcut = False\n if self.ndim == 2 and isinstance(self.columns, MultiIndex):\n try:\n maybe_shortcut = key not in self.columns._engine\n except TypeError:\n pass\n\n if maybe_shortcut:\n # Allow shorthand to delete all columns whose first len(key)\n # elements match key:\n if not isinstance(key, tuple):\n key = (key,)\n for col in self.columns:\n if isinstance(col, tuple) and col[: len(key)] == key:\n del self[col]\n deleted = True\n if not deleted:\n # If the above loop ran and didn't delete anything because\n # there was no match, this call should raise the appropriate\n # exception:\n self._data.delete(key)\n\n # delete from the caches\n try:\n del self._item_cache[key]\n except KeyError:\n pass\n\n # ----------------------------------------------------------------------\n # Unsorted\n\n def get(self, key, default=None):\n \"\"\"\n Get item from object for given key (ex: DataFrame column).\n\n Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n \"\"\"\n try:\n return self[key]\n except (KeyError, ValueError, IndexError):\n return default\n\n @property\n def _is_view(self) -> bool_t:\n \"\"\"Return boolean indicating if self is view of another array \"\"\"\n return self._data.is_view\n\n def reindex_like(\n self: FrameOrSeries,\n other,\n method: Optional[str] = None,\n copy: bool_t = True,\n limit=None,\n tolerance=None,\n ) -> FrameOrSeries:\n \"\"\"\n Return an object with matching indices as other object.\n\n Conform the object to the same index on all axes. Optional\n filling logic, placing NaN in locations having no value\n in the previous index. 
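For example (an illustrative sketch, not taken from the surrounding docstring; any two small Series will do), labels present only in the other object are filled with NaN:\n\n        >>> pd.Series([1, 2], index=['a', 'b']).reindex_like(\n        ...     pd.Series([0, 0, 0], index=['a', 'b', 'c']))\n        a    1.0\n        b    2.0\n        c    NaN\n        dtype: float64\n\n        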
A new object is produced unless the\n new index is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n other : Object of the same data type\n Its row and column indices are used to define the new indices\n of this object.\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid observation forward to next\n valid\n * backfill / bfill: use next valid observation to fill gap\n * nearest: use nearest valid observations to fill gap.\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n limit : int, default None\n Maximum number of consecutive labels to fill for inexact matches.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n Series or DataFrame\n Same type as caller, but with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, columns=other.columns,...)``.\n\n Examples\n --------\n >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],\n ... [31, 87.8, 'high'],\n ... [22, 71.6, 'medium'],\n ... [35, 95, 'medium']],\n ... columns=['temp_celsius', 'temp_fahrenheit',\n ... 'windspeed'],\n ... index=pd.date_range(start='2014-02-12',\n ... end='2014-02-15', freq='D'))\n\n >>> df1\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 24.3 75.7 high\n 2014-02-13 31.0 87.8 high\n 2014-02-14 22.0 71.6 medium\n 2014-02-15 35.0 95.0 medium\n\n >>> df2 = pd.DataFrame([[28, 'low'],\n ... [30, 'low'],\n ... [35.1, 'medium']],\n ... columns=['temp_celsius', 'windspeed'],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... 
'2014-02-15']))\n\n >>> df2\n temp_celsius windspeed\n 2014-02-12 28.0 low\n 2014-02-13 30.0 low\n 2014-02-15 35.1 medium\n\n >>> df2.reindex_like(df1)\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 28.0 NaN low\n 2014-02-13 30.0 NaN low\n 2014-02-14 NaN NaN NaN\n 2014-02-15 35.1 NaN medium\n \"\"\"\n d = other._construct_axes_dict(\n axes=self._AXIS_ORDERS,\n method=method,\n copy=copy,\n limit=limit,\n tolerance=tolerance,\n )\n\n return self.reindex(**d)\n\n def drop(\n self,\n labels=None,\n axis=0,\n index=None,\n columns=None,\n level=None,\n inplace: bool_t = False,\n errors: str = \"raise\",\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if labels is not None:\n if index is not None or columns is not None:\n raise ValueError(\"Cannot specify both 'labels' and 'index'/'columns'\")\n axis_name = self._get_axis_name(axis)\n axes = {axis_name: labels}\n elif index is not None or columns is not None:\n axes, _ = self._construct_axes_from_arguments((index, columns), {})\n else:\n raise ValueError(\n \"Need to specify at least one of 'labels', 'index' or 'columns'\"\n )\n\n obj = self\n\n for axis, labels in axes.items():\n if labels is not None:\n obj = obj._drop_axis(labels, axis, level=level, errors=errors)\n\n if inplace:\n self._update_inplace(obj)\n else:\n return obj\n\n def _drop_axis(\n self: FrameOrSeries, labels, axis, level=None, errors: str = \"raise\"\n ) -> FrameOrSeries:\n \"\"\"\n Drop labels from specified axis. Used in the ``drop`` method\n internally.\n\n Parameters\n ----------\n labels : single label or list-like\n axis : int or axis name\n level : int or level name, default None\n For MultiIndex\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n \"\"\"\n axis = self._get_axis_number(axis)\n axis_name = self._get_axis_name(axis)\n axis = self._get_axis(axis)\n\n if axis.is_unique:\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError(\"axis must be a MultiIndex\")\n new_axis = axis.drop(labels, level=level, errors=errors)\n else:\n new_axis = axis.drop(labels, errors=errors)\n result = self.reindex(**{axis_name: new_axis})\n\n # Case for non-unique axis\n else:\n labels = ensure_object(com.index_labels_to_array(labels))\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError(\"axis must be a MultiIndex\")\n indexer = ~axis.get_level_values(level).isin(labels)\n\n # GH 18561 MultiIndex.drop should raise if label is absent\n if errors == \"raise\" and indexer.all():\n raise KeyError(f\"{labels} not found in axis\")\n else:\n indexer = ~axis.isin(labels)\n # Check if label doesn't exist along axis\n labels_missing = (axis.get_indexer_for(labels) == -1).any()\n if errors == \"raise\" and labels_missing:\n raise KeyError(f\"{labels} not found in axis\")\n\n slicer = [slice(None)] * self.ndim\n slicer[self._get_axis_number(axis_name)] = indexer\n\n result = self.loc[tuple(slicer)]\n\n return result\n\n def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:\n \"\"\"\n Replace self internals with result.\n\n Parameters\n ----------\n verify_is_copy : bool, default True\n Provide is_copy checks.\n \"\"\"\n # NOTE: This does *not* call __finalize__ and that's an explicit\n # decision that we may revisit in the future.\n\n self._reset_cache()\n self._clear_item_cache()\n self._data = getattr(result, \"_data\", result)\n self._maybe_update_cacher(verify_is_copy=verify_is_copy)\n\n def add_prefix(self: 
FrameOrSeries, prefix: str) -> FrameOrSeries:\n \"\"\"\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_prefix('item_')\n item_0 1\n item_1 2\n item_2 3\n item_3 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_prefix('col_')\n col_A col_B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial(\"{prefix}{}\".format, prefix=prefix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper) # type: ignore\n\n def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:\n \"\"\"\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add after each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_suffix('_item')\n 0_item 1\n 1_item 2\n 2_item 3\n 3_item 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_suffix('_col')\n A_col B_col\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial(\"{}{suffix}\".format, suffix=suffix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper) # type: ignore\n\n def sort_values(\n self,\n axis=0,\n ascending=True,\n inplace: bool_t = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n ignore_index: bool_t = False,\n ):\n \"\"\"\n Sort by the values along either axis.\n\n Parameters\n ----------%(optional_by)s\n axis : %(axes_single_arg)s, default 0\n Axis to be sorted.\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the\n end.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n sorted_obj : DataFrame or None\n DataFrame with sorted values if inplace=False, None otherwise.\n\n Examples\n --------\n >>> df = pd.DataFrame({\n ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... 
})\n >>> df\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort by multiple columns\n\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n 3 NaN 8 4\n\n Putting NAs first\n\n >>> df.sort_values(by='col1', ascending=False, na_position='first')\n col1 col2 col3\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n \"\"\"\n raise AbstractMethodError(self)\n\n def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:\n \"\"\"\n Conform %(klass)s to new index with optional filling logic.\n\n Places NA/NaN in locations having no value in the previous index. A new object\n is produced unless the new index is equivalent to the current one and\n ``copy=False``.\n\n Parameters\n ----------\n %(optional_labels)s\n %(axes)s : array-like, optional\n New labels / index to conform to, should be specified using\n keywords. Preferably an Index object to avoid duplicating data.\n %(optional_axis)s\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: Propagate last valid observation forward to next\n valid.\n * backfill / bfill: Use next valid observation to fill gap.\n * nearest: Use nearest valid observations to fill gap.\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n level : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n limit : int, default None\n Maximum number of consecutive elements to forward or backward fill.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n %(klass)s with changed index.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n ``DataFrame.reindex`` supports two calling conventions\n\n * ``(index=index_labels, columns=column_labels, ...)``\n * ``(labels, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Create a dataframe with some fictional data.\n\n >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']\n >>> df = pd.DataFrame({'http_status': [200, 200, 404, 404, 301],\n ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},\n ... 
index=index)\n >>> df\n http_status response_time\n Firefox 200 0.04\n Chrome 200 0.02\n Safari 404 0.07\n IE10 404 0.08\n Konqueror 301 1.00\n\n Create a new index and reindex the dataframe. By default\n values in the new index that do not have corresponding\n records in the dataframe are assigned ``NaN``.\n\n >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',\n ... 'Chrome']\n >>> df.reindex(new_index)\n http_status response_time\n Safari 404.0 0.07\n Iceweasel NaN NaN\n Comodo Dragon NaN NaN\n IE10 404.0 0.08\n Chrome 200.0 0.02\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``. Because the index is not monotonically\n increasing or decreasing, we cannot use arguments to the keyword\n ``method`` to fill the ``NaN`` values.\n\n >>> df.reindex(new_index, fill_value=0)\n http_status response_time\n Safari 404 0.07\n Iceweasel 0 0.00\n Comodo Dragon 0 0.00\n IE10 404 0.08\n Chrome 200 0.02\n\n >>> df.reindex(new_index, fill_value='missing')\n http_status response_time\n Safari 404 0.07\n Iceweasel missing missing\n Comodo Dragon missing missing\n IE10 404 0.08\n Chrome 200 0.02\n\n We can also reindex the columns.\n\n >>> df.reindex(columns=['http_status', 'user_agent'])\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n Or we can use \"axis-style\" keyword arguments\n\n >>> df.reindex(['http_status', 'user_agent'], axis=\"columns\")\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a dataframe with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')\n >>> df2 = pd.DataFrame({\"prices\": [100, 101, np.nan, 100, 89, 88]},\n ... index=date_index)\n >>> df2\n prices\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n\n Suppose we decide to expand the dataframe to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')\n >>> df2.reindex(date_index2)\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n The index entries that did not have a value in the original data frame\n (for example, '2009-12-29') are by default filled with ``NaN``.\n If desired, we can fill in the missing values using one of several\n options.\n\n For example, to back-propagate the last valid value to fill the ``NaN``\n values, pass ``bfill`` as an argument to the ``method`` keyword.\n\n >>> df2.reindex(date_index2, method='bfill')\n prices\n 2009-12-29 100.0\n 2009-12-30 100.0\n 2009-12-31 100.0\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n Please note that the ``NaN`` value present in the original dataframe\n (at index value 2010-01-03) will not be filled by any of the\n value propagation schemes. This is because filling while reindexing\n does not look at dataframe values, but only compares the original and\n desired indexes. 
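For example (a quick illustrative check, reusing the ``df2`` and ``date_index2`` defined above), the originally missing price is still missing after the ``bfill`` reindex:\n\n        >>> df2.reindex(date_index2, method='bfill').loc['2010-01-03', 'prices']\n        nan\n\n        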
If you do want to fill in the ``NaN`` values present\n in the original dataframe, use the ``fillna()`` method.\n\n See the :ref:`user guide <basics.reindexing>` for more.\n \"\"\"\n # TODO: Decide if we care about having different examples for different\n # kinds\n\n # construct the args\n axes, kwargs = self._construct_axes_from_arguments(args, kwargs)\n method = missing.clean_reindex_fill_method(kwargs.pop(\"method\", None))\n level = kwargs.pop(\"level\", None)\n copy = kwargs.pop(\"copy\", True)\n limit = kwargs.pop(\"limit\", None)\n tolerance = kwargs.pop(\"tolerance\", None)\n fill_value = kwargs.pop(\"fill_value\", None)\n\n # Series.reindex doesn't use / need the axis kwarg\n # We pop and ignore it here, to make writing Series/Frame generic code\n # easier\n kwargs.pop(\"axis\", None)\n\n if kwargs:\n raise TypeError(\n \"reindex() got an unexpected keyword \"\n f'argument \"{list(kwargs.keys())[0]}\"'\n )\n\n self._consolidate_inplace()\n\n # if all axes that are requested to reindex are equal, then only copy\n # if indicated must have index names equal here as well as values\n if all(\n self._get_axis(axis).identical(ax)\n for axis, ax in axes.items()\n if ax is not None\n ):\n if copy:\n return self.copy()\n return self\n\n # check if we are a multi reindex\n if self._needs_reindex_multi(axes, method, level):\n return self._reindex_multi(axes, copy, fill_value)\n\n # perform the reindex on the axes\n return self._reindex_axes(\n axes, level, limit, tolerance, method, fill_value, copy\n ).__finalize__(self)\n\n def _reindex_axes(\n self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy\n ) -> FrameOrSeries:\n \"\"\"Perform the reindex for all the axes.\"\"\"\n obj = self\n for a in self._AXIS_ORDERS:\n labels = axes[a]\n if labels is None:\n continue\n\n ax = self._get_axis(a)\n new_index, indexer = ax.reindex(\n labels, level=level, limit=limit, tolerance=tolerance, method=method\n )\n\n axis = self._get_axis_number(a)\n obj = obj._reindex_with_indexers(\n {axis: [new_index, indexer]},\n fill_value=fill_value,\n copy=copy,\n allow_dups=False,\n )\n\n return obj\n\n def _needs_reindex_multi(self, axes, method, level) -> bool_t:\n \"\"\"Check if we do need a multi reindex.\"\"\"\n return (\n (com.count_not_none(*axes.values()) == self._AXIS_LEN)\n and method is None\n and level is None\n and not self._is_mixed_type\n )\n\n def _reindex_multi(self, axes, copy, fill_value):\n raise AbstractMethodError(self)\n\n def _reindex_with_indexers(\n self: FrameOrSeries,\n reindexers,\n fill_value=None,\n copy: bool_t = False,\n allow_dups: bool_t = False,\n ) -> FrameOrSeries:\n \"\"\"allow_dups indicates an internal call here \"\"\"\n # reindex doing multiple operations on different axes if indicated\n new_data = self._data\n for axis in sorted(reindexers.keys()):\n index, indexer = reindexers[axis]\n baxis = self._get_block_manager_axis(axis)\n\n if index is None:\n continue\n\n index = ensure_index(index)\n if indexer is not None:\n indexer = ensure_int64(indexer)\n\n # TODO: speed up on homogeneous DataFrame objects\n new_data = new_data.reindex_indexer(\n index,\n indexer,\n axis=baxis,\n fill_value=fill_value,\n allow_dups=allow_dups,\n copy=copy,\n )\n\n if copy and new_data is self._data:\n new_data = new_data.copy()\n\n return self._constructor(new_data).__finalize__(self)\n\n def filter(\n self: FrameOrSeries,\n items=None,\n like: Optional[str] = None,\n regex: Optional[str] = None,\n axis=None,\n ) -> FrameOrSeries:\n \"\"\"\n Subset the dataframe rows or 
columns according to the specified index labels.\n\n Note that this routine does not filter a dataframe on its\n contents. The filter is applied to the labels of the index.\n\n Parameters\n ----------\n items : list-like\n Keep labels from axis which are in items.\n like : str\n Keep labels from axis for which \"like in label == True\".\n regex : str (regular expression)\n Keep labels from axis for which re.search(regex, label) == True.\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default None\n The axis to filter on, expressed either as an index (int)\n or axis name (str). By default this is the info axis,\n 'index' for Series, 'columns' for DataFrame.\n\n Returns\n -------\n same type as input object\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n\n Notes\n -----\n The ``items``, ``like``, and ``regex`` parameters are\n enforced to be mutually exclusive.\n\n ``axis`` defaults to the info axis that is used when indexing\n with ``[]``.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),\n ... index=['mouse', 'rabbit'],\n ... columns=['one', 'two', 'three'])\n\n >>> # select columns by name\n >>> df.filter(items=['one', 'three'])\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select columns by regular expression\n >>> df.filter(regex='e$', axis=1)\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select rows containing 'bbi'\n >>> df.filter(like='bbi', axis=0)\n one two three\n rabbit 4 5 6\n \"\"\"\n nkw = com.count_not_none(items, like, regex)\n if nkw > 1:\n raise TypeError(\n \"Keyword arguments `items`, `like`, or `regex` \"\n \"are mutually exclusive\"\n )\n\n if axis is None:\n axis = self._info_axis_name\n labels = self._get_axis(axis)\n\n if items is not None:\n name = self._get_axis_name(axis)\n return self.reindex(**{name: [r for r in items if r in labels]})\n elif like:\n\n def f(x):\n return like in ensure_str(x)\n\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n elif regex:\n\n def f(x):\n return matcher.search(ensure_str(x)) is not None\n\n matcher = re.compile(regex)\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n else:\n raise TypeError(\"Must pass either `items`, `like`, or `regex`\")\n\n def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:\n \"\"\"\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n For negative values of `n`, this function returns all rows except\n the last `n` rows, equivalent to ``df[:-n]``.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n same type as caller\n The first `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.tail: Returns the last `n` rows.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 
'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n\n For negative values of `n`\n\n >>> df.head(-3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n \"\"\"\n return self.iloc[:n]\n\n def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:\n \"\"\"\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n For negative values of `n`, this function returns all rows except\n the first `n` rows, equivalent to ``df[n:]``.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last 5 lines\n\n >>> df.tail()\n animal\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last `n` lines (three in this case)\n\n >>> df.tail(3)\n animal\n 6 shark\n 7 whale\n 8 zebra\n\n For negative values of `n`\n\n >>> df.tail(-3)\n animal\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n \"\"\"\n if n == 0:\n return self.iloc[0:0]\n return self.iloc[-n:]\n\n def sample(\n self: FrameOrSeries,\n n=None,\n frac=None,\n replace=False,\n weights=None,\n random_state=None,\n axis=None,\n ) -> FrameOrSeries:\n \"\"\"\n Return a random sample of items from an axis of object.\n\n You can use `random_state` for reproducibility.\n\n Parameters\n ----------\n n : int, optional\n Number of items from axis to return. Cannot be used with `frac`.\n Default = 1 if `frac` = None.\n frac : float, optional\n Fraction of axis items to return. Cannot be used with `n`.\n replace : bool, default False\n Allow or disallow sampling of the same row more than once.\n weights : str or ndarray-like, optional\n Default 'None' results in equal probability weighting.\n If passed a Series, will align with target object on index. Index\n values in weights not found in sampled object will be ignored and\n index values in sampled object not in weights will be assigned\n weights of zero.\n If called on a DataFrame, will accept the name of a column\n when axis = 0.\n Unless weights are a Series, weights must be same length as axis\n being sampled.\n If weights do not sum to 1, they will be normalized to sum to 1.\n Missing values in the weights column will be treated as zero.\n Infinite values not allowed.\n random_state : int or numpy.random.RandomState, optional\n Seed for the random number generator (if int), or numpy RandomState\n object.\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default None\n Axis to sample. Accepts axis number or name. 
Default is stat axis\n for given data type (0 for Series and DataFrames).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing `n` items randomly\n sampled from the caller object.\n\n See Also\n --------\n numpy.random.choice: Generates a random sample from a given 1-D numpy\n array.\n\n Notes\n -----\n If `frac` > 1, `replacement` should be set to `True`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... index=['falcon', 'dog', 'spider', 'fish'])\n >>> df\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n Extract 3 random elements from the ``Series`` ``df['num_legs']``:\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df['num_legs'].sample(n=3, random_state=1)\n fish 0\n spider 8\n falcon 2\n Name: num_legs, dtype: int64\n\n A random 50% sample of the ``DataFrame`` with replacement:\n\n >>> df.sample(frac=0.5, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n\n An upsample sample of the ``DataFrame`` with replacement:\n Note that `replace` parameter has to be `True` for `frac` parameter > 1.\n\n >>> df.sample(frac=2, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n falcon 2 2 10\n falcon 2 2 10\n fish 0 0 8\n dog 4 0 2\n fish 0 0 8\n dog 4 0 2\n\n Using a DataFrame column as weights. Rows with larger value in the\n `num_specimen_seen` column are more likely to be sampled.\n\n >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n\n axis = self._get_axis_number(axis)\n axis_length = self.shape[axis]\n\n # Process random_state argument\n rs = com.random_state(random_state)\n\n # Check weights for compliance\n if weights is not None:\n\n # If a series, align with frame\n if isinstance(weights, ABCSeries):\n weights = weights.reindex(self.axes[axis])\n\n # Strings acceptable if a dataframe and axis = 0\n if isinstance(weights, str):\n if isinstance(self, ABCDataFrame):\n if axis == 0:\n try:\n weights = self[weights]\n except KeyError as err:\n raise KeyError(\n \"String passed to weights not a valid column\"\n ) from err\n else:\n raise ValueError(\n \"Strings can only be passed to \"\n \"weights when sampling from rows on \"\n \"a DataFrame\"\n )\n else:\n raise ValueError(\n \"Strings cannot be passed as weights \"\n \"when sampling from a Series.\"\n )\n\n weights = pd.Series(weights, dtype=\"float64\")\n\n if len(weights) != axis_length:\n raise ValueError(\n \"Weights and axis to be sampled must be of same length\"\n )\n\n if (weights == np.inf).any() or (weights == -np.inf).any():\n raise ValueError(\"weight vector may not include `inf` values\")\n\n if (weights < 0).any():\n raise ValueError(\"weight vector many not include negative values\")\n\n # If has nan, set to zero.\n weights = weights.fillna(0)\n\n # Renormalize if don't sum to 1\n if weights.sum() != 1:\n if weights.sum() != 0:\n weights = weights / weights.sum()\n else:\n raise ValueError(\"Invalid weights: weights sum to zero\")\n\n weights = weights.values\n\n # If no frac or n, default to n=1.\n if n is None and frac is None:\n n = 1\n elif frac is not None and frac > 1 and not replace:\n raise ValueError(\n \"Replace has to be set to `True` when \"\n 
\"upsampling the population `frac` > 1.\"\n )\n elif n is not None and frac is None and n % 1 != 0:\n raise ValueError(\"Only integers accepted as `n` values\")\n elif n is None and frac is not None:\n n = int(round(frac * axis_length))\n elif n is not None and frac is not None:\n raise ValueError(\"Please enter a value for `frac` OR `n`, not both\")\n\n # Check for negative sizes\n if n < 0:\n raise ValueError(\n \"A negative number of rows requested. Please provide positive value.\"\n )\n\n locs = rs.choice(axis_length, size=n, replace=replace, p=weights)\n return self.take(locs, axis=axis)\n\n _shared_docs[\n \"pipe\"\n ] = r\"\"\"\n Apply func(self, \\*args, \\*\\*kwargs).\n\n Parameters\n ----------\n func : function\n Function to apply to the %(klass)s.\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the %(klass)s.\n args : iterable, optional\n Positional arguments passed into ``func``.\n kwargs : mapping, optional\n A dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n See Also\n --------\n DataFrame.apply : Apply a function along input axis of DataFrame.\n DataFrame.applymap : Apply a function elementwise on a whole DataFrame.\n Series.map : Apply a mapping correspondence on a\n :class:`~pandas.Series`.\n\n Notes\n -----\n\n Use ``.pipe`` when chaining together functions that expect\n Series, DataFrames or GroupBy objects. Instead of writing\n\n >>> f(g(h(df), arg1=a), arg2=b, arg3=c)\n\n You can write\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe(f, arg2=b, arg3=c)\n ... )\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe((f, 'arg2'), arg1=a, arg3=c)\n ... )\n \"\"\"\n\n @Appender(_shared_docs[\"pipe\"] % _shared_doc_kwargs)\n def pipe(self, func, *args, **kwargs):\n return com.pipe(self, func, *args, **kwargs)\n\n _shared_docs[\"aggregate\"] = dedent(\n \"\"\"\n Aggregate using one or more operations over the specified axis.\n %(versionadded)s\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for aggregating the data. If a function, must either\n work when passed a %(klass)s or when passed to %(klass)s.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.sum, 'mean']``\n - dict of axis labels -> functions, function names or list of such.\n %(axis)s\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n scalar, Series or DataFrame\n\n The return can be:\n\n * scalar : when Series.agg is called with single function\n * Series : when DataFrame.agg is called with a single function\n * DataFrame : when DataFrame.agg is called with several functions\n\n Return scalar, Series or DataFrame.\n %(see_also)s\n Notes\n -----\n `agg` is an alias for `aggregate`. 
Use the alias.\n\n A passed user-defined-function will be passed a Series for evaluation.\n %(examples)s\"\"\"\n )\n\n _shared_docs[\n \"transform\"\n ] = \"\"\"\n Call ``func`` on self producing a %(klass)s with transformed values.\n\n Produced %(klass)s will have same axis length as self.\n\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for transforming the data. If a function, must either\n work when passed a %(klass)s or when passed to %(klass)s.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.exp. 'sqrt']``\n - dict of axis labels -> functions, function names or list of such.\n %(axis)s\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n %(klass)s\n A %(klass)s that must have the same length as self.\n\n Raises\n ------\n ValueError : If the returned %(klass)s has a different length than self.\n\n See Also\n --------\n %(klass)s.agg : Only perform aggregating type operations.\n %(klass)s.apply : Invoke function on a %(klass)s.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})\n >>> df\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n >>> df.transform(lambda x: x + 1)\n A B\n 0 1 2\n 1 2 3\n 2 3 4\n\n Even though the resulting %(klass)s must have the same length as the\n input %(klass)s, it is possible to provide several input functions:\n\n >>> s = pd.Series(range(3))\n >>> s\n 0 0\n 1 1\n 2 2\n dtype: int64\n >>> s.transform([np.sqrt, np.exp])\n sqrt exp\n 0 0.000000 1.000000\n 1 1.000000 2.718282\n 2 1.414214 7.389056\n \"\"\"\n\n # ----------------------------------------------------------------------\n # Attribute access\n\n def __finalize__(\n self: FrameOrSeries, other, method=None, **kwargs\n ) -> FrameOrSeries:\n \"\"\"\n Propagate metadata from other to self.\n\n Parameters\n ----------\n other : the object from which to get the attributes that we are going\n to propagate\n method : optional, a passed method name ; possibly to take different\n types of propagation actions based on this\n\n \"\"\"\n if isinstance(other, NDFrame):\n for name in other.attrs:\n self.attrs[name] = other.attrs[name]\n # For subclasses using _metadata.\n for name in self._metadata:\n assert isinstance(name, str)\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def __getattr__(self, name: str):\n \"\"\"\n After regular attribute access, try looking up the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n # Note: obj.x will always call obj.__getattribute__('x') prior to\n # calling obj.__getattr__('x').\n\n if (\n name in self._internal_names_set\n or name in self._metadata\n or name in self._accessors\n ):\n return object.__getattribute__(self, name)\n else:\n if self._info_axis._can_hold_identifiers_and_holds_name(name):\n return self[name]\n return object.__getattribute__(self, name)\n\n def __setattr__(self, name: str, value) -> None:\n \"\"\"\n After regular attribute access, try setting the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n # first try regular attribute access via __getattribute__, so that\n # e.g. 
``obj.x`` and ``obj.x = 4`` will always reference/modify\n # the same attribute.\n\n try:\n object.__getattribute__(self, name)\n return object.__setattr__(self, name, value)\n except AttributeError:\n pass\n\n # if this fails, go on to more involved attribute setting\n # (note that this matches __getattr__, above).\n if name in self._internal_names_set:\n object.__setattr__(self, name, value)\n elif name in self._metadata:\n object.__setattr__(self, name, value)\n else:\n try:\n existing = getattr(self, name)\n if isinstance(existing, Index):\n object.__setattr__(self, name, value)\n elif name in self._info_axis:\n self[name] = value\n else:\n object.__setattr__(self, name, value)\n except (AttributeError, TypeError):\n if isinstance(self, ABCDataFrame) and (is_list_like(value)):\n warnings.warn(\n \"Pandas doesn't allow columns to be \"\n \"created via a new attribute name - see \"\n \"https://pandas.pydata.org/pandas-docs/\"\n \"stable/indexing.html#attribute-access\",\n stacklevel=2,\n )\n object.__setattr__(self, name, value)\n\n def _dir_additions(self):\n \"\"\"\n add the string-like attributes from the info_axis.\n If info_axis is a MultiIndex, it's first level values are used.\n \"\"\"\n additions = {\n c\n for c in self._info_axis.unique(level=0)[:100]\n if isinstance(c, str) and c.isidentifier()\n }\n return super()._dir_additions().union(additions)\n\n # ----------------------------------------------------------------------\n # Consolidation of internals\n\n def _protect_consolidate(self, f):\n \"\"\"\n Consolidate _data -- if the blocks have changed, then clear the\n cache\n \"\"\"\n blocks_before = len(self._data.blocks)\n result = f()\n if len(self._data.blocks) != blocks_before:\n self._clear_item_cache()\n return result\n\n def _consolidate_inplace(self) -> None:\n \"\"\"Consolidate data in place and return None\"\"\"\n\n def f():\n self._data = self._data.consolidate()\n\n self._protect_consolidate(f)\n\n def _consolidate(self, inplace: bool_t = False):\n \"\"\"\n Compute NDFrame with \"consolidated\" internals (data of each dtype\n grouped together in a single ndarray).\n\n Parameters\n ----------\n inplace : bool, default False\n If False return new object, otherwise modify existing object.\n\n Returns\n -------\n consolidated : same type as caller\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if inplace:\n self._consolidate_inplace()\n else:\n f = lambda: self._data.consolidate()\n cons_data = self._protect_consolidate(f)\n return self._constructor(cons_data).__finalize__(self)\n\n @property\n def _is_mixed_type(self) -> bool_t:\n f = lambda: self._data.is_mixed_type\n return self._protect_consolidate(f)\n\n @property\n def _is_numeric_mixed_type(self) -> bool_t:\n f = lambda: self._data.is_numeric_mixed_type\n return self._protect_consolidate(f)\n\n def _check_inplace_setting(self, value) -> bool_t:\n \"\"\" check whether we allow in-place setting with this type of value \"\"\"\n if self._is_mixed_type:\n if not self._is_numeric_mixed_type:\n\n # allow an actual np.nan thru\n if is_float(value) and np.isnan(value):\n return True\n\n raise TypeError(\n \"Cannot do inplace boolean setting on \"\n \"mixed-types with a non np.nan value\"\n )\n\n return True\n\n def _get_numeric_data(self):\n return self._constructor(self._data.get_numeric_data()).__finalize__(self)\n\n def _get_bool_data(self):\n return self._constructor(self._data.get_bool_data()).__finalize__(self)\n\n # ----------------------------------------------------------------------\n # 
Internal Interface Methods\n\n @property\n def values(self) -> np.ndarray:\n \"\"\"\n Return a Numpy representation of the DataFrame.\n\n .. warning::\n\n We recommend using :meth:`DataFrame.to_numpy` instead.\n\n Only the values in the DataFrame will be returned, the axes labels\n will be removed.\n\n Returns\n -------\n numpy.ndarray\n The values of the DataFrame.\n\n See Also\n --------\n DataFrame.to_numpy : Recommended alternative to this method.\n DataFrame.index : Retrieve the index labels.\n DataFrame.columns : Retrieving the column names.\n\n Notes\n -----\n The dtype will be a lower-common-denominator dtype (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen. Use this\n with care if you are not dealing with the blocks.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. If dtypes are int32 and uint8, dtype will be upcast to\n int32. By :func:`numpy.find_common_type` convention, mixing int64\n and uint64 will result in a float64 dtype.\n\n Examples\n --------\n A DataFrame where all columns are the same type (e.g., int64) results\n in an array of the same type.\n\n >>> df = pd.DataFrame({'age': [ 3, 29],\n ... 'height': [94, 170],\n ... 'weight': [31, 115]})\n >>> df\n age height weight\n 0 3 94 31\n 1 29 170 115\n >>> df.dtypes\n age int64\n height int64\n weight int64\n dtype: object\n >>> df.values\n array([[ 3, 94, 31],\n [ 29, 170, 115]], dtype=int64)\n\n A DataFrame with mixed type columns(e.g., str/object, int64, float32)\n results in an ndarray of the broadest type that accommodates these\n mixed types (e.g., object).\n\n >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),\n ... ('lion', 80.5, 1),\n ... ('monkey', np.nan, None)],\n ... columns=('name', 'max_speed', 'rank'))\n >>> df2.dtypes\n name object\n max_speed float64\n rank object\n dtype: object\n >>> df2.values\n array([['parrot', 24.0, 'second'],\n ['lion', 80.5, 1],\n ['monkey', nan, None]], dtype=object)\n \"\"\"\n self._consolidate_inplace()\n return self._data.as_array(transpose=self._AXIS_REVERSED)\n\n @property\n def _values(self) -> np.ndarray:\n \"\"\"internal implementation\"\"\"\n return self.values\n\n def _internal_get_values(self) -> np.ndarray:\n \"\"\"\n Return an ndarray after converting sparse values to dense.\n\n This is the same as ``.values`` for non-sparse data. For sparse\n data contained in a `SparseArray`, the data are first\n converted to a dense representation.\n\n Returns\n -------\n numpy.ndarray\n Numpy representation of DataFrame.\n\n See Also\n --------\n values : Numpy representation of DataFrame.\n SparseArray : Container for sparse data.\n \"\"\"\n return self.values\n\n @property\n def dtypes(self):\n \"\"\"\n Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column.\n The result's index is the original DataFrame's columns. Columns\n with mixed types are stored with the ``object`` dtype. See\n :ref:`the User Guide <basics.dtypes>` for more.\n\n Returns\n -------\n pandas.Series\n The data type of each column.\n\n Examples\n --------\n >>> df = pd.DataFrame({'float': [1.0],\n ... 'int': [1],\n ... 'datetime': [pd.Timestamp('20180310')],\n ... 
'string': ['foo']})\n >>> df.dtypes\n float float64\n int int64\n datetime datetime64[ns]\n string object\n dtype: object\n \"\"\"\n from pandas import Series\n\n return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)\n\n def _to_dict_of_blocks(self, copy: bool_t = True):\n \"\"\"\n Return a dict of dtype -> Constructor Types that\n each is a homogeneous dtype.\n\n Internal ONLY\n \"\"\"\n return {\n k: self._constructor(v).__finalize__(self)\n for k, v, in self._data.to_dict(copy=copy).items()\n }\n\n def astype(\n self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = \"raise\"\n ) -> FrameOrSeries:\n \"\"\"\n Cast a pandas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type. Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n copy : bool, default True\n Return a copy when ``copy=True`` (be very careful setting\n ``copy=False`` as changes to values then may propagate to other\n pandas objects).\n errors : {'raise', 'ignore'}, default 'raise'\n Control raising of exceptions on invalid data for provided dtype.\n\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to a numeric type.\n numpy.ndarray.astype : Cast a numpy array to a specified type.\n\n Examples\n --------\n Create a DataFrame:\n\n >>> d = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df = pd.DataFrame(data=d)\n >>> df.dtypes\n col1 int64\n col2 int64\n dtype: object\n\n Cast all columns to int32:\n\n >>> df.astype('int32').dtypes\n col1 int32\n col2 int32\n dtype: object\n\n Cast col1 to int32 using a dictionary:\n\n >>> df.astype({'col1': 'int32'}).dtypes\n col1 int32\n col2 int64\n dtype: object\n\n Create a series:\n\n >>> ser = pd.Series([1, 2], dtype='int32')\n >>> ser\n 0 1\n 1 2\n dtype: int32\n >>> ser.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n Convert to categorical type:\n\n >>> ser.astype('category')\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [1, 2]\n\n Convert to ordered categorical type with custom ordering:\n\n >>> cat_dtype = pd.api.types.CategoricalDtype(\n ... categories=[2, 1], ordered=True)\n >>> ser.astype(cat_dtype)\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [2 < 1]\n\n Note that using ``copy=False`` and changing data on a new\n pandas object may propagate changes:\n\n >>> s1 = pd.Series([1, 2])\n >>> s2 = s1.astype('int64', copy=False)\n >>> s2[0] = 10\n >>> s1 # note that s1[0] has changed too\n 0 10\n 1 2\n dtype: int64\n \"\"\"\n if is_dict_like(dtype):\n if self.ndim == 1: # i.e. 
Series\n if len(dtype) > 1 or self.name not in dtype:\n raise KeyError(\n \"Only the Series name can be used for \"\n \"the key in Series dtype mappings.\"\n )\n new_type = dtype[self.name]\n return self.astype(new_type, copy, errors)\n\n for col_name in dtype.keys():\n if col_name not in self:\n raise KeyError(\n \"Only a column name can be used for the \"\n \"key in a dtype mappings argument.\"\n )\n results = []\n for col_name, col in self.items():\n if col_name in dtype:\n results.append(\n col.astype(dtype=dtype[col_name], copy=copy, errors=errors)\n )\n else:\n results.append(col.copy() if copy else col)\n\n elif is_extension_array_dtype(dtype) and self.ndim > 1:\n # GH 18099/22869: columnwise conversion to extension dtype\n # GH 24704: use iloc to handle duplicate column names\n results = [\n self.iloc[:, i].astype(dtype, copy=copy)\n for i in range(len(self.columns))\n ]\n\n else:\n # else, only a single dtype is given\n new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)\n return self._constructor(new_data).__finalize__(self)\n\n # GH 19920: retain column metadata after concat\n result = pd.concat(results, axis=1, copy=False)\n result.columns = self.columns\n return result\n\n def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:\n \"\"\"\n Make a copy of this object's indices and data.\n\n When ``deep=True`` (default), a new object will be created with a\n copy of the calling object's data and indices. Modifications to\n the data or indices of the copy will not be reflected in the\n original object (see notes below).\n\n When ``deep=False``, a new object will be created without copying\n the calling object's data or index (only references to the data\n and index are copied). Any changes to the data of the original\n will be reflected in the shallow copy (and vice versa).\n\n Parameters\n ----------\n deep : bool, default True\n Make a deep copy, including a copy of the data and the indices.\n With ``deep=False`` neither the indices nor the data are copied.\n\n Returns\n -------\n copy : Series or DataFrame\n Object type matches caller.\n\n Notes\n -----\n When ``deep=True``, data is copied but actual Python objects\n will not be copied recursively, only the reference to the object.\n This is in contrast to `copy.deepcopy` in the Standard Library,\n which recursively copies object data (see examples below).\n\n While ``Index`` objects are copied when ``deep=True``, the underlying\n numpy array is not copied for performance reasons. 
Since ``Index`` is\n immutable, the underlying data can be safely shared and a copy\n is not needed.\n\n Examples\n --------\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> s\n a 1\n b 2\n dtype: int64\n\n >>> s_copy = s.copy()\n >>> s_copy\n a 1\n b 2\n dtype: int64\n\n **Shallow copy versus default (deep) copy:**\n\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> deep = s.copy()\n >>> shallow = s.copy(deep=False)\n\n Shallow copy shares data and index with original.\n\n >>> s is shallow\n False\n >>> s.values is shallow.values and s.index is shallow.index\n True\n\n Deep copy has own copy of data and index.\n\n >>> s is deep\n False\n >>> s.values is deep.values or s.index is deep.index\n False\n\n Updates to the data shared by shallow copy and original is reflected\n in both; deep copy remains unchanged.\n\n >>> s[0] = 3\n >>> shallow[1] = 4\n >>> s\n a 3\n b 4\n dtype: int64\n >>> shallow\n a 3\n b 4\n dtype: int64\n >>> deep\n a 1\n b 2\n dtype: int64\n\n Note that when copying an object containing Python objects, a deep copy\n will copy the data, but will not do so recursively. Updating a nested\n data object will be reflected in the deep copy.\n\n >>> s = pd.Series([[1, 2], [3, 4]])\n >>> deep = s.copy()\n >>> s[0][0] = 10\n >>> s\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n >>> deep\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n \"\"\"\n data = self._data.copy(deep=deep)\n return self._constructor(data).__finalize__(self)\n\n def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:\n return self.copy(deep=deep)\n\n def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:\n \"\"\"\n Parameters\n ----------\n memo, default None\n Standard signature. Unused\n \"\"\"\n return self.copy(deep=True)\n\n def _convert(\n self: FrameOrSeries,\n datetime: bool_t = False,\n numeric: bool_t = False,\n timedelta: bool_t = False,\n coerce: bool_t = False,\n copy: bool_t = True,\n ) -> FrameOrSeries:\n \"\"\"\n Attempt to infer better dtype for object columns\n\n Parameters\n ----------\n datetime : bool, default False\n If True, convert to date where possible.\n numeric : bool, default False\n If True, attempt to convert to numbers (including strings), with\n unconvertible values becoming NaN.\n timedelta : bool, default False\n If True, convert to timedelta where possible.\n coerce : bool, default False\n If True, force conversion with unconvertible values converted to\n nulls (NaN or NaT).\n copy : bool, default True\n If True, return a copy even if no copy is necessary (e.g. no\n conversion was done). Note: This is meant for internal use, and\n should not be confused with inplace.\n\n Returns\n -------\n converted : same as input object\n \"\"\"\n validate_bool_kwarg(datetime, \"datetime\")\n validate_bool_kwarg(numeric, \"numeric\")\n validate_bool_kwarg(timedelta, \"timedelta\")\n validate_bool_kwarg(coerce, \"coerce\")\n validate_bool_kwarg(copy, \"copy\")\n return self._constructor(\n self._data.convert(\n datetime=datetime,\n numeric=numeric,\n timedelta=timedelta,\n coerce=coerce,\n copy=copy,\n )\n ).__finalize__(self)\n\n def infer_objects(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\"\n Attempt to infer better dtypes for object columns.\n\n Attempts soft conversion of object-dtyped\n columns, leaving non-object and unconvertible\n columns unchanged. The inference rules are the\n same as during normal Series/DataFrame construction.\n\n .. 
versionadded:: 0.21.0\n\n Returns\n -------\n converted : same type as input object\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to numeric type.\n convert_dtypes : Convert argument to best possible dtype.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [\"a\", 1, 2, 3]})\n >>> df = df.iloc[1:]\n >>> df\n A\n 1 1\n 2 2\n 3 3\n\n >>> df.dtypes\n A object\n dtype: object\n\n >>> df.infer_objects().dtypes\n A int64\n dtype: object\n \"\"\"\n # numeric=False necessary to only soft convert;\n # python objects will still be converted to\n # native numpy numeric types\n return self._constructor(\n self._data.convert(\n datetime=True, numeric=False, timedelta=True, coerce=False, copy=True\n )\n ).__finalize__(self)\n\n def convert_dtypes(\n self: FrameOrSeries,\n infer_objects: bool_t = True,\n convert_string: bool_t = True,\n convert_integer: bool_t = True,\n convert_boolean: bool_t = True,\n ) -> FrameOrSeries:\n \"\"\"\n Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n infer_objects : bool, default True\n Whether object dtypes should be converted to the best possible types.\n convert_string : bool, default True\n Whether object dtypes should be converted to ``StringDtype()``.\n convert_integer : bool, default True\n Whether, if possible, conversion can be done to integer extension types.\n convert_boolean : bool, defaults True\n Whether object dtypes should be converted to ``BooleanDtypes()``.\n\n Returns\n -------\n Series or DataFrame\n Copy of input object with new dtype.\n\n See Also\n --------\n infer_objects : Infer dtypes of objects.\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to a numeric type.\n\n Notes\n -----\n By default, ``convert_dtypes`` will attempt to convert a Series (or each\n Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options\n ``convert_string``, ``convert_integer``, and ``convert_boolean``, it is\n possible to turn off individual conversions to ``StringDtype``, the integer\n extension types or ``BooleanDtype``, respectively.\n\n For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference\n rules as during normal Series/DataFrame construction. Then, if possible,\n convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension\n type, otherwise leave as ``object``.\n\n If the dtype is integer, convert to an appropriate integer extension type.\n\n If the dtype is numeric, and consists of all integers, convert to an\n appropriate integer extension type.\n\n In the future, as new dtypes are added that support ``pd.NA``, the results\n of this method will change to support those new dtypes.\n\n Examples\n --------\n >>> df = pd.DataFrame(\n ... {\n ... \"a\": pd.Series([1, 2, 3], dtype=np.dtype(\"int32\")),\n ... \"b\": pd.Series([\"x\", \"y\", \"z\"], dtype=np.dtype(\"O\")),\n ... \"c\": pd.Series([True, False, np.nan], dtype=np.dtype(\"O\")),\n ... \"d\": pd.Series([\"h\", \"i\", np.nan], dtype=np.dtype(\"O\")),\n ... \"e\": pd.Series([10, np.nan, 20], dtype=np.dtype(\"float\")),\n ... \"f\": pd.Series([np.nan, 100.5, 200], dtype=np.dtype(\"float\")),\n ... }\n ... 
)\n\n Start with a DataFrame with default dtypes.\n\n >>> df\n a b c d e f\n 0 1 x True h 10.0 NaN\n 1 2 y False i NaN 100.5\n 2 3 z NaN NaN 20.0 200.0\n\n >>> df.dtypes\n a int32\n b object\n c object\n d object\n e float64\n f float64\n dtype: object\n\n Convert the DataFrame to use best possible dtypes.\n\n >>> dfn = df.convert_dtypes()\n >>> dfn\n a b c d e f\n 0 1 x True h 10 NaN\n 1 2 y False i <NA> 100.5\n 2 3 z <NA> <NA> 20 200.0\n\n >>> dfn.dtypes\n a Int32\n b string\n c boolean\n d string\n e Int64\n f float64\n dtype: object\n\n Start with a Series of strings and missing data represented by ``np.nan``.\n\n >>> s = pd.Series([\"a\", \"b\", np.nan])\n >>> s\n 0 a\n 1 b\n 2 NaN\n dtype: object\n\n Obtain a Series with dtype ``StringDtype``.\n\n >>> s.convert_dtypes()\n 0 a\n 1 b\n 2 <NA>\n dtype: string\n \"\"\"\n if self.ndim == 1:\n return self._convert_dtypes(\n infer_objects, convert_string, convert_integer, convert_boolean\n )\n else:\n results = [\n col._convert_dtypes(\n infer_objects, convert_string, convert_integer, convert_boolean\n )\n for col_name, col in self.items()\n ]\n result = pd.concat(results, axis=1, copy=False)\n return result\n\n # ----------------------------------------------------------------------\n # Filling NA's\n\n @doc(**_shared_doc_kwargs)\n def fillna(\n self: FrameOrSeries,\n value=None,\n method=None,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series, or DataFrame\n Value to use to fill holes (e.g. 0), alternately a\n dict/Series/DataFrame of values specifying which value to use for\n each index (for a Series) or column (for a DataFrame). Values not\n in the dict/Series/DataFrame will not be filled. This value cannot\n be a list.\n method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use next valid observation to fill gap.\n axis : {axes_single_arg}\n Axis along which to fill missing values.\n inplace : bool, default False\n If True, fill in-place. Note: this will modify any\n other views on this object (e.g., a no-copy slice for a column in a\n DataFrame).\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. Must be greater than 0 if not None.\n downcast : dict, default is None\n A dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n {klass} or None\n Object with missing values filled or None if ``inplace=True``.\n\n See Also\n --------\n interpolate : Fill NaN values using interpolation.\n reindex : Conform object to new index.\n asfreq : Convert TimeSeries to specified frequency.\n\n Examples\n --------\n >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],\n ... [3, 4, np.nan, 1],\n ... [np.nan, np.nan, np.nan, 5],\n ... [np.nan, 3, np.nan, 4]],\n ... 
columns=list('ABCD'))\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 NaN 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 0.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill')\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 NaN 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {{'A': 0, 'B': 1, 'C': 2, 'D': 3}}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 2.0 4\n\n Only replace the first NaN element.\n\n >>> df.fillna(value=values, limit=1)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 NaN 1\n 2 NaN 1.0 NaN 5\n 3 NaN 3.0 NaN 4\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n value, method = validate_fillna_kwargs(value, method)\n\n self._consolidate_inplace()\n\n # set the default here, so functions examining the signaure\n # can detect if something was set (e.g. in groupby) (GH9221)\n if axis is None:\n axis = 0\n axis = self._get_axis_number(axis)\n\n if value is None:\n\n if self._is_mixed_type and axis == 1:\n if inplace:\n raise NotImplementedError()\n result = self.T.fillna(method=method, limit=limit).T\n\n # need to downcast here because of all of the transposes\n result._data = result._data.downcast()\n\n return result\n\n new_data = self._data.interpolate(\n method=method,\n axis=axis,\n limit=limit,\n inplace=inplace,\n coerce=True,\n downcast=downcast,\n )\n else:\n if len(self._get_axis(axis)) == 0:\n return self\n\n if self.ndim == 1:\n if isinstance(value, (dict, ABCSeries)):\n value = create_series_with_explicit_dtype(\n value, dtype_if_empty=object\n )\n elif not is_list_like(value):\n pass\n else:\n raise TypeError(\n '\"value\" parameter must be a scalar, dict '\n \"or Series, but you passed a \"\n f'\"{type(value).__name__}\"'\n )\n\n new_data = self._data.fillna(\n value=value, limit=limit, inplace=inplace, downcast=downcast\n )\n\n elif isinstance(value, (dict, ABCSeries)):\n if axis == 1:\n raise NotImplementedError(\n \"Currently only can fill \"\n \"with dict/Series column \"\n \"by column\"\n )\n\n result = self if inplace else self.copy()\n for k, v in value.items():\n if k not in result:\n continue\n obj = result[k]\n obj.fillna(v, limit=limit, inplace=True, downcast=downcast)\n return result if not inplace else None\n\n elif not is_list_like(value):\n new_data = self._data.fillna(\n value=value, limit=limit, inplace=inplace, downcast=downcast\n )\n elif isinstance(value, ABCDataFrame) and self.ndim == 2:\n new_data = self.where(self.notna(), value)\n else:\n raise ValueError(f\"invalid fill value with a {type(value)}\")\n\n if inplace:\n self._update_inplace(new_data)\n return None\n else:\n return self._constructor(new_data).__finalize__(self)\n\n def ffill(\n self: FrameOrSeries,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.\n\n Returns\n -------\n %(klass)s or None\n Object with missing values filled or None if ``inplace=True``.\n \"\"\"\n return self.fillna(\n method=\"ffill\", axis=axis, inplace=inplace, limit=limit, downcast=downcast\n )\n\n def bfill(\n self: FrameOrSeries,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n 
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.\n\n Returns\n -------\n %(klass)s or None\n Object with missing values filled or None if ``inplace=True``.\n \"\"\"\n return self.fillna(\n method=\"bfill\", axis=axis, inplace=inplace, limit=limit, downcast=downcast\n )\n\n _shared_docs[\n \"replace\"\n ] = \"\"\"\n Replace values given in `to_replace` with `value`.\n\n Values of the %(klass)s are replaced with other values dynamically.\n This differs from updating with ``.loc`` or ``.iloc``, which require\n you to specify a location to update with some value.\n\n Parameters\n ----------\n to_replace : str, regex, list, dict, Series, int, float, or None\n How to find the values that will be replaced.\n\n * numeric, str or regex:\n\n - numeric: numeric values equal to `to_replace` will be\n replaced with `value`\n - str: string exactly matching `to_replace` will be replaced\n with `value`\n - regex: regexs matching `to_replace` will be replaced with\n `value`\n\n * list of str, regex, or numeric:\n\n - First, if `to_replace` and `value` are both lists, they\n **must** be the same length.\n - Second, if ``regex=True`` then all of the strings in **both**\n lists will be interpreted as regexs otherwise they will match\n directly. This doesn't matter much for `value` since there\n are only a few possible substitution regexes you can use.\n - str, regex and numeric rules apply as above.\n\n * dict:\n\n - Dicts can be used to specify different replacement values\n for different existing values. For example,\n ``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and\n 'y' with 'z'. To use a dict in this way the `value`\n parameter should be `None`.\n - For a DataFrame a dict can specify that different values\n should be replaced in different columns. For example,\n ``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'\n and the value 'z' in column 'b' and replaces these values\n with whatever is specified in `value`. The `value` parameter\n should not be ``None`` in this case. You can treat this as a\n special case of passing two lists except that you are\n specifying the column to search in.\n - For a DataFrame nested dictionaries, e.g.,\n ``{'a': {'b': np.nan}}``, are read as follows: look in column\n 'a' for the value 'b' and replace it with NaN. The `value`\n parameter should be ``None`` to use a nested dict in this\n way. You can nest regular expressions as well. Note that\n column names (the top-level dictionary keys in a nested\n dictionary) **cannot** be regular expressions.\n\n * None:\n\n - This means that the `regex` argument must be a string,\n compiled regular expression, or list, dict, ndarray or\n Series of such elements. If `value` is also ``None`` then\n this **must** be a nested dictionary or Series.\n\n See the examples section for examples of each of these.\n value : scalar, dict, list, str, regex, default None\n Value to replace any values matching `to_replace` with.\n For a DataFrame a dict of values can be used to specify which\n value to use for each column (columns not in the dict will not be\n filled). Regular expressions, strings and lists or dicts of such\n objects are also allowed.\n inplace : bool, default False\n If True, in place. Note: this will modify any\n other views on this object (e.g. 
a column from a DataFrame).\n Returns the caller if this is True.\n limit : int, default None\n Maximum size gap to forward or backward fill.\n regex : bool or same types as `to_replace`, default False\n Whether to interpret `to_replace` and/or `value` as regular\n expressions. If this is ``True`` then `to_replace` *must* be a\n string. Alternatively, this could be a regular expression or a\n list, dict, or array of regular expressions in which case\n `to_replace` must be ``None``.\n method : {'pad', 'ffill', 'bfill', `None`}\n The method to use when for replacement, when `to_replace` is a\n scalar, list or tuple and `value` is ``None``.\n\n .. versionchanged:: 0.23.0\n Added to DataFrame.\n\n Returns\n -------\n %(klass)s\n Object after replacement.\n\n Raises\n ------\n AssertionError\n * If `regex` is not a ``bool`` and `to_replace` is not\n ``None``.\n\n TypeError\n * If `to_replace` is not a scalar, array-like, ``dict``, or ``None``\n * If `to_replace` is a ``dict`` and `value` is not a ``list``,\n ``dict``, ``ndarray``, or ``Series``\n * If `to_replace` is ``None`` and `regex` is not compilable\n into a regular expression or is a list, dict, ndarray, or\n Series.\n * When replacing multiple ``bool`` or ``datetime64`` objects and\n the arguments to `to_replace` does not match the type of the\n value being replaced\n\n ValueError\n * If a ``list`` or an ``ndarray`` is passed to `to_replace` and\n `value` but they are not the same length.\n\n See Also\n --------\n %(klass)s.fillna : Fill NA values.\n %(klass)s.where : Replace values based on boolean condition.\n Series.str.replace : Simple string replacement.\n\n Notes\n -----\n * Regex substitution is performed under the hood with ``re.sub``. The\n rules for substitution for ``re.sub`` are the same.\n * Regular expressions will only substitute on strings, meaning you\n cannot provide, for example, a regular expression matching floating\n point numbers and expect the columns in your frame that have a\n numeric dtype to be matched. However, if those floating point\n numbers *are* strings, then you can do this.\n * This method has *a lot* of options. You are encouraged to experiment\n and play with this method to gain intuition about how it works.\n * When dict is used as the `to_replace` value, it is like\n key(s) in the dict are the to_replace part and\n value(s) in the dict are the value parameter.\n\n Examples\n --------\n\n **Scalar `to_replace` and `value`**\n\n >>> s = pd.Series([0, 1, 2, 3, 4])\n >>> s.replace(0, 5)\n 0 5\n 1 1\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],\n ... 'B': [5, 6, 7, 8, 9],\n ... 'C': ['a', 'b', 'c', 'd', 'e']})\n >>> df.replace(0, 5)\n A B C\n 0 5 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n **List-like `to_replace`**\n\n >>> df.replace([0, 1, 2, 3], 4)\n A B C\n 0 4 5 a\n 1 4 6 b\n 2 4 7 c\n 3 4 8 d\n 4 4 9 e\n\n >>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])\n A B C\n 0 4 5 a\n 1 3 6 b\n 2 2 7 c\n 3 1 8 d\n 4 4 9 e\n\n >>> s.replace([1, 2], method='bfill')\n 0 0\n 1 3\n 2 3\n 3 3\n 4 4\n dtype: int64\n\n **dict-like `to_replace`**\n\n >>> df.replace({0: 10, 1: 100})\n A B C\n 0 10 5 a\n 1 100 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({'A': 0, 'B': 5}, 100)\n A B C\n 0 100 100 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({'A': {0: 100, 4: 400}})\n A B C\n 0 100 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 400 9 e\n\n **Regular expression `to_replace`**\n\n >>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],\n ... 
'B': ['abc', 'bar', 'xyz']})\n >>> df.replace(to_replace=r'^ba.$', value='new', regex=True)\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)\n A B\n 0 new abc\n 1 foo bar\n 2 bait xyz\n\n >>> df.replace(regex=r'^ba.$', value='new')\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})\n A B\n 0 new abc\n 1 xyz new\n 2 bait xyz\n\n >>> df.replace(regex=[r'^ba.$', 'foo'], value='new')\n A B\n 0 new abc\n 1 new new\n 2 bait xyz\n\n Note that when replacing multiple ``bool`` or ``datetime64`` objects,\n the data types in the `to_replace` parameter must match the data\n type of the value being replaced:\n\n >>> df = pd.DataFrame({'A': [True, False, True],\n ... 'B': [False, True, False]})\n >>> df.replace({'a string': 'new value', True: False}) # raises\n Traceback (most recent call last):\n ...\n TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'\n\n This raises a ``TypeError`` because one of the ``dict`` keys is not of\n the correct type for replacement.\n\n Compare the behavior of ``s.replace({'a': None})`` and\n ``s.replace('a', None)`` to understand the peculiarities\n of the `to_replace` parameter:\n\n >>> s = pd.Series([10, 'a', 'a', 'b', 'a'])\n\n When one uses a dict as the `to_replace` value, it is like the\n value(s) in the dict are equal to the `value` parameter.\n ``s.replace({'a': None})`` is equivalent to\n ``s.replace(to_replace={'a': None}, value=None, method=None)``:\n\n >>> s.replace({'a': None})\n 0 10\n 1 None\n 2 None\n 3 b\n 4 None\n dtype: object\n\n When ``value=None`` and `to_replace` is a scalar, list or\n tuple, `replace` uses the method parameter (default 'pad') to do the\n replacement. So this is why the 'a' values are being replaced by 10\n in rows 1 and 2 and 'b' in row 4 in this case.\n The command ``s.replace('a', None)`` is actually equivalent to\n ``s.replace(to_replace='a', value=None, method='pad')``:\n\n >>> s.replace('a', None)\n 0 10\n 1 10\n 2 10\n 3 b\n 4 b\n dtype: object\n \"\"\"\n\n @Appender(_shared_docs[\"replace\"] % _shared_doc_kwargs)\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace=False,\n limit=None,\n regex=False,\n method=\"pad\",\n ):\n if not (\n is_scalar(to_replace)\n or isinstance(to_replace, pd.Series)\n or is_re_compilable(to_replace)\n or is_list_like(to_replace)\n ):\n raise TypeError(\n \"Expecting 'to_replace' to be either a scalar, array-like, \"\n \"dict or None, got invalid type \"\n f\"{repr(type(to_replace).__name__)}\"\n )\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if not is_bool(regex) and to_replace is not None:\n raise AssertionError(\"'to_replace' must be 'None' if 'regex' is not a bool\")\n\n self._consolidate_inplace()\n\n if value is None:\n # passing a single value that is scalar like\n # when value is None (GH5319), for compat\n if not is_dict_like(to_replace) and not is_dict_like(regex):\n to_replace = [to_replace]\n\n if isinstance(to_replace, (tuple, list)):\n if isinstance(self, ABCDataFrame):\n return self.apply(\n _single_replace, args=(to_replace, method, inplace, limit)\n )\n return _single_replace(self, to_replace, method, inplace, limit)\n\n if not is_dict_like(to_replace):\n if not is_dict_like(regex):\n raise TypeError(\n 'If \"to_replace\" and \"value\" are both None '\n 'and \"to_replace\" is not a list, then '\n \"regex must be a mapping\"\n )\n to_replace = regex\n regex = True\n\n items = list(to_replace.items())\n keys, values = zip(*items) if items 
else ([], [])\n\n are_mappings = [is_dict_like(v) for v in values]\n\n if any(are_mappings):\n if not all(are_mappings):\n raise TypeError(\n \"If a nested mapping is passed, all values \"\n \"of the top level mapping must be mappings\"\n )\n # passed a nested dict/Series\n to_rep_dict = {}\n value_dict = {}\n\n for k, v in items:\n keys, values = list(zip(*v.items())) or ([], [])\n\n to_rep_dict[k] = list(keys)\n value_dict[k] = list(values)\n\n to_replace, value = to_rep_dict, value_dict\n else:\n to_replace, value = keys, values\n\n return self.replace(\n to_replace, value, inplace=inplace, limit=limit, regex=regex\n )\n else:\n\n # need a non-zero len on all axes\n if not self.size:\n return self\n\n new_data = self._data\n if is_dict_like(to_replace):\n if is_dict_like(value): # {'A' : NA} -> {'A' : 0}\n res = self if inplace else self.copy()\n for c, src in to_replace.items():\n if c in value and c in self:\n # object conversion is handled in\n # series.replace which is called recursively\n res[c] = res[c].replace(\n to_replace=src,\n value=value[c],\n inplace=False,\n regex=regex,\n )\n return None if inplace else res\n\n # {'A': NA} -> 0\n elif not is_list_like(value):\n keys = [(k, src) for k, src in to_replace.items() if k in self]\n keys_len = len(keys) - 1\n for i, (k, src) in enumerate(keys):\n convert = i == keys_len\n new_data = new_data.replace(\n to_replace=src,\n value=value,\n filter=[k],\n inplace=inplace,\n regex=regex,\n convert=convert,\n )\n else:\n raise TypeError(\"value argument must be scalar, dict, or Series\")\n\n elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']\n if is_list_like(value):\n if len(to_replace) != len(value):\n raise ValueError(\n f\"Replacement lists must match in length. \"\n f\"Expecting {len(to_replace)} got {len(value)} \"\n )\n\n new_data = self._data.replace_list(\n src_list=to_replace,\n dest_list=value,\n inplace=inplace,\n regex=regex,\n )\n\n else: # [NA, ''] -> 0\n new_data = self._data.replace(\n to_replace=to_replace, value=value, inplace=inplace, regex=regex\n )\n elif to_replace is None:\n if not (\n is_re_compilable(regex)\n or is_list_like(regex)\n or is_dict_like(regex)\n ):\n raise TypeError(\n f\"'regex' must be a string or a compiled regular expression \"\n f\"or a list or dict of strings or regular expressions, \"\n f\"you passed a {repr(type(regex).__name__)}\"\n )\n return self.replace(\n regex, value, inplace=inplace, limit=limit, regex=True\n )\n else:\n\n # dest iterable dict-like\n if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}\n new_data = self._data\n\n for k, v in value.items():\n if k in self:\n new_data = new_data.replace(\n to_replace=to_replace,\n value=v,\n filter=[k],\n inplace=inplace,\n regex=regex,\n )\n\n elif not is_list_like(value): # NA -> 0\n new_data = self._data.replace(\n to_replace=to_replace, value=value, inplace=inplace, regex=regex\n )\n else:\n raise TypeError(\n f'Invalid \"to_replace\" type: {repr(type(to_replace).__name__)}'\n )\n\n if inplace:\n self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n _shared_docs[\n \"interpolate\"\n ] = \"\"\"\n Please note that only ``method='linear'`` is supported for\n DataFrame/Series with a MultiIndex.\n\n Parameters\n ----------\n method : str, default 'linear'\n Interpolation technique to use. One of:\n\n * 'linear': Ignore the index and treat the values as equally\n spaced. 
This is the only method supported on MultiIndexes.\n * 'time': Works on daily and higher resolution data to interpolate\n given length of interval.\n * 'index', 'values': use the actual numerical values of the index.\n * 'pad': Fill in NaNs using existing values.\n * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',\n 'barycentric', 'polynomial': Passed to\n `scipy.interpolate.interp1d`. These methods use the numerical\n values of the index. Both 'polynomial' and 'spline' require that\n you also specify an `order` (int), e.g.\n ``df.interpolate(method='polynomial', order=5)``.\n * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':\n Wrappers around the SciPy interpolation methods of similar\n names. See `Notes`.\n * 'from_derivatives': Refers to\n `scipy.interpolate.BPoly.from_derivatives` which\n replaces 'piecewise_polynomial' interpolation method in\n scipy 0.18.\n axis : {0 or 'index', 1 or 'columns', None}, default None\n Axis to interpolate along.\n limit : int, optional\n Maximum number of consecutive NaNs to fill. Must be greater than\n 0.\n inplace : bool, default False\n Update the data in place if possible.\n limit_direction : {'forward', 'backward', 'both'}, default 'forward'\n If limit is specified, consecutive NaNs will be filled in this\n direction.\n limit_area : {`None`, 'inside', 'outside'}, default None\n If limit is specified, consecutive NaNs will be filled with this\n restriction.\n\n * ``None``: No fill restriction.\n * 'inside': Only fill NaNs surrounded by valid values\n (interpolate).\n * 'outside': Only fill NaNs outside valid values (extrapolate).\n\n .. versionadded:: 0.23.0\n\n downcast : optional, 'infer' or None, defaults to None\n Downcast dtypes if possible.\n **kwargs\n Keyword arguments to pass on to the interpolating function.\n\n Returns\n -------\n Series or DataFrame\n Returns the same object type as the caller, interpolated at\n some or all ``NaN`` values.\n\n See Also\n --------\n fillna : Fill missing values using different methods.\n scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials\n (Akima interpolator).\n scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the\n Bernstein basis.\n scipy.interpolate.interp1d : Interpolate a 1-D function.\n scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh\n interpolator).\n scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic\n interpolation.\n scipy.interpolate.CubicSpline : Cubic spline data interpolator.\n\n Notes\n -----\n The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'\n methods are wrappers around the respective SciPy implementations of\n similar names. These use the actual numerical values of the index.\n For more information on their behavior, see the\n `SciPy documentation\n <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__\n and `SciPy tutorial\n <https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.\n\n Examples\n --------\n Filling in ``NaN`` in a :class:`~pandas.Series` via linear\n interpolation.\n\n >>> s = pd.Series([0, 1, np.nan, 3])\n >>> s\n 0 0.0\n 1 1.0\n 2 NaN\n 3 3.0\n dtype: float64\n >>> s.interpolate()\n 0 0.0\n 1 1.0\n 2 2.0\n 3 3.0\n dtype: float64\n\n Filling in ``NaN`` in a Series by padding, but filling at most two\n consecutive ``NaN`` at a time.\n\n >>> s = pd.Series([np.nan, \"single_one\", np.nan,\n ... \"fill_two_more\", np.nan, np.nan, np.nan,\n ... 
4.71, np.nan])\n >>> s\n 0 NaN\n 1 single_one\n 2 NaN\n 3 fill_two_more\n 4 NaN\n 5 NaN\n 6 NaN\n 7 4.71\n 8 NaN\n dtype: object\n >>> s.interpolate(method='pad', limit=2)\n 0 NaN\n 1 single_one\n 2 single_one\n 3 fill_two_more\n 4 fill_two_more\n 5 fill_two_more\n 6 NaN\n 7 4.71\n 8 4.71\n dtype: object\n\n Filling in ``NaN`` in a Series via polynomial interpolation or splines:\n Both 'polynomial' and 'spline' methods require that you also specify\n an ``order`` (int).\n\n >>> s = pd.Series([0, 2, np.nan, 8])\n >>> s.interpolate(method='polynomial', order=2)\n 0 0.000000\n 1 2.000000\n 2 4.666667\n 3 8.000000\n dtype: float64\n\n Fill the DataFrame forward (that is, going down) along each column\n using linear interpolation.\n\n Note how the last entry in column 'a' is interpolated differently,\n because there is no entry after it to use for interpolation.\n Note how the first entry in column 'b' remains ``NaN``, because there\n is no entry before it to use for interpolation.\n\n >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),\n ... (np.nan, 2.0, np.nan, np.nan),\n ... (2.0, 3.0, np.nan, 9.0),\n ... (np.nan, 4.0, -4.0, 16.0)],\n ... columns=list('abcd'))\n >>> df\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 NaN 2.0 NaN NaN\n 2 2.0 3.0 NaN 9.0\n 3 NaN 4.0 -4.0 16.0\n >>> df.interpolate(method='linear', limit_direction='forward', axis=0)\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 1.0 2.0 -2.0 5.0\n 2 2.0 3.0 -3.0 9.0\n 3 2.0 4.0 -4.0 16.0\n\n Using polynomial interpolation.\n\n >>> df['d'].interpolate(method='polynomial', order=2)\n 0 1.0\n 1 4.0\n 2 9.0\n 3 16.0\n Name: d, dtype: float64\n \"\"\"\n\n @Appender(_shared_docs[\"interpolate\"] % _shared_doc_kwargs)\n def interpolate(\n self,\n method=\"linear\",\n axis=0,\n limit=None,\n inplace=False,\n limit_direction=\"forward\",\n limit_area=None,\n downcast=None,\n **kwargs,\n ):\n \"\"\"\n Interpolate values according to different methods.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n axis = self._get_axis_number(axis)\n\n if axis == 0:\n ax = self._info_axis_name\n _maybe_transposed_self = self\n elif axis == 1:\n _maybe_transposed_self = self.T\n ax = 1\n\n ax = _maybe_transposed_self._get_axis_number(ax)\n\n if _maybe_transposed_self.ndim == 2:\n alt_ax = 1 - ax\n else:\n alt_ax = ax\n\n if isinstance(_maybe_transposed_self.index, MultiIndex) and method != \"linear\":\n raise ValueError(\n \"Only `method=linear` interpolation is supported on MultiIndexes.\"\n )\n\n if _maybe_transposed_self._data.get_dtype_counts().get(\"object\") == len(\n _maybe_transposed_self.T\n ):\n raise TypeError(\n \"Cannot interpolate with all object-dtype columns \"\n \"in the DataFrame. Try setting at least one \"\n \"column to a numeric dtype.\"\n )\n\n # create/use the index\n if method == \"linear\":\n # prior default\n index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))\n else:\n index = _maybe_transposed_self._get_axis(alt_ax)\n methods = {\"index\", \"values\", \"nearest\", \"time\"}\n is_numeric_or_datetime = (\n is_numeric_dtype(index)\n or is_datetime64_any_dtype(index)\n or is_timedelta64_dtype(index)\n )\n if method not in methods and not is_numeric_or_datetime:\n raise ValueError(\n \"Index column must be numeric or datetime type when \"\n f\"using {method} method other than linear. \"\n \"Try setting a numeric or datetime index column before \"\n \"interpolating.\"\n )\n\n if isna(index).any():\n raise NotImplementedError(\n \"Interpolation with NaNs in the index \"\n \"has not been implemented. 
Try filling \"\n \"those NaNs before interpolating.\"\n )\n data = _maybe_transposed_self._data\n new_data = data.interpolate(\n method=method,\n axis=ax,\n index=index,\n limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n inplace=inplace,\n downcast=downcast,\n **kwargs,\n )\n\n if inplace:\n if axis == 1:\n new_data = self._constructor(new_data).T._data\n self._update_inplace(new_data)\n else:\n res = self._constructor(new_data).__finalize__(self)\n if axis == 1:\n res = res.T\n return res\n\n # ----------------------------------------------------------------------\n # Timeseries methods Methods\n\n def asof(self, where, subset=None):\n \"\"\"\n Return the last row(s) without any NaNs before `where`.\n\n The last row (for each element in `where`, if list) without any\n NaN is taken.\n In case of a :class:`~pandas.DataFrame`, the last row without NaN\n considering only the subset of columns (if not `None`)\n\n If there is no good value, NaN is returned for a Series or\n a Series of NaN values for a DataFrame\n\n Parameters\n ----------\n where : date or array-like of dates\n Date(s) before which the last row(s) are returned.\n subset : str or array-like of str, default `None`\n For DataFrame, if not `None`, only use these columns to\n check for NaNs.\n\n Returns\n -------\n scalar, Series, or DataFrame\n\n The return can be:\n\n * scalar : when `self` is a Series and `where` is a scalar\n * Series: when `self` is a Series and `where` is an array-like,\n or when `self` is a DataFrame and `where` is a scalar\n * DataFrame : when `self` is a DataFrame and `where` is an\n array-like\n\n Return scalar, Series, or DataFrame.\n\n See Also\n --------\n merge_asof : Perform an asof merge. Similar to left join.\n\n Notes\n -----\n Dates are assumed to be sorted. Raises if this is not the case.\n\n Examples\n --------\n A Series and a scalar `where`.\n\n >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])\n >>> s\n 10 1.0\n 20 2.0\n 30 NaN\n 40 4.0\n dtype: float64\n\n >>> s.asof(20)\n 2.0\n\n For a sequence `where`, a Series is returned. The first value is\n NaN, because the first element of `where` is before the first\n index value.\n\n >>> s.asof([5, 20])\n 5 NaN\n 20 2.0\n dtype: float64\n\n Missing values are not considered. The following is ``2.0``, not\n NaN, even though NaN is at the index location for ``30``.\n\n >>> s.asof(30)\n 2.0\n\n Take all columns into consideration\n\n >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],\n ... 'b': [None, None, None, None, 500]},\n ... index=pd.DatetimeIndex(['2018-02-27 09:01:00',\n ... '2018-02-27 09:02:00',\n ... '2018-02-27 09:03:00',\n ... '2018-02-27 09:04:00',\n ... '2018-02-27 09:05:00']))\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']))\n a b\n 2018-02-27 09:03:30 NaN NaN\n 2018-02-27 09:04:30 NaN NaN\n\n Take a single column into consideration\n\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']),\n ... 
subset=['a'])\n a b\n 2018-02-27 09:03:30 30.0 NaN\n 2018-02-27 09:04:30 40.0 NaN\n \"\"\"\n if isinstance(where, str):\n where = Timestamp(where)\n\n if not self.index.is_monotonic:\n raise ValueError(\"asof requires a sorted index\")\n\n is_series = isinstance(self, ABCSeries)\n if is_series:\n if subset is not None:\n raise ValueError(\"subset is not valid for Series\")\n else:\n if subset is None:\n subset = self.columns\n if not is_list_like(subset):\n subset = [subset]\n\n is_list = is_list_like(where)\n if not is_list:\n start = self.index[0]\n if isinstance(self.index, PeriodIndex):\n where = Period(where, freq=self.index.freq)\n\n if where < start:\n if not is_series:\n from pandas import Series\n\n return Series(index=self.columns, name=where, dtype=np.float64)\n return np.nan\n\n # It's always much faster to use a *while* loop here for\n # Series than pre-computing all the NAs. However a\n # *while* loop is extremely expensive for DataFrame\n # so we later pre-compute all the NAs and use the same\n # code path whether *where* is a scalar or list.\n # See PR: https://github.com/pandas-dev/pandas/pull/14476\n if is_series:\n loc = self.index.searchsorted(where, side=\"right\")\n if loc > 0:\n loc -= 1\n\n values = self._values\n while loc > 0 and isna(values[loc]):\n loc -= 1\n return values[loc]\n\n if not isinstance(where, Index):\n where = Index(where) if is_list else Index([where])\n\n nulls = self.isna() if is_series else self[subset].isna().any(1)\n if nulls.all():\n if is_series:\n return self._constructor(np.nan, index=where, name=self.name)\n elif is_list:\n from pandas import DataFrame\n\n return DataFrame(np.nan, index=where, columns=self.columns)\n else:\n from pandas import Series\n\n return Series(np.nan, index=self.columns, name=where[0])\n\n locs = self.index.asof_locs(where, ~(nulls.values))\n\n # mask the missing\n missing = locs == -1\n data = self.take(locs)\n data.index = where\n data.loc[missing] = np.nan\n return data if is_list else data.iloc[-1]\n\n # ----------------------------------------------------------------------\n # Action Methods\n\n _shared_docs[\n \"isna\"\n ] = \"\"\"\n Detect missing values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as None or :attr:`numpy.NaN`, gets mapped to True\n values.\n Everything else gets mapped to False values. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n\n Returns\n -------\n %(klass)s\n Mask of bool values for each element in %(klass)s that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n %(klass)s.isnull : Alias of isna.\n %(klass)s.notna : Boolean inverse of isna.\n %(klass)s.dropna : Omit axes labels with missing values.\n isna : Top-level isna.\n\n Examples\n --------\n Show which entries in a DataFrame are NA.\n\n >>> df = pd.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 
'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.isna()\n age born name toy\n 0 False True False True\n 1 False False False False\n 2 True False False False\n\n Show which entries in a Series are NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.isna()\n 0 False\n 1 False\n 2 True\n dtype: bool\n \"\"\"\n\n @Appender(_shared_docs[\"isna\"] % _shared_doc_kwargs)\n def isna(self: FrameOrSeries) -> FrameOrSeries:\n return isna(self).__finalize__(self)\n\n @Appender(_shared_docs[\"isna\"] % _shared_doc_kwargs)\n def isnull(self: FrameOrSeries) -> FrameOrSeries:\n return isna(self).__finalize__(self)\n\n _shared_docs[\n \"notna\"\n ] = \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to True. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n NA values, such as None or :attr:`numpy.NaN`, get mapped to False\n values.\n\n Returns\n -------\n %(klass)s\n Mask of bool values for each element in %(klass)s that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n %(klass)s.notnull : Alias of notna.\n %(klass)s.isna : Boolean inverse of notna.\n %(klass)s.dropna : Omit axes labels with missing values.\n notna : Top-level notna.\n\n Examples\n --------\n Show which entries in a DataFrame are not NA.\n\n >>> df = pd.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 
'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.notna()\n age born name toy\n 0 True False True False\n 1 True True True True\n 2 False True True True\n\n Show which entries in a Series are not NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.notna()\n 0 True\n 1 True\n 2 False\n dtype: bool\n \"\"\"\n\n @Appender(_shared_docs[\"notna\"] % _shared_doc_kwargs)\n def notna(self: FrameOrSeries) -> FrameOrSeries:\n return notna(self).__finalize__(self)\n\n @Appender(_shared_docs[\"notna\"] % _shared_doc_kwargs)\n def notnull(self: FrameOrSeries) -> FrameOrSeries:\n return notna(self).__finalize__(self)\n\n def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):\n if (lower is not None and np.any(isna(lower))) or (\n upper is not None and np.any(isna(upper))\n ):\n raise ValueError(\"Cannot use an NA value as a clip threshold\")\n\n result = self\n mask = isna(self.values)\n\n with np.errstate(all=\"ignore\"):\n if upper is not None:\n subset = self.to_numpy() <= upper\n result = result.where(subset, upper, axis=None, inplace=False)\n if lower is not None:\n subset = self.to_numpy() >= lower\n result = result.where(subset, lower, axis=None, inplace=False)\n\n if np.any(mask):\n result[mask] = np.nan\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def _clip_with_one_bound(self, threshold, method, axis, inplace):\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # method is self.le for upper bound and self.ge for lower bound\n if is_scalar(threshold) and is_number(threshold):\n if method.__name__ == \"le\":\n return self._clip_with_scalar(None, threshold, inplace=inplace)\n return self._clip_with_scalar(threshold, None, inplace=inplace)\n\n subset = method(threshold, axis=axis) | isna(self)\n\n # GH #15390\n # In order for where method to work, the threshold must\n # be transformed to NDFrame from other array like structure.\n if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):\n if isinstance(self, ABCSeries):\n threshold = self._constructor(threshold, index=self.index)\n else:\n threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]\n return self.where(subset, threshold, axis=axis, inplace=inplace)\n\n def clip(\n self: FrameOrSeries,\n lower=None,\n upper=None,\n axis=None,\n inplace: bool_t = False,\n *args,\n **kwargs,\n ) -> FrameOrSeries:\n \"\"\"\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values. Thresholds\n can be singular values or array like, and in the latter case\n the clipping is performed element-wise in the specified axis.\n\n Parameters\n ----------\n lower : float or array_like, default None\n Minimum threshold value. All values below this\n threshold will be set to it.\n upper : float or array_like, default None\n Maximum threshold value. All values above this\n threshold will be set to it.\n axis : int or str axis name, optional\n Align object with lower and upper along the given axis.\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n .. 
versionadded:: 0.21.0\n *args, **kwargs\n Additional keywords have no effect but might be accepted\n for compatibility with numpy.\n\n Returns\n -------\n Series or DataFrame\n Same type as calling object with the values outside the\n clip boundaries replaced.\n\n Examples\n --------\n >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}\n >>> df = pd.DataFrame(data)\n >>> df\n col_0 col_1\n 0 9 -2\n 1 -3 -7\n 2 0 6\n 3 -1 8\n 4 5 -5\n\n Clips per column using lower and upper thresholds:\n\n >>> df.clip(-4, 6)\n col_0 col_1\n 0 6 -2\n 1 -3 -4\n 2 0 6\n 3 -1 6\n 4 5 -4\n\n Clips using specific lower and upper thresholds per column element:\n\n >>> t = pd.Series([2, -4, -1, 6, 3])\n >>> t\n 0 2\n 1 -4\n 2 -1\n 3 6\n 4 3\n dtype: int64\n\n >>> df.clip(t, t + 4, axis=0)\n col_0 col_1\n 0 6 2\n 1 -3 -4\n 2 0 3\n 3 6 8\n 4 5 3\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n axis = nv.validate_clip_with_axis(axis, args, kwargs)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # GH 17276\n # numpy doesn't like NaN as a clip value\n # so ignore\n # GH 19992\n # numpy doesn't drop a list-like bound containing NaN\n if not is_list_like(lower) and np.any(isna(lower)):\n lower = None\n if not is_list_like(upper) and np.any(isna(upper)):\n upper = None\n\n # GH 2747 (arguments were reversed)\n if lower is not None and upper is not None:\n if is_scalar(lower) and is_scalar(upper):\n lower, upper = min(lower, upper), max(lower, upper)\n\n # fast-path for scalars\n if (lower is None or (is_scalar(lower) and is_number(lower))) and (\n upper is None or (is_scalar(upper) and is_number(upper))\n ):\n return self._clip_with_scalar(lower, upper, inplace=inplace)\n\n result = self\n if lower is not None:\n result = result._clip_with_one_bound(\n lower, method=self.ge, axis=axis, inplace=inplace\n )\n if upper is not None:\n if inplace:\n result = self\n result = result._clip_with_one_bound(\n upper, method=self.le, axis=axis, inplace=inplace\n )\n\n return result\n\n _shared_docs[\n \"groupby\"\n ] = \"\"\"\n Group %(klass)s using a mapper or by a Series of columns.\n\n A groupby operation involves some combination of splitting the\n object, applying a function, and combining the results. This can be\n used to group large amounts of data and compute operations on these\n groups.\n\n Parameters\n ----------\n by : mapping, function, label, or list of labels\n Used to determine the groups for the groupby.\n If ``by`` is a function, it's called on each value of the object's\n index. If a dict or Series is passed, the Series or dict VALUES\n will be used to determine the groups (the Series' values are first\n aligned; see ``.align()`` method). If an ndarray is passed, the\n values are used as-is determine the groups. A label or list of\n labels may be passed to group by the columns in ``self``. Notice\n that a tuple is interpreted as a (single) key.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Split along rows (0) or columns (1).\n level : int, level name, or sequence of such, default None\n If the axis is a MultiIndex (hierarchical), group by a particular\n level or levels.\n as_index : bool, default True\n For aggregated output, return object with group labels as the\n index. Only relevant for DataFrame input. as_index=False is\n effectively \"SQL-style\" grouped output.\n sort : bool, default True\n Sort group keys. Get better performance by turning this off.\n Note this does not influence the order of observations within each\n group. 
Groupby preserves the order of rows within each group.\n group_keys : bool, default True\n When calling apply, add group keys to index to identify pieces.\n squeeze : bool, default False\n Reduce the dimensionality of the return type if possible,\n otherwise return a consistent type.\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n %(klass)sGroupBy\n Returns a groupby object that contains information about the groups.\n\n See Also\n --------\n resample : Convenience method for frequency conversion and resampling\n of time series.\n\n Notes\n -----\n See the `user guide\n <https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.\n \"\"\"\n\n def asfreq(\n self: FrameOrSeries,\n freq,\n method=None,\n how: Optional[str] = None,\n normalize: bool_t = False,\n fill_value=None,\n ) -> FrameOrSeries:\n \"\"\"\n Convert TimeSeries to specified frequency.\n\n Optionally provide filling method to pad/backfill missing values.\n\n Returns the original data conformed to a new index with the specified\n frequency. ``resample`` is more appropriate if an operation, such as\n summarization, is necessary to represent the data at the new frequency.\n\n Parameters\n ----------\n freq : DateOffset or str\n method : {'backfill'/'bfill', 'pad'/'ffill'}, default None\n Method to use for filling holes in reindexed Series (note this\n does not fill NaNs that already were present):\n\n * 'pad' / 'ffill': propagate last valid observation forward to next\n valid\n * 'backfill' / 'bfill': use NEXT valid observation to fill.\n how : {'start', 'end'}, default end\n For PeriodIndex only (see PeriodIndex.asfreq).\n normalize : bool, default False\n Whether to reset output index to midnight.\n fill_value : scalar, optional\n Value to use for missing values, applied during upsampling (note\n this does not fill NaNs that already were present).\n\n Returns\n -------\n converted : same type as caller\n\n See Also\n --------\n reindex\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n Start by creating a series with 4 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=4, freq='T')\n >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)\n >>> df = pd.DataFrame({'s':series})\n >>> df\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:03:00 3.0\n\n Upsample the series into 30 second bins.\n\n >>> df.asfreq(freq='30S')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 NaN\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``fill value``.\n\n >>> df.asfreq(freq='30S', fill_value=9.0)\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 9.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 9.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 9.0\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``method``.\n\n >>> df.asfreq(freq='30S', method='bfill')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 2.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 3.0\n 2000-01-01 00:03:00 3.0\n \"\"\"\n from pandas.core.resample 
import asfreq\n\n return asfreq(\n self,\n freq,\n method=method,\n how=how,\n normalize=normalize,\n fill_value=fill_value,\n )\n\n def at_time(\n self: FrameOrSeries, time, asof: bool_t = False, axis=None\n ) -> FrameOrSeries:\n \"\"\"\n Select values at particular time of day (e.g., 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n\n >>> ts.at_time('12:00')\n A\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n try:\n indexer = index.indexer_at_time(time, asof=asof)\n except AttributeError as err:\n raise TypeError(\"Index must be DatetimeIndex\") from err\n\n return self._take_with_is_copy(indexer, axis=axis)\n\n def between_time(\n self: FrameOrSeries,\n start_time,\n end_time,\n include_start: bool_t = True,\n include_end: bool_t = True,\n axis=None,\n ) -> FrameOrSeries:\n \"\"\"\n Select values between particular times of the day (e.g., 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or str\n Initial time as a time filter limit.\n end_time : datetime.time or str\n End time as a time filter limit.\n include_start : bool, default True\n Whether the start time needs to be included in the result.\n include_end : bool, default True\n Whether the end time needs to be included in the result.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine range time on index or columns value.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n Data from the original object filtered to the specified dates range.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n\n >>> ts.between_time('0:15', '0:45')\n A\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n\n You get the times that are *not* between two times by setting\n ``start_time`` later than ``end_time``:\n\n >>> ts.between_time('0:45', '0:15')\n A\n 2018-04-09 00:00:00 1\n 2018-04-12 01:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n try:\n indexer = index.indexer_between_time(\n start_time,\n end_time,\n include_start=include_start,\n include_end=include_end,\n )\n except AttributeError as err:\n raise TypeError(\"Index must be DatetimeIndex\") from err\n\n return self._take_with_is_copy(indexer, axis=axis)\n\n def resample(\n self,\n rule,\n axis=0,\n closed: Optional[str] = None,\n label: Optional[str] = None,\n convention: str = \"start\",\n kind: Optional[str] = None,\n loffset=None,\n base: int = 0,\n on=None,\n level=None,\n ) -> \"Resampler\":\n \"\"\"\n Resample time-series data.\n\n Convenience method for frequency conversion and resampling of time\n series. Object must have a datetime-like index (`DatetimeIndex`,\n `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values\n to the `on` or `level` keyword.\n\n Parameters\n ----------\n rule : DateOffset, Timedelta or str\n The offset string or object representing target conversion.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Which axis to use for up- or down-sampling. For `Series` this\n will default to 0, i.e. along the rows. Must be\n `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.\n closed : {'right', 'left'}, default None\n Which side of bin interval is closed. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n label : {'right', 'left'}, default None\n Which bin edge label to label bucket with. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n convention : {'start', 'end', 's', 'e'}, default 'start'\n For `PeriodIndex` only, controls whether to use the start or\n end of `rule`.\n kind : {'timestamp', 'period'}, optional, default None\n Pass 'timestamp' to convert the resulting index to a\n `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.\n By default the input representation is retained.\n loffset : timedelta, default None\n Adjust the resampled time labels.\n base : int, default 0\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. 
Defaults to 0.\n on : str, optional\n For a DataFrame, column to use instead of index for resampling.\n Column must be datetime-like.\n\n level : str or int, optional\n For a MultiIndex, level (name or number) to use for\n resampling. `level` must be datetime-like.\n\n Returns\n -------\n Resampler object\n\n See Also\n --------\n groupby : Group by mapping, function, label, or list of labels.\n Series.resample : Resample a Series.\n DataFrame.resample: Resample a DataFrame.\n\n Notes\n -----\n See the `user guide\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_\n for more.\n\n To learn more about the offset strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.\n\n Examples\n --------\n Start by creating a series with 9 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=9, freq='T')\n >>> series = pd.Series(range(9), index=index)\n >>> series\n 2000-01-01 00:00:00 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:02:00 2\n 2000-01-01 00:03:00 3\n 2000-01-01 00:04:00 4\n 2000-01-01 00:05:00 5\n 2000-01-01 00:06:00 6\n 2000-01-01 00:07:00 7\n 2000-01-01 00:08:00 8\n Freq: T, dtype: int64\n\n Downsample the series into 3 minute bins and sum the values\n of the timestamps falling into a bin.\n\n >>> series.resample('3T').sum()\n 2000-01-01 00:00:00 3\n 2000-01-01 00:03:00 12\n 2000-01-01 00:06:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but label each\n bin using the right edge instead of the left. Please note that the\n value in the bucket used as the label is not included in the bucket,\n which it labels. For example, in the original series the\n bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed\n value in the resampled bucket with the label ``2000-01-01 00:03:00``\n does not include 3 (if it did, the summed value would be 6, not 3).\n To include this value close the right side of the bin interval as\n illustrated in the example below this one.\n\n >>> series.resample('3T', label='right').sum()\n 2000-01-01 00:03:00 3\n 2000-01-01 00:06:00 12\n 2000-01-01 00:09:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but close the right\n side of the bin interval.\n\n >>> series.resample('3T', label='right', closed='right').sum()\n 2000-01-01 00:00:00 0\n 2000-01-01 00:03:00 6\n 2000-01-01 00:06:00 15\n 2000-01-01 00:09:00 15\n Freq: 3T, dtype: int64\n\n Upsample the series into 30 second bins.\n\n >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 1.0\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n Freq: 30S, dtype: float64\n\n Upsample the series into 30 second bins and fill the ``NaN``\n values using the ``pad`` method.\n\n >>> series.resample('30S').pad()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 1\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Upsample the series into 30 second bins and fill the\n ``NaN`` values using the ``bfill`` method.\n\n >>> series.resample('30S').bfill()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 1\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 2\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Pass a custom function via ``apply``\n\n >>> def custom_resampler(array_like):\n ... 
return np.sum(array_like) + 5\n ...\n >>> series.resample('3T').apply(custom_resampler)\n 2000-01-01 00:00:00 8\n 2000-01-01 00:03:00 17\n 2000-01-01 00:06:00 26\n Freq: 3T, dtype: int64\n\n For a Series with a PeriodIndex, the keyword `convention` can be\n used to control whether to use the start or end of `rule`.\n\n Resample a year by quarter using 'start' `convention`. Values are\n assigned to the first quarter of the period.\n\n >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',\n ... freq='A',\n ... periods=2))\n >>> s\n 2012 1\n 2013 2\n Freq: A-DEC, dtype: int64\n >>> s.resample('Q', convention='start').asfreq()\n 2012Q1 1.0\n 2012Q2 NaN\n 2012Q3 NaN\n 2012Q4 NaN\n 2013Q1 2.0\n 2013Q2 NaN\n 2013Q3 NaN\n 2013Q4 NaN\n Freq: Q-DEC, dtype: float64\n\n Resample quarters by month using 'end' `convention`. Values are\n assigned to the last month of the period.\n\n >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',\n ... freq='Q',\n ... periods=4))\n >>> q\n 2018Q1 1\n 2018Q2 2\n 2018Q3 3\n 2018Q4 4\n Freq: Q-DEC, dtype: int64\n >>> q.resample('M', convention='end').asfreq()\n 2018-03 1.0\n 2018-04 NaN\n 2018-05 NaN\n 2018-06 2.0\n 2018-07 NaN\n 2018-08 NaN\n 2018-09 3.0\n 2018-10 NaN\n 2018-11 NaN\n 2018-12 4.0\n Freq: M, dtype: float64\n\n For DataFrame objects, the keyword `on` can be used to specify the\n column instead of the index for resampling.\n\n >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df = pd.DataFrame(d)\n >>> df['week_starting'] = pd.date_range('01/01/2018',\n ... periods=8,\n ... freq='W')\n >>> df\n price volume week_starting\n 0 10 50 2018-01-07\n 1 11 60 2018-01-14\n 2 9 40 2018-01-21\n 3 13 100 2018-01-28\n 4 14 50 2018-02-04\n 5 18 100 2018-02-11\n 6 17 40 2018-02-18\n 7 19 50 2018-02-25\n >>> df.resample('M', on='week_starting').mean()\n price volume\n week_starting\n 2018-01-31 10.75 62.5\n 2018-02-28 17.00 60.0\n\n For a DataFrame with MultiIndex, the keyword `level` can be used to\n specify on which level the resampling needs to take place.\n\n >>> days = pd.date_range('1/1/2000', periods=4, freq='D')\n >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df2 = pd.DataFrame(d2,\n ... index=pd.MultiIndex.from_product([days,\n ... ['morning',\n ... 'afternoon']]\n ... 
))\n >>> df2\n price volume\n 2000-01-01 morning 10 50\n afternoon 11 60\n 2000-01-02 morning 9 40\n afternoon 13 100\n 2000-01-03 morning 14 50\n afternoon 18 100\n 2000-01-04 morning 17 40\n afternoon 19 50\n >>> df2.resample('D', level=0).sum()\n price volume\n 2000-01-01 21 110\n 2000-01-02 22 140\n 2000-01-03 32 150\n 2000-01-04 36 90\n \"\"\"\n from pandas.core.resample import get_resampler\n\n axis = self._get_axis_number(axis)\n return get_resampler(\n self,\n freq=rule,\n label=label,\n closed=closed,\n axis=axis,\n kind=kind,\n loffset=loffset,\n convention=convention,\n base=base,\n key=on,\n level=level,\n )\n\n def first(self: FrameOrSeries, offset) -> FrameOrSeries:\n \"\"\"\n Method to subset initial periods of time series data based on a date offset.\n\n Parameters\n ----------\n offset : str, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n last : Select final periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the first 3 days:\n\n >>> ts.first('3D')\n A\n 2018-04-09 1\n 2018-04-11 2\n\n Notice the data for 3 first calender days were returned, not the first\n 3 days observed in the dataset, and therefore data for 2018-04-13 was\n not returned.\n \"\"\"\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(\"'first' only supports a DatetimeIndex index\")\n\n if len(self.index) == 0:\n return self\n\n offset = to_offset(offset)\n end_date = end = self.index[0] + offset\n\n # Tick-like, e.g. 
3 weeks\n if not offset.is_anchored() and hasattr(offset, \"_inc\"):\n if end_date in self.index:\n end = self.index.searchsorted(end_date, side=\"left\")\n return self.iloc[:end]\n\n return self.loc[:end]\n\n def last(self: FrameOrSeries, offset) -> FrameOrSeries:\n \"\"\"\n Method to subset final periods of time series data based on a date offset.\n\n Parameters\n ----------\n offset : str, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n first : Select initial periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> ts.last('3D')\n A\n 2018-04-13 3\n 2018-04-15 4\n\n Notice the data for 3 last calender days were returned, not the last\n 3 observed days in the dataset, and therefore data for 2018-04-11 was\n not returned.\n \"\"\"\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(\"'last' only supports a DatetimeIndex index\")\n\n if len(self.index) == 0:\n return self\n\n offset = to_offset(offset)\n\n start_date = self.index[-1] - offset\n start = self.index.searchsorted(start_date, side=\"right\")\n return self.iloc[start:]\n\n def rank(\n self: FrameOrSeries,\n axis=0,\n method: str = \"average\",\n numeric_only: Optional[bool_t] = None,\n na_option: str = \"keep\",\n ascending: bool_t = True,\n pct: bool_t = False,\n ) -> FrameOrSeries:\n \"\"\"\n Compute numerical data ranks (1 through n) along axis.\n\n By default, equal values are assigned a rank that is the average of the\n ranks of those values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Index to direct ranking.\n method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'\n How to rank the group of records that have the same value (i.e. ties):\n\n * average: average rank of the group\n * min: lowest rank in the group\n * max: highest rank in the group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups.\n\n numeric_only : bool, optional\n For DataFrame objects, rank only numeric columns if set to True.\n na_option : {'keep', 'top', 'bottom'}, default 'keep'\n How to rank NaN values:\n\n * keep: assign NaN rank to NaN values\n * top: assign smallest rank to NaN values if ascending\n * bottom: assign highest rank to NaN values if ascending.\n\n ascending : bool, default True\n Whether or not the elements should be ranked in ascending order.\n pct : bool, default False\n Whether or not to display the returned rankings in percentile\n form.\n\n Returns\n -------\n same type as caller\n Return a Series or DataFrame with data ranks as values.\n\n See Also\n --------\n core.groupby.GroupBy.rank : Rank of values within each group.\n\n Examples\n --------\n >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',\n ... 'spider', 'snake'],\n ... 
'Number_legs': [4, 2, 4, 8, np.nan]})\n >>> df\n Animal Number_legs\n 0 cat 4.0\n 1 penguin 2.0\n 2 dog 4.0\n 3 spider 8.0\n 4 snake NaN\n\n The following example shows how the method behaves with the above\n parameters:\n\n * default_rank: this is the default behaviour obtained without using\n any parameter.\n * max_rank: setting ``method = 'max'`` the records that have the\n same values are ranked using the highest rank (e.g.: since 'cat'\n and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)\n * NA_bottom: choosing ``na_option = 'bottom'``, if there are records\n with NaN values they are placed at the bottom of the ranking.\n * pct_rank: when setting ``pct = True``, the ranking is expressed as\n percentile rank.\n\n >>> df['default_rank'] = df['Number_legs'].rank()\n >>> df['max_rank'] = df['Number_legs'].rank(method='max')\n >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')\n >>> df['pct_rank'] = df['Number_legs'].rank(pct=True)\n >>> df\n Animal Number_legs default_rank max_rank NA_bottom pct_rank\n 0 cat 4.0 2.5 3.0 2.5 0.625\n 1 penguin 2.0 1.0 1.0 1.0 0.250\n 2 dog 4.0 2.5 3.0 2.5 0.625\n 3 spider 8.0 4.0 4.0 4.0 1.000\n 4 snake NaN NaN NaN 5.0 NaN\n \"\"\"\n axis = self._get_axis_number(axis)\n\n if na_option not in {\"keep\", \"top\", \"bottom\"}:\n msg = \"na_option must be one of 'keep', 'top', or 'bottom'\"\n raise ValueError(msg)\n\n def ranker(data):\n ranks = algos.rank(\n data.values,\n axis=axis,\n method=method,\n ascending=ascending,\n na_option=na_option,\n pct=pct,\n )\n ranks = self._constructor(ranks, **data._construct_axes_dict())\n return ranks.__finalize__(self)\n\n # if numeric_only is None, and we can't get anything, we try with\n # numeric_only=True\n if numeric_only is None:\n try:\n return ranker(self)\n except TypeError:\n numeric_only = True\n\n if numeric_only:\n data = self._get_numeric_data()\n else:\n data = self\n\n return ranker(data)\n\n _shared_docs[\n \"align\"\n ] = \"\"\"\n Align two objects on their axes with the specified join method.\n\n Join method is specified for each axis Index.\n\n Parameters\n ----------\n other : DataFrame or Series\n join : {'outer', 'inner', 'left', 'right'}, default 'outer'\n axis : allowed axis of the other object, default None\n Align on index (0), columns (1), or both (None).\n level : int or level name, default None\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n copy : bool, default True\n Always returns new objects. If copy=False and no reindexing is\n required then original objects are returned.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series:\n\n - pad / ffill: propagate last valid observation forward to next valid.\n - backfill / bfill: use NEXT valid observation to fill gap.\n\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. 
Must be greater than 0 if not None.\n fill_axis : %(axes_single_arg)s, default 0\n Filling axis, method and limit.\n broadcast_axis : %(axes_single_arg)s, default None\n Broadcast values along this axis, if aligning two objects of\n different dimensions.\n\n Returns\n -------\n (left, right) : (%(klass)s, type of other)\n Aligned objects.\n \"\"\"\n\n @Appender(_shared_docs[\"align\"] % _shared_doc_kwargs)\n def align(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy=True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n broadcast_axis=None,\n ):\n method = missing.clean_fill_method(method)\n\n if broadcast_axis == 1 and self.ndim != other.ndim:\n if isinstance(self, ABCSeries):\n # this means other is a DataFrame, and we need to broadcast\n # self\n cons = self._constructor_expanddim\n df = cons(\n {c: self for c in other.columns}, **other._construct_axes_dict()\n )\n return df._align_frame(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n elif isinstance(other, ABCSeries):\n # this means self is a DataFrame, and we need to broadcast\n # other\n cons = other._constructor_expanddim\n df = cons(\n {c: other for c in self.columns}, **self._construct_axes_dict()\n )\n return self._align_frame(\n df,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n if isinstance(other, ABCDataFrame):\n return self._align_frame(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n elif isinstance(other, ABCSeries):\n return self._align_series(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n else: # pragma: no cover\n raise TypeError(f\"unsupported type: {type(other)}\")\n\n def _align_frame(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy: bool_t = True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n ):\n # defaults\n join_index, join_columns = None, None\n ilidx, iridx = None, None\n clidx, cridx = None, None\n\n is_series = isinstance(self, ABCSeries)\n\n if axis is None or axis == 0:\n if not self.index.equals(other.index):\n join_index, ilidx, iridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if axis is None or axis == 1:\n if not is_series and not self.columns.equals(other.columns):\n join_columns, clidx, cridx = self.columns.join(\n other.columns, how=join, level=level, return_indexers=True\n )\n\n if is_series:\n reindexers = {0: [join_index, ilidx]}\n else:\n reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}\n\n left = self._reindex_with_indexers(\n reindexers, copy=copy, fill_value=fill_value, allow_dups=True\n )\n # other must be always DataFrame\n right = other._reindex_with_indexers(\n {0: [join_index, iridx], 1: [join_columns, cridx]},\n copy=copy,\n fill_value=fill_value,\n allow_dups=True,\n )\n\n if method is not None:\n left = self._ensure_type(\n left.fillna(method=method, axis=fill_axis, limit=limit)\n )\n right = right.fillna(method=method, axis=fill_axis, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_datetime64tz_dtype(left.index):\n if left.index.tz != right.index.tz:\n if join_index is 
not None:\n left.index = join_index\n right.index = join_index\n\n return left.__finalize__(self), right.__finalize__(other)\n\n def _align_series(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy: bool_t = True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n ):\n\n is_series = isinstance(self, ABCSeries)\n\n # series/series compat, other must always be a Series\n if is_series:\n if axis:\n raise ValueError(\"cannot align series to a series other than axis 0\")\n\n # equal\n if self.index.equals(other.index):\n join_index, lidx, ridx = None, None, None\n else:\n join_index, lidx, ridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n left = self._reindex_indexer(join_index, lidx, copy)\n right = other._reindex_indexer(join_index, ridx, copy)\n\n else:\n # one has > 1 ndim\n fdata = self._data\n if axis == 0:\n join_index = self.index\n lidx, ridx = None, None\n if not self.index.equals(other.index):\n join_index, lidx, ridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=1)\n\n elif axis == 1:\n join_index = self.columns\n lidx, ridx = None, None\n if not self.columns.equals(other.index):\n join_index, lidx, ridx = self.columns.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=0)\n else:\n raise ValueError(\"Must specify axis=0 or 1\")\n\n if copy and fdata is self._data:\n fdata = fdata.copy()\n\n left = self._constructor(fdata)\n\n if ridx is None:\n right = other\n else:\n right = other.reindex(join_index, level=level)\n\n # fill\n fill_na = notna(fill_value) or (method is not None)\n if fill_na:\n left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)\n right = right.fillna(fill_value, method=method, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_series or (not is_series and axis == 0):\n if is_datetime64tz_dtype(left.index):\n if left.index.tz != right.index.tz:\n if join_index is not None:\n left.index = join_index\n right.index = join_index\n\n return left.__finalize__(self), right.__finalize__(other)\n\n def _where(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n \"\"\"\n Equivalent to public method `where`, except that `other` is not\n applied as a function even if callable. 
Used in __setitem__.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n # align the cond to same shape as myself\n cond = com.apply_if_callable(cond, self)\n if isinstance(cond, NDFrame):\n cond, _ = cond.align(self, join=\"right\", broadcast_axis=1)\n else:\n if not hasattr(cond, \"shape\"):\n cond = np.asanyarray(cond)\n if cond.shape != self.shape:\n raise ValueError(\"Array conditional must be same shape as self\")\n cond = self._constructor(cond, **self._construct_axes_dict())\n\n # make sure we are boolean\n fill_value = bool(inplace)\n cond = cond.fillna(fill_value)\n\n msg = \"Boolean array expected for the condition, not {dtype}\"\n\n if not isinstance(cond, ABCDataFrame):\n # This is a single-dimensional object.\n if not is_bool_dtype(cond):\n raise ValueError(msg.format(dtype=cond.dtype))\n elif not cond.empty:\n for dt in cond.dtypes:\n if not is_bool_dtype(dt):\n raise ValueError(msg.format(dtype=dt))\n\n cond = -cond if inplace else cond\n\n # try to align with other\n try_quick = True\n if hasattr(other, \"align\"):\n\n # align with me\n if other.ndim <= self.ndim:\n\n _, other = self.align(\n other, join=\"left\", axis=axis, level=level, fill_value=np.nan\n )\n\n # if we are NOT aligned, raise as we cannot where index\n if axis is None and not all(\n other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)\n ):\n raise InvalidIndexError\n\n # slice me out of the other\n else:\n raise NotImplementedError(\n \"cannot align with a higher dimensional NDFrame\"\n )\n\n if isinstance(other, np.ndarray):\n\n if other.shape != self.shape:\n\n if self.ndim == 1:\n\n icond = cond.values\n\n # GH 2745 / GH 4192\n # treat like a scalar\n if len(other) == 1:\n other = np.array(other[0])\n\n # GH 3235\n # match True cond to other\n elif len(cond[icond]) == len(other):\n\n # try to not change dtype at first (if try_quick)\n if try_quick:\n new_other = com.values_from_object(self)\n new_other = new_other.copy()\n new_other[icond] = other\n other = new_other\n\n else:\n raise ValueError(\n \"Length of replacements must equal series length\"\n )\n\n else:\n raise ValueError(\n \"other must be the same shape as self when an ndarray\"\n )\n\n # we are the same shape, so create an actual object for alignment\n else:\n other = self._constructor(other, **self._construct_axes_dict())\n\n if axis is None:\n axis = 0\n\n if self.ndim == getattr(other, \"ndim\", 0):\n align = True\n else:\n align = self._get_axis_number(axis) == 1\n\n block_axis = self._get_block_manager_axis(axis)\n\n if inplace:\n # we may have different type blocks come out of putmask, so\n # reconstruct the block manager\n\n self._check_inplace_setting(other)\n new_data = self._data.putmask(\n mask=cond,\n new=other,\n align=align,\n inplace=True,\n axis=block_axis,\n transpose=self._AXIS_REVERSED,\n )\n self._update_inplace(new_data)\n\n else:\n new_data = self._data.where(\n other=other,\n cond=cond,\n align=align,\n errors=errors,\n try_cast=try_cast,\n axis=block_axis,\n )\n\n return self._constructor(new_data).__finalize__(self)\n\n _shared_docs[\n \"where\"\n ] = \"\"\"\n Replace values where the condition is %(cond_rev)s.\n\n Parameters\n ----------\n cond : bool %(klass)s, array-like, or callable\n Where `cond` is %(cond)s, keep the original value. Where\n %(cond_rev)s, replace with corresponding value from `other`.\n If `cond` is callable, it is computed on the %(klass)s and\n should return boolean %(klass)s or array. 
The callable must\n not change input %(klass)s (though pandas doesn't check it).\n other : scalar, %(klass)s, or callable\n Entries where `cond` is %(cond_rev)s are replaced with\n corresponding value from `other`.\n If other is callable, it is computed on the %(klass)s and\n should return scalar or %(klass)s. The callable must not\n change input %(klass)s (though pandas doesn't check it).\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n axis : int, default None\n Alignment axis if needed.\n level : int, default None\n Alignment level if needed.\n errors : str, {'raise', 'ignore'}, default 'raise'\n Note that currently this parameter won't affect\n the results and will always coerce to a suitable dtype.\n\n - 'raise' : allow exceptions to be raised.\n - 'ignore' : suppress exceptions. On error return original object.\n\n try_cast : bool, default False\n Try to cast the result back to the input type (if possible).\n\n Returns\n -------\n Same type as caller\n\n See Also\n --------\n :func:`DataFrame.%(name_other)s` : Return an object of same shape as\n self.\n\n Notes\n -----\n The %(name)s method is an application of the if-then idiom. For each\n element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the\n element is used; otherwise the corresponding element from the DataFrame\n ``other`` is used.\n\n The signature for :func:`DataFrame.where` differs from\n :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to\n ``np.where(m, df1, df2)``.\n\n For further details and examples see the ``%(name)s`` documentation in\n :ref:`indexing <indexing.where_mask>`.\n\n Examples\n --------\n >>> s = pd.Series(range(5))\n >>> s.where(s > 0)\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n 4 4.0\n dtype: float64\n\n >>> s.mask(s > 0)\n 0 0.0\n 1 NaN\n 2 NaN\n 3 NaN\n 4 NaN\n dtype: float64\n\n >>> s.where(s > 1, 10)\n 0 10\n 1 10\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])\n >>> df\n A B\n 0 0 1\n 1 2 3\n 2 4 5\n 3 6 7\n 4 8 9\n >>> m = df %% 3 == 0\n >>> df.where(m, -df)\n A B\n 0 0 -1\n 1 -2 3\n 2 -4 -5\n 3 6 -7\n 4 -8 9\n >>> df.where(m, -df) == np.where(m, df, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n >>> df.where(m, -df) == df.mask(~m, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n \"\"\"\n\n @Appender(\n _shared_docs[\"where\"]\n % dict(\n _shared_doc_kwargs,\n cond=\"True\",\n cond_rev=\"False\",\n name=\"where\",\n name_other=\"mask\",\n )\n )\n def where(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n\n other = com.apply_if_callable(other, self)\n return self._where(\n cond, other, inplace, axis, level, errors=errors, try_cast=try_cast\n )\n\n @Appender(\n _shared_docs[\"where\"]\n % dict(\n _shared_doc_kwargs,\n cond=\"False\",\n cond_rev=\"True\",\n name=\"mask\",\n name_other=\"where\",\n )\n )\n def mask(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n cond = com.apply_if_callable(cond, self)\n\n # see gh-21891\n if not hasattr(cond, \"__invert__\"):\n cond = np.array(cond)\n\n return self.where(\n ~cond,\n other=other,\n inplace=inplace,\n axis=axis,\n level=level,\n try_cast=try_cast,\n errors=errors,\n )\n\n _shared_docs[\n \"shift\"\n ] = \"\"\"\n Shift index by desired number of periods with an 
optional time `freq`.\n\n When `freq` is not passed, shift the index without realigning the data.\n If `freq` is passed (in this case, the index must be date or datetime,\n or it will raise a `NotImplementedError`), the index will be\n increased using the periods and the `freq`.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. Can be positive or negative.\n freq : DateOffset, tseries.offsets, timedelta, or str, optional\n Offset to use from the tseries module or time rule (e.g. 'EOM').\n If `freq` is specified then the index values are shifted but the\n data is not realigned. That is, use `freq` if you would like to\n extend the index when shifting and preserve the original data.\n axis : {0 or 'index', 1 or 'columns', None}, default None\n Shift direction.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n the default depends on the dtype of `self`.\n For numeric data, ``np.nan`` is used.\n For datetime, timedelta, or period data, etc. :attr:`NaT` is used.\n For extension dtypes, ``self.dtype.na_value`` is used.\n\n .. versionchanged:: 0.24.0\n\n Returns\n -------\n %(klass)s\n Copy of input object, shifted.\n\n See Also\n --------\n Index.shift : Shift values of Index.\n DatetimeIndex.shift : Shift values of DatetimeIndex.\n PeriodIndex.shift : Shift values of PeriodIndex.\n tshift : Shift the time index, using the index's frequency if\n available.\n\n Examples\n --------\n >>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]})\n\n >>> df.shift(periods=3)\n Col1 Col2 Col3\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 10.0 13.0 17.0\n 4 20.0 23.0 27.0\n\n >>> df.shift(periods=1, axis='columns')\n Col1 Col2 Col3\n 0 NaN 10.0 13.0\n 1 NaN 20.0 23.0\n 2 NaN 15.0 18.0\n 3 NaN 30.0 33.0\n 4 NaN 45.0 48.0\n\n >>> df.shift(periods=3, fill_value=0)\n Col1 Col2 Col3\n 0 0 0 0\n 1 0 0 0\n 2 0 0 0\n 3 10 13 17\n 4 20 23 27\n \"\"\"\n\n @Appender(_shared_docs[\"shift\"] % _shared_doc_kwargs)\n def shift(\n self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None\n ) -> FrameOrSeries:\n if periods == 0:\n return self.copy()\n\n block_axis = self._get_block_manager_axis(axis)\n if freq is None:\n new_data = self._data.shift(\n periods=periods, axis=block_axis, fill_value=fill_value\n )\n else:\n return self.tshift(periods, freq)\n\n return self._constructor(new_data).__finalize__(self)\n\n def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:\n \"\"\"\n Equivalent to `shift` without copying data.\n\n The shifted data will not include the dropped periods and the\n shifted axis will be smaller than the original.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative.\n\n Returns\n -------\n shifted : same type as caller\n\n Notes\n -----\n While the `slice_shift` is faster than `shift`, you may pay for it\n later during alignment.\n \"\"\"\n if periods == 0:\n return self\n\n if periods > 0:\n vslicer = slice(None, -periods)\n islicer = slice(periods, None)\n else:\n vslicer = slice(-periods, None)\n islicer = slice(None, periods)\n\n new_obj = self._slice(vslicer, axis=axis)\n shifted_axis = self._get_axis(axis)[islicer]\n new_obj.set_axis(shifted_axis, axis=axis, inplace=True)\n\n return new_obj.__finalize__(self)\n\n def tshift(\n self: FrameOrSeries, periods: int = 1, freq=None, axis=0\n ) -> FrameOrSeries:\n \"\"\"\n Shift the time index, using the index's frequency if 
available.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative.\n freq : DateOffset, timedelta, or str, default None\n Increment to use from the tseries module\n or time rule expressed as a string (e.g. 'EOM').\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default 0\n Corresponds to the axis that contains the Index.\n\n Returns\n -------\n shifted : Series/DataFrame\n\n Notes\n -----\n If freq is not specified then tries to use the freq or inferred_freq\n attributes of the index. If neither of those attributes exist, a\n ValueError is thrown\n \"\"\"\n index = self._get_axis(axis)\n if freq is None:\n freq = getattr(index, \"freq\", None)\n\n if freq is None:\n freq = getattr(index, \"inferred_freq\", None)\n\n if freq is None:\n msg = \"Freq was not given and was not set in the index\"\n raise ValueError(msg)\n\n if periods == 0:\n return self\n\n if isinstance(freq, str):\n freq = to_offset(freq)\n\n block_axis = self._get_block_manager_axis(axis)\n if isinstance(index, PeriodIndex):\n orig_freq = to_offset(index.freq)\n if freq == orig_freq:\n new_data = self._data.copy()\n new_data.axes[block_axis] = index.shift(periods)\n elif orig_freq is not None:\n raise ValueError(\n f\"Given freq {freq.rule_code} does not match \"\n f\"PeriodIndex freq {orig_freq.rule_code}\"\n )\n else:\n new_data = self._data.copy()\n new_data.axes[block_axis] = index.shift(periods, freq)\n\n return self._constructor(new_data).__finalize__(self)\n\n def truncate(\n self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True\n ) -> FrameOrSeries:\n \"\"\"\n Truncate a Series or DataFrame before and after some index value.\n\n This is a useful shorthand for boolean indexing based on index\n values above or below certain thresholds.\n\n Parameters\n ----------\n before : date, str, int\n Truncate all rows before this index value.\n after : date, str, int\n Truncate all rows after this index value.\n axis : {0 or 'index', 1 or 'columns'}, optional\n Axis to truncate. Truncates the index (rows) by default.\n copy : bool, default is True,\n Return a copy of the truncated section.\n\n Returns\n -------\n type of caller\n The truncated Series or DataFrame.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by label.\n DataFrame.iloc : Select a subset of a DataFrame by position.\n\n Notes\n -----\n If the index being truncated contains only datetime values,\n `before` and `after` may be specified as strings instead of\n Timestamps.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],\n ... 'B': ['f', 'g', 'h', 'i', 'j'],\n ... 'C': ['k', 'l', 'm', 'n', 'o']},\n ... index=[1, 2, 3, 4, 5])\n >>> df\n A B C\n 1 a f k\n 2 b g l\n 3 c h m\n 4 d i n\n 5 e j o\n\n >>> df.truncate(before=2, after=4)\n A B C\n 2 b g l\n 3 c h m\n 4 d i n\n\n The columns of a DataFrame can be truncated.\n\n >>> df.truncate(before=\"A\", after=\"B\", axis=\"columns\")\n A B\n 1 a f\n 2 b g\n 3 c h\n 4 d i\n 5 e j\n\n For Series, only rows can be truncated.\n\n >>> df['A'].truncate(before=2, after=4)\n 2 b\n 3 c\n 4 d\n Name: A, dtype: object\n\n The index values in ``truncate`` can be datetimes or string\n dates.\n\n >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')\n >>> df = pd.DataFrame(index=dates, data={'A': 1})\n >>> df.tail()\n A\n 2016-01-31 23:59:56 1\n 2016-01-31 23:59:57 1\n 2016-01-31 23:59:58 1\n 2016-01-31 23:59:59 1\n 2016-02-01 00:00:00 1\n\n >>> df.truncate(before=pd.Timestamp('2016-01-05'),\n ... 
after=pd.Timestamp('2016-01-10')).tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Because the index is a DatetimeIndex containing only dates, we can\n specify `before` and `after` as strings. They will be coerced to\n Timestamps before truncation.\n\n >>> df.truncate('2016-01-05', '2016-01-10').tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Note that ``truncate`` assumes a 0 value for any unspecified time\n component (midnight). This differs from partial string slicing, which\n returns any partially matching dates.\n\n >>> df.loc['2016-01-05':'2016-01-10', :].tail()\n A\n 2016-01-10 23:59:55 1\n 2016-01-10 23:59:56 1\n 2016-01-10 23:59:57 1\n 2016-01-10 23:59:58 1\n 2016-01-10 23:59:59 1\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n # GH 17935\n # Check that index is sorted\n if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:\n raise ValueError(\"truncate requires a sorted index\")\n\n # if we have a date index, convert to dates, otherwise\n # treat like a slice\n if ax.is_all_dates:\n from pandas.core.tools.datetimes import to_datetime\n\n before = to_datetime(before)\n after = to_datetime(after)\n\n if before is not None and after is not None:\n if before > after:\n raise ValueError(f\"Truncate: {after} must be after {before}\")\n\n slicer = [slice(None, None)] * self._AXIS_LEN\n slicer[axis] = slice(before, after)\n result = self.loc[tuple(slicer)]\n\n if isinstance(ax, MultiIndex):\n setattr(result, self._get_axis_name(axis), ax.truncate(before, after))\n\n if copy:\n result = result.copy()\n\n return result\n\n def tz_convert(\n self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True\n ) -> FrameOrSeries:\n \"\"\"\n Convert tz-aware axis to target time zone.\n\n Parameters\n ----------\n tz : str or tzinfo object\n axis : the axis to convert\n level : int, str, default None\n If axis is a MultiIndex, convert a specific level. 
Otherwise\n must be None.\n copy : bool, default True\n Also make a copy of the underlying data.\n\n Returns\n -------\n %(klass)s\n Object with time zone converted axis.\n\n Raises\n ------\n TypeError\n If the axis is tz-naive.\n \"\"\"\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n def _tz_convert(ax, tz):\n if not hasattr(ax, \"tz_convert\"):\n if len(ax) > 0:\n ax_name = self._get_axis_name(axis)\n raise TypeError(\n f\"{ax_name} is not a valid DatetimeIndex or PeriodIndex\"\n )\n else:\n ax = DatetimeIndex([], tz=tz)\n else:\n ax = ax.tz_convert(tz)\n return ax\n\n # if a level is given it must be a MultiIndex level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n new_level = _tz_convert(ax.levels[level], tz)\n ax = ax.set_levels(new_level, level=level)\n else:\n if level not in (None, 0, ax.name):\n raise ValueError(f\"The level {level} is not valid\")\n ax = _tz_convert(ax, tz)\n\n result = self._constructor(self._data, copy=copy)\n result = result.set_axis(ax, axis=axis, inplace=False)\n return result.__finalize__(self)\n\n def tz_localize(\n self: FrameOrSeries,\n tz,\n axis=0,\n level=None,\n copy: bool_t = True,\n ambiguous=\"raise\",\n nonexistent: str = \"raise\",\n ) -> FrameOrSeries:\n \"\"\"\n Localize tz-naive index of a Series or DataFrame to target time zone.\n\n This operation localizes the Index. To localize the values in a\n timezone-naive Series, use :meth:`Series.dt.tz_localize`.\n\n Parameters\n ----------\n tz : str or tzinfo\n axis : the axis to localize\n level : int, str, default None\n If axis ia a MultiIndex, localize a specific level. Otherwise\n must be None.\n copy : bool, default True\n Also make a copy of the underlying data.\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from\n 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at\n 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the\n `ambiguous` parameter dictates how ambiguous times should be\n handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False designates\n a non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times.\n nonexistent : str, default 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST. Valid values are:\n\n - 'shift_forward' will shift the nonexistent time forward to the\n closest existing time\n - 'shift_backward' will shift the nonexistent time backward to the\n closest existing time\n - 'NaT' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - 'raise' will raise an NonExistentTimeError if there are\n nonexistent times.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n Same type as the input.\n\n Raises\n ------\n TypeError\n If the TimeSeries is tz-aware and tz is not None.\n\n Examples\n --------\n Localize local times:\n\n >>> s = pd.Series([1],\n ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))\n >>> s.tz_localize('CET')\n 2018-09-15 01:30:00+02:00 1\n dtype: int64\n\n Be careful with DST changes. 
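A quick, illustrative sanity check (output spacing may differ slightly between pandas versions) is to localize and then convert to UTC:\n\n        >>> s.tz_localize('CET').tz_convert('UTC')\n        2018-09-14 23:30:00+00:00    1\n        dtype: int64\n\n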
When there is sequential data, pandas\n can infer the DST time:\n\n >>> s = pd.Series(range(7),\n ... index=pd.DatetimeIndex(['2018-10-28 01:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 03:00:00',\n ... '2018-10-28 03:30:00']))\n >>> s.tz_localize('CET', ambiguous='infer')\n 2018-10-28 01:30:00+02:00 0\n 2018-10-28 02:00:00+02:00 1\n 2018-10-28 02:30:00+02:00 2\n 2018-10-28 02:00:00+01:00 3\n 2018-10-28 02:30:00+01:00 4\n 2018-10-28 03:00:00+01:00 5\n 2018-10-28 03:30:00+01:00 6\n dtype: int64\n\n In some cases, inferring the DST is impossible. In such cases, you can\n pass an ndarray to the ambiguous parameter to set the DST explicitly\n\n >>> s = pd.Series(range(3),\n ... index=pd.DatetimeIndex(['2018-10-28 01:20:00',\n ... '2018-10-28 02:36:00',\n ... '2018-10-28 03:46:00']))\n >>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))\n 2018-10-28 01:20:00+02:00 0\n 2018-10-28 02:36:00+02:00 1\n 2018-10-28 03:46:00+01:00 2\n dtype: int64\n\n If the DST transition causes nonexistent times, you can shift these\n dates forward or backwards with a timedelta object or `'shift_forward'`\n or `'shift_backwards'`.\n >>> s = pd.Series(range(2),\n ... index=pd.DatetimeIndex(['2015-03-29 02:30:00',\n ... '2015-03-29 03:30:00']))\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')\n 2015-03-29 03:00:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')\n 2015-03-29 01:59:59.999999999+01:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))\n 2015-03-29 03:30:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n \"\"\"\n nonexistent_options = (\"raise\", \"NaT\", \"shift_forward\", \"shift_backward\")\n if nonexistent not in nonexistent_options and not isinstance(\n nonexistent, timedelta\n ):\n raise ValueError(\n \"The nonexistent argument must be one of 'raise', \"\n \"'NaT', 'shift_forward', 'shift_backward' or \"\n \"a timedelta object\"\n )\n\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n def _tz_localize(ax, tz, ambiguous, nonexistent):\n if not hasattr(ax, \"tz_localize\"):\n if len(ax) > 0:\n ax_name = self._get_axis_name(axis)\n raise TypeError(\n f\"{ax_name} is not a valid DatetimeIndex or PeriodIndex\"\n )\n else:\n ax = DatetimeIndex([], tz=tz)\n else:\n ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)\n return ax\n\n # if a level is given it must be a MultiIndex level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)\n ax = ax.set_levels(new_level, level=level)\n else:\n if level not in (None, 0, ax.name):\n raise ValueError(f\"The level {level} is not valid\")\n ax = _tz_localize(ax, tz, ambiguous, nonexistent)\n\n result = self._constructor(self._data, copy=copy)\n result = result.set_axis(ax, axis=axis, inplace=False)\n return result.__finalize__(self)\n\n # ----------------------------------------------------------------------\n # Numeric Methods\n def abs(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\"\n Return a Series/DataFrame with absolute numeric value of each element.\n\n This function only applies to elements that are all numeric.\n\n Returns\n -------\n abs\n Series/DataFrame containing the absolute value of each element.\n\n See Also\n --------\n 
numpy.absolute : Calculate the absolute value element-wise.\n\n Notes\n -----\n For ``complex`` inputs, ``1.2 + 1j``, the absolute value is\n :math:`\\\\sqrt{ a^2 + b^2 }`.\n\n Examples\n --------\n Absolute numeric values in a Series.\n\n >>> s = pd.Series([-1.10, 2, -3.33, 4])\n >>> s.abs()\n 0 1.10\n 1 2.00\n 2 3.33\n 3 4.00\n dtype: float64\n\n Absolute numeric values in a Series with complex numbers.\n\n >>> s = pd.Series([1.2 + 1j])\n >>> s.abs()\n 0 1.56205\n dtype: float64\n\n Absolute numeric values in a Series with a Timedelta element.\n\n >>> s = pd.Series([pd.Timedelta('1 days')])\n >>> s.abs()\n 0 1 days\n dtype: timedelta64[ns]\n\n Select rows with data closest to certain value using argsort (from\n `StackOverflow <https://stackoverflow.com/a/17758115>`__).\n\n >>> df = pd.DataFrame({\n ... 'a': [4, 5, 6, 7],\n ... 'b': [10, 20, 30, 40],\n ... 'c': [100, 50, -30, -50]\n ... })\n >>> df\n a b c\n 0 4 10 100\n 1 5 20 50\n 2 6 30 -30\n 3 7 40 -50\n >>> df.loc[(df.c - 43).abs().argsort()]\n a b c\n 1 5 20 50\n 0 4 10 100\n 2 6 30 -30\n 3 7 40 -50\n \"\"\"\n return np.abs(self)\n\n def describe(\n self: FrameOrSeries, percentiles=None, include=None, exclude=None\n ) -> FrameOrSeries:\n \"\"\"\n Generate descriptive statistics.\n\n Descriptive statistics include those that summarize the central\n tendency, dispersion and shape of a\n dataset's distribution, excluding ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n Parameters\n ----------\n percentiles : list-like of numbers, optional\n The percentiles to include in the output. All should\n fall between 0 and 1. The default is\n ``[.25, .5, .75]``, which returns the 25th, 50th, and\n 75th percentiles.\n include : 'all', list-like of dtypes or None (default), optional\n A white list of data types to include in the result. Ignored\n for ``Series``. Here are the options:\n\n - 'all' : All columns of the input will be included in the output.\n - A list-like of dtypes : Limits the results to the\n provided data types.\n To limit the result to numeric types submit\n ``numpy.number``. To limit it instead to object columns submit\n the ``numpy.object`` data type. Strings\n can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To\n select pandas categorical columns, use ``'category'``\n - None (default) : The result will include all numeric columns.\n exclude : list-like of dtypes or None (default), optional,\n A black list of data types to omit from the result. Ignored\n for ``Series``. Here are the options:\n\n - A list-like of dtypes : Excludes the provided data types\n from the result. To exclude numeric types submit\n ``numpy.number``. To exclude object columns submit the data\n type ``numpy.object``. Strings can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). 
To\n exclude pandas categorical columns, use ``'category'``\n - None (default) : The result will exclude nothing.\n\n Returns\n -------\n Series or DataFrame\n Summary statistics of the Series or Dataframe provided.\n\n See Also\n --------\n DataFrame.count: Count number of non-NA/null observations.\n DataFrame.max: Maximum of the values in the object.\n DataFrame.min: Minimum of the values in the object.\n DataFrame.mean: Mean of the values.\n DataFrame.std: Standard deviation of the observations.\n DataFrame.select_dtypes: Subset of a DataFrame including/excluding\n columns based on their dtype.\n\n Notes\n -----\n For numeric data, the result's index will include ``count``,\n ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and\n upper percentiles. By default the lower percentile is ``25`` and the\n upper percentile is ``75``. The ``50`` percentile is the\n same as the median.\n\n For object data (e.g. strings or timestamps), the result's index\n will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``\n is the most common value. The ``freq`` is the most common value's\n frequency. Timestamps also include the ``first`` and ``last`` items.\n\n If multiple object values have the highest count, then the\n ``count`` and ``top`` results will be arbitrarily chosen from\n among those with the highest count.\n\n For mixed data types provided via a ``DataFrame``, the default is to\n return only an analysis of numeric columns. If the dataframe consists\n only of object and categorical data without any numeric columns, the\n default is to return an analysis of both the object and categorical\n columns. If ``include='all'`` is provided as an option, the result\n will include a union of attributes of each type.\n\n The `include` and `exclude` parameters can be used to limit\n which columns in a ``DataFrame`` are analyzed for the output.\n The parameters are ignored when analyzing a ``Series``.\n\n Examples\n --------\n Describing a numeric ``Series``.\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n dtype: float64\n\n Describing a categorical ``Series``.\n\n >>> s = pd.Series(['a', 'a', 'b', 'c'])\n >>> s.describe()\n count 4\n unique 3\n top a\n freq 2\n dtype: object\n\n Describing a timestamp ``Series``.\n\n >>> s = pd.Series([\n ... np.datetime64(\"2000-01-01\"),\n ... np.datetime64(\"2010-01-01\"),\n ... np.datetime64(\"2010-01-01\")\n ... ])\n >>> s.describe()\n count 3\n unique 2\n top 2010-01-01 00:00:00\n freq 2\n first 2000-01-01 00:00:00\n last 2010-01-01 00:00:00\n dtype: object\n\n Describing a ``DataFrame``. By default only numeric fields\n are returned.\n\n >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),\n ... 'numeric': [1, 2, 3],\n ... 'object': ['a', 'b', 'c']\n ... 
})\n >>> df.describe()\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Describing all columns of a ``DataFrame`` regardless of data type.\n\n >>> df.describe(include='all')\n categorical numeric object\n count 3 3.0 3\n unique 3 NaN 3\n top f NaN c\n freq 1 NaN 1\n mean NaN 2.0 NaN\n std NaN 1.0 NaN\n min NaN 1.0 NaN\n 25% NaN 1.5 NaN\n 50% NaN 2.0 NaN\n 75% NaN 2.5 NaN\n max NaN 3.0 NaN\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute.\n\n >>> df.numeric.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n Name: numeric, dtype: float64\n\n Including only numeric columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.number])\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Including only string columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.object])\n object\n count 3\n unique 3\n top c\n freq 1\n\n Including only categorical columns from a ``DataFrame`` description.\n\n >>> df.describe(include=['category'])\n categorical\n count 3\n unique 3\n top f\n freq 1\n\n Excluding numeric columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.number])\n categorical object\n count 3 3\n unique 3 3\n top f c\n freq 1 1\n\n Excluding object columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.object])\n categorical numeric\n count 3 3.0\n unique 3 NaN\n top f NaN\n freq 1 NaN\n mean NaN 2.0\n std NaN 1.0\n min NaN 1.0\n 25% NaN 1.5\n 50% NaN 2.0\n 75% NaN 2.5\n max NaN 3.0\n \"\"\"\n if self.ndim == 2 and self.columns.size == 0:\n raise ValueError(\"Cannot describe a DataFrame without columns\")\n\n if percentiles is not None:\n # explicit conversion of `percentiles` to list\n percentiles = list(percentiles)\n\n # get them all to be in [0, 1]\n validate_percentile(percentiles)\n\n # median should always be included\n if 0.5 not in percentiles:\n percentiles.append(0.5)\n percentiles = np.asarray(percentiles)\n else:\n percentiles = np.array([0.25, 0.5, 0.75])\n\n # sort and check for duplicates\n unique_pcts = np.unique(percentiles)\n if len(unique_pcts) < len(percentiles):\n raise ValueError(\"percentiles cannot contain duplicates\")\n percentiles = unique_pcts\n\n formatted_percentiles = format_percentiles(percentiles)\n\n def describe_numeric_1d(series):\n stat_index = (\n [\"count\", \"mean\", \"std\", \"min\"] + formatted_percentiles + [\"max\"]\n )\n d = (\n [series.count(), series.mean(), series.std(), series.min()]\n + series.quantile(percentiles).tolist()\n + [series.max()]\n )\n return pd.Series(d, index=stat_index, name=series.name)\n\n def describe_categorical_1d(data):\n names = [\"count\", \"unique\"]\n objcounts = data.value_counts()\n count_unique = len(objcounts[objcounts != 0])\n result = [data.count(), count_unique]\n dtype = None\n if result[1] > 0:\n top, freq = objcounts.index[0], objcounts.iloc[0]\n names += [\"top\", \"freq\"]\n result += [top, freq]\n\n # If the DataFrame is empty, set 'top' and 'freq' to None\n # to maintain output shape consistency\n else:\n names += [\"top\", \"freq\"]\n result += [np.nan, np.nan]\n dtype = \"object\"\n\n return pd.Series(result, index=names, name=data.name, dtype=dtype)\n\n def describe_timestamp_1d(data):\n # GH-30164\n stat_index = [\"count\", \"mean\", \"min\"] + formatted_percentiles + [\"max\"]\n d = (\n [data.count(), data.mean(), data.min()]\n + data.quantile(percentiles).tolist()\n 
+ [data.max()]\n )\n return pd.Series(d, index=stat_index, name=data.name)\n\n def describe_1d(data):\n if is_bool_dtype(data):\n return describe_categorical_1d(data)\n elif is_numeric_dtype(data):\n return describe_numeric_1d(data)\n elif is_datetime64_any_dtype(data):\n return describe_timestamp_1d(data)\n elif is_timedelta64_dtype(data):\n return describe_numeric_1d(data)\n else:\n return describe_categorical_1d(data)\n\n if self.ndim == 1:\n return describe_1d(self)\n elif (include is None) and (exclude is None):\n # when some numerics are found, keep only numerics\n data = self.select_dtypes(include=[np.number])\n if len(data.columns) == 0:\n data = self\n elif include == \"all\":\n if exclude is not None:\n msg = \"exclude must be None when include is 'all'\"\n raise ValueError(msg)\n data = self\n else:\n data = self.select_dtypes(include=include, exclude=exclude)\n\n ldesc = [describe_1d(s) for _, s in data.items()]\n # set a convenient order for rows\n names: List[Optional[Hashable]] = []\n ldesc_indexes = sorted((x.index for x in ldesc), key=len)\n for idxnames in ldesc_indexes:\n for name in idxnames:\n if name not in names:\n names.append(name)\n\n d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)\n d.columns = data.columns.copy()\n return d\n\n _shared_docs[\n \"pct_change\"\n ] = \"\"\"\n Percentage change between the current and a prior element.\n\n Computes the percentage change from the immediately previous row by\n default. This is useful in comparing the percentage of change in a time\n series of elements.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming percent change.\n fill_method : str, default 'pad'\n How to handle NAs before computing percent changes.\n limit : int, default None\n The number of consecutive NAs to fill before stopping.\n freq : DateOffset, timedelta, or str, optional\n Increment to use from time series API (e.g. 'M' or BDay()).\n **kwargs\n Additional keyword arguments are passed into\n `DataFrame.shift` or `Series.shift`.\n\n Returns\n -------\n chg : Series or DataFrame\n The same type as the calling object.\n\n See Also\n --------\n Series.diff : Compute the difference of two elements in a Series.\n DataFrame.diff : Compute the difference of two elements in a DataFrame.\n Series.shift : Shift the index by some number of periods.\n DataFrame.shift : Shift the index by some number of periods.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([90, 91, 85])\n >>> s\n 0 90\n 1 91\n 2 85\n dtype: int64\n\n >>> s.pct_change()\n 0 NaN\n 1 0.011111\n 2 -0.065934\n dtype: float64\n\n >>> s.pct_change(periods=2)\n 0 NaN\n 1 NaN\n 2 -0.055556\n dtype: float64\n\n See the percentage change in a Series where filling NAs with last\n valid observation forward to next valid.\n\n >>> s = pd.Series([90, 91, None, 85])\n >>> s\n 0 90.0\n 1 91.0\n 2 NaN\n 3 85.0\n dtype: float64\n\n >>> s.pct_change(fill_method='ffill')\n 0 NaN\n 1 0.011111\n 2 0.000000\n 3 -0.065934\n dtype: float64\n\n **DataFrame**\n\n Percentage change in French franc, Deutsche Mark, and Italian lira from\n 1980-01-01 to 1980-03-01.\n\n >>> df = pd.DataFrame({\n ... 'FR': [4.0405, 4.0963, 4.3149],\n ... 'GR': [1.7246, 1.7482, 1.8519],\n ... 'IT': [804.74, 810.01, 860.13]},\n ... 
index=['1980-01-01', '1980-02-01', '1980-03-01'])\n >>> df\n FR GR IT\n 1980-01-01 4.0405 1.7246 804.74\n 1980-02-01 4.0963 1.7482 810.01\n 1980-03-01 4.3149 1.8519 860.13\n\n >>> df.pct_change()\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 0.013810 0.013684 0.006549\n 1980-03-01 0.053365 0.059318 0.061876\n\n Percentage of change in GOOG and APPL stock volume. Shows computing\n the percentage change between columns.\n\n >>> df = pd.DataFrame({\n ... '2016': [1769950, 30586265],\n ... '2015': [1500923, 40912316],\n ... '2014': [1371819, 41403351]},\n ... index=['GOOG', 'APPL'])\n >>> df\n 2016 2015 2014\n GOOG 1769950 1500923 1371819\n APPL 30586265 40912316 41403351\n\n >>> df.pct_change(axis='columns')\n 2016 2015 2014\n GOOG NaN -0.151997 -0.086016\n APPL NaN 0.337604 0.012002\n \"\"\"\n\n @Appender(_shared_docs[\"pct_change\"] % _shared_doc_kwargs)\n def pct_change(\n self: FrameOrSeries,\n periods=1,\n fill_method=\"pad\",\n limit=None,\n freq=None,\n **kwargs,\n ) -> FrameOrSeries:\n # TODO: Not sure if above is correct - need someone to confirm.\n axis = self._get_axis_number(kwargs.pop(\"axis\", self._stat_axis_name))\n if fill_method is None:\n data = self\n else:\n data = self._ensure_type(\n self.fillna(method=fill_method, axis=axis, limit=limit)\n )\n\n rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1\n if freq is not None:\n # Shift method is implemented differently when freq is not None\n # We want to restore the original index\n rs = rs.loc[~rs.index.duplicated()]\n rs = rs.reindex_like(data)\n return rs\n\n def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):\n if axis is None:\n raise ValueError(\"Must specify 'axis' when aggregating by level.\")\n grouped = self.groupby(level=level, axis=axis, sort=False)\n if hasattr(grouped, name) and skipna:\n return getattr(grouped, name)(**kwargs)\n axis = self._get_axis_number(axis)\n method = getattr(type(self), name)\n applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)\n return grouped.aggregate(applyf)\n\n @classmethod\n def _add_numeric_operations(cls):\n \"\"\"\n Add the operations to the cls; evaluate the doc strings again\n \"\"\"\n axis_descr, name, name2 = _doc_parms(cls)\n\n cls.any = _make_logical_function(\n cls,\n \"any\",\n name,\n name2,\n axis_descr,\n _any_desc,\n nanops.nanany,\n _any_see_also,\n _any_examples,\n empty_value=False,\n )\n cls.all = _make_logical_function(\n cls,\n \"all\",\n name,\n name2,\n axis_descr,\n _all_desc,\n nanops.nanall,\n _all_see_also,\n _all_examples,\n empty_value=True,\n )\n\n @Substitution(\n desc=\"Return the mean absolute deviation of the values \"\n \"for the requested axis.\",\n name1=name,\n name2=name2,\n axis_descr=axis_descr,\n min_count=\"\",\n see_also=\"\",\n examples=\"\",\n )\n @Appender(_num_doc_mad)\n def mad(self, axis=None, skipna=None, level=None):\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\"mad\", axis=axis, level=level, skipna=skipna)\n\n data = self._get_numeric_data()\n if axis == 0:\n demeaned = data - data.mean(axis=0)\n else:\n demeaned = data.sub(data.mean(axis=1), axis=0)\n return np.abs(demeaned).mean(axis=axis, skipna=skipna)\n\n cls.mad = mad\n\n cls.sem = _make_stat_function_ddof(\n cls,\n \"sem\",\n name,\n name2,\n axis_descr,\n \"Return unbiased standard error of the mean over requested \"\n \"axis.\\n\\nNormalized by N-1 by default. 
This can be changed \"\n \"using the ddof argument\",\n nanops.nansem,\n )\n cls.var = _make_stat_function_ddof(\n cls,\n \"var\",\n name,\n name2,\n axis_descr,\n \"Return unbiased variance over requested axis.\\n\\nNormalized by \"\n \"N-1 by default. This can be changed using the ddof argument\",\n nanops.nanvar,\n )\n cls.std = _make_stat_function_ddof(\n cls,\n \"std\",\n name,\n name2,\n axis_descr,\n \"Return sample standard deviation over requested axis.\"\n \"\\n\\nNormalized by N-1 by default. This can be changed using the \"\n \"ddof argument\",\n nanops.nanstd,\n )\n\n cls.cummin = _make_cum_function(\n cls,\n \"cummin\",\n name,\n name2,\n axis_descr,\n \"minimum\",\n np.minimum.accumulate,\n \"min\",\n np.inf,\n np.nan,\n _cummin_examples,\n )\n cls.cumsum = _make_cum_function(\n cls,\n \"cumsum\",\n name,\n name2,\n axis_descr,\n \"sum\",\n np.cumsum,\n \"sum\",\n 0.0,\n np.nan,\n _cumsum_examples,\n )\n cls.cumprod = _make_cum_function(\n cls,\n \"cumprod\",\n name,\n name2,\n axis_descr,\n \"product\",\n np.cumprod,\n \"prod\",\n 1.0,\n np.nan,\n _cumprod_examples,\n )\n cls.cummax = _make_cum_function(\n cls,\n \"cummax\",\n name,\n name2,\n axis_descr,\n \"maximum\",\n np.maximum.accumulate,\n \"max\",\n -np.inf,\n np.nan,\n _cummax_examples,\n )\n\n cls.sum = _make_min_count_stat_function(\n cls,\n \"sum\",\n name,\n name2,\n axis_descr,\n \"\"\"Return the sum of the values for the requested axis.\\n\n This is equivalent to the method ``numpy.sum``.\"\"\",\n nanops.nansum,\n _stat_func_see_also,\n _sum_examples,\n )\n cls.mean = _make_stat_function(\n cls,\n \"mean\",\n name,\n name2,\n axis_descr,\n \"Return the mean of the values for the requested axis.\",\n nanops.nanmean,\n )\n cls.skew = _make_stat_function(\n cls,\n \"skew\",\n name,\n name2,\n axis_descr,\n \"Return unbiased skew over requested axis.\\n\\nNormalized by N-1.\",\n nanops.nanskew,\n )\n cls.kurt = _make_stat_function(\n cls,\n \"kurt\",\n name,\n name2,\n axis_descr,\n \"Return unbiased kurtosis over requested axis.\\n\\n\"\n \"Kurtosis obtained using Fisher's definition of\\n\"\n \"kurtosis (kurtosis of normal == 0.0). Normalized \"\n \"by N-1.\",\n nanops.nankurt,\n )\n cls.kurtosis = cls.kurt\n cls.prod = _make_min_count_stat_function(\n cls,\n \"prod\",\n name,\n name2,\n axis_descr,\n \"Return the product of the values for the requested axis.\",\n nanops.nanprod,\n examples=_prod_examples,\n )\n cls.product = cls.prod\n cls.median = _make_stat_function(\n cls,\n \"median\",\n name,\n name2,\n axis_descr,\n \"Return the median of the values for the requested axis.\",\n nanops.nanmedian,\n )\n cls.max = _make_stat_function(\n cls,\n \"max\",\n name,\n name2,\n axis_descr,\n \"\"\"Return the maximum of the values for the requested axis.\\n\n If you want the *index* of the maximum, use ``idxmax``. This is\n the equivalent of the ``numpy.ndarray`` method ``argmax``.\"\"\",\n nanops.nanmax,\n _stat_func_see_also,\n _max_examples,\n )\n cls.min = _make_stat_function(\n cls,\n \"min\",\n name,\n name2,\n axis_descr,\n \"\"\"Return the minimum of the values for the requested axis.\\n\n If you want the *index* of the minimum, use ``idxmin``. 
This is\n the equivalent of the ``numpy.ndarray`` method ``argmin``.\"\"\",\n nanops.nanmin,\n _stat_func_see_also,\n _min_examples,\n )\n\n @classmethod\n def _add_series_or_dataframe_operations(cls):\n \"\"\"\n Add the series or dataframe only operations to the cls; evaluate\n the doc strings again.\n \"\"\"\n from pandas.core.window import EWM, Expanding, Rolling, Window\n\n @Appender(Rolling.__doc__)\n def rolling(\n self,\n window,\n min_periods=None,\n center=False,\n win_type=None,\n on=None,\n axis=0,\n closed=None,\n ):\n axis = self._get_axis_number(axis)\n\n if win_type is not None:\n return Window(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n on=on,\n axis=axis,\n closed=closed,\n )\n\n return Rolling(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n on=on,\n axis=axis,\n closed=closed,\n )\n\n cls.rolling = rolling\n\n @Appender(Expanding.__doc__)\n def expanding(self, min_periods=1, center=False, axis=0):\n axis = self._get_axis_number(axis)\n return Expanding(self, min_periods=min_periods, center=center, axis=axis)\n\n cls.expanding = expanding\n\n @Appender(EWM.__doc__)\n def ewm(\n self,\n com=None,\n span=None,\n halflife=None,\n alpha=None,\n min_periods=0,\n adjust=True,\n ignore_na=False,\n axis=0,\n ):\n axis = self._get_axis_number(axis)\n return EWM(\n self,\n com=com,\n span=span,\n halflife=halflife,\n alpha=alpha,\n min_periods=min_periods,\n adjust=adjust,\n ignore_na=ignore_na,\n axis=axis,\n )\n\n cls.ewm = ewm\n\n @Appender(_shared_docs[\"transform\"] % dict(axis=\"\", **_shared_doc_kwargs))\n def transform(self, func, *args, **kwargs):\n result = self.agg(func, *args, **kwargs)\n if is_scalar(result) or len(result) != len(self):\n raise ValueError(\"transforms cannot produce aggregated results\")\n\n return result\n\n # ----------------------------------------------------------------------\n # Misc methods\n\n _shared_docs[\n \"valid_index\"\n ] = \"\"\"\n Return index for %(position)s non-NA/null value.\n\n Returns\n -------\n scalar : type of index\n\n Notes\n -----\n If all elements are non-NA/null, returns None.\n Also returns None for empty %(klass)s.\n \"\"\"\n\n def _find_valid_index(self, how: str):\n \"\"\"\n Retrieves the index of the first valid value.\n\n Parameters\n ----------\n how : {'first', 'last'}\n Use this parameter to change between the first or last valid index.\n\n Returns\n -------\n idx_first_valid : type of index\n \"\"\"\n idxpos = find_valid_index(self._values, how)\n if idxpos is None:\n return None\n return self.index[idxpos]\n\n @Appender(\n _shared_docs[\"valid_index\"] % {\"position\": \"first\", \"klass\": \"Series/DataFrame\"}\n )\n def first_valid_index(self):\n return self._find_valid_index(\"first\")\n\n @Appender(\n _shared_docs[\"valid_index\"] % {\"position\": \"last\", \"klass\": \"Series/DataFrame\"}\n )\n def last_valid_index(self):\n return self._find_valid_index(\"last\")\n\n\ndef _doc_parms(cls):\n \"\"\"Return a tuple of the doc parms.\"\"\"\n axis_descr = (\n f\"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}\"\n )\n name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else \"scalar\"\n name2 = cls.__name__\n return axis_descr, name, name2\n\n\n_num_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\n Axis for the function to be applied on.\nskipna : bool, default True\n Exclude NA/null values when computing the result.\nlevel : int or level name, default None\n If the 
axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\nnumeric_only : bool, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n%(min_count)s\\\n**kwargs\n Additional keyword arguments to be passed to the function.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\\n%(see_also)s\\\n%(examples)s\n\"\"\"\n\n_num_doc_mad = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\n Axis for the function to be applied on.\nskipna : bool, default None\n Exclude NA/null values when computing the result.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\\n%(see_also)s\\\n%(examples)s\n\"\"\"\n\n_num_ddof_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\nskipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\nddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\nnumeric_only : bool, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\n\"\"\"\n\n_bool_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns', None}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n * 1 / 'columns' : reduce the columns, return a Series whose index is the\n original index.\n * None : reduce all axes, return a scalar.\n\nbool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data. Not implemented for Series.\nskipna : bool, default True\n Exclude NA/null values. If the entire row/column is NA and skipna is\n True, then the result will be %(empty_value)s, as for an empty row/column.\n If skipna is False, then NA are treated as True, because these are not\n equal to zero.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\n**kwargs : any, default None\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(name1)s or %(name2)s\n If level is specified, then, %(name2)s is returned; otherwise, %(name1)s\n is returned.\n\n%(see_also)s\n%(examples)s\"\"\"\n\n_all_desc = \"\"\"\\\nReturn whether all elements are True, potentially over an axis.\n\nReturns True unless there at least one element within a series or\nalong a Dataframe axis that is False or equivalent (e.g. 
zero or\nempty).\"\"\"\n\n_all_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> pd.Series([True, True]).all()\nTrue\n>>> pd.Series([True, False]).all()\nFalse\n>>> pd.Series([]).all()\nTrue\n>>> pd.Series([np.nan]).all()\nTrue\n>>> pd.Series([np.nan]).all(skipna=False)\nTrue\n\n**DataFrames**\n\nCreate a dataframe from a dictionary.\n\n>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})\n>>> df\n col1 col2\n0 True True\n1 True False\n\nDefault behaviour checks if column-wise values all return True.\n\n>>> df.all()\ncol1 True\ncol2 False\ndtype: bool\n\nSpecify ``axis='columns'`` to check if row-wise values all return True.\n\n>>> df.all(axis='columns')\n0 True\n1 False\ndtype: bool\n\nOr ``axis=None`` for whether every value is True.\n\n>>> df.all(axis=None)\nFalse\n\"\"\"\n\n_all_see_also = \"\"\"\\\nSee Also\n--------\nSeries.all : Return True if all elements are True.\nDataFrame.any : Return True if one (or more) elements are True.\n\"\"\"\n\n_cnum_doc = \"\"\"\nReturn cumulative %(desc)s over a DataFrame or Series axis.\n\nReturns a DataFrame or Series of the same size containing the cumulative\n%(desc)s.\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns'}, default 0\n The index or the name of the axis. 0 is equivalent to None or 'index'.\nskipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n*args, **kwargs :\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(name1)s or %(name2)s\n\nSee Also\n--------\ncore.window.Expanding.%(accum_func_name)s : Similar functionality\n but ignores ``NaN`` values.\n%(name2)s.%(accum_func_name)s : Return the %(desc)s over\n %(name2)s axis.\n%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.\n%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.\n%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.\n%(name2)s.cumprod : Return cumulative product over %(name2)s axis.\n\n%(examples)s\"\"\"\n\n_cummin_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummin()\n0 2.0\n1 NaN\n2 2.0\n3 -1.0\n4 -1.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummin(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the minimum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummin()\n A B\n0 2.0 1.0\n1 2.0 NaN\n2 1.0 0.0\n\nTo iterate over columns and find the minimum in each row,\nuse ``axis=1``\n\n>>> df.cummin(axis=1)\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cumsum_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumsum()\n0 2.0\n1 NaN\n2 7.0\n3 6.0\n4 6.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumsum(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... 
columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the sum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumsum()\n A B\n0 2.0 1.0\n1 5.0 NaN\n2 6.0 1.0\n\nTo iterate over columns and find the sum in each row,\nuse ``axis=1``\n\n>>> df.cumsum(axis=1)\n A B\n0 2.0 3.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_cumprod_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumprod()\n0 2.0\n1 NaN\n2 10.0\n3 -10.0\n4 -0.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumprod(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the product\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumprod()\n A B\n0 2.0 1.0\n1 6.0 NaN\n2 6.0 0.0\n\nTo iterate over columns and find the product in each row,\nuse ``axis=1``\n\n>>> df.cumprod(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cummax_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummax()\n0 2.0\n1 NaN\n2 5.0\n3 5.0\n4 5.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummax(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the maximum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummax()\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 3.0 1.0\n\nTo iterate over columns and find the maximum in each row,\nuse ``axis=1``\n\n>>> df.cummax(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_any_see_also = \"\"\"\\\nSee Also\n--------\nnumpy.any : Numpy version of this method.\nSeries.any : Return whether any element is True.\nSeries.all : Return whether all elements are True.\nDataFrame.any : Return whether any element is True over requested axis.\nDataFrame.all : Return whether all elements are True over requested axis.\n\"\"\"\n\n_any_desc = \"\"\"\\\nReturn whether any element is True, potentially over an axis.\n\nReturns False unless there at least one element within a series or\nalong a Dataframe axis that is True or equivalent (e.g. 
non-zero or\nnon-empty).\"\"\"\n\n_any_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\nFor Series input, the output is a scalar indicating whether any element\nis True.\n\n>>> pd.Series([False, False]).any()\nFalse\n>>> pd.Series([True, False]).any()\nTrue\n>>> pd.Series([]).any()\nFalse\n>>> pd.Series([np.nan]).any()\nFalse\n>>> pd.Series([np.nan]).any(skipna=False)\nTrue\n\n**DataFrame**\n\nWhether each column contains at least one True element (the default).\n\n>>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [0, 2], \"C\": [0, 0]})\n>>> df\n A B C\n0 1 0 0\n1 2 2 0\n\n>>> df.any()\nA True\nB True\nC False\ndtype: bool\n\nAggregating over the columns.\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 2]})\n>>> df\n A B\n0 True 1\n1 False 2\n\n>>> df.any(axis='columns')\n0 True\n1 True\ndtype: bool\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 0]})\n>>> df\n A B\n0 True 1\n1 False 0\n\n>>> df.any(axis='columns')\n0 True\n1 False\ndtype: bool\n\nAggregating over the entire DataFrame with ``axis=None``.\n\n>>> df.any(axis=None)\nTrue\n\n`any` for an empty DataFrame is an empty Series.\n\n>>> pd.DataFrame([]).any()\nSeries([], dtype: bool)\n\"\"\"\n\n_shared_docs[\n \"stat_func_example\"\n] = \"\"\"\n\nExamples\n--------\n>>> idx = pd.MultiIndex.from_arrays([\n... ['warm', 'warm', 'cold', 'cold'],\n... ['dog', 'falcon', 'fish', 'spider']],\n... names=['blooded', 'animal'])\n>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)\n>>> s\nblooded animal\nwarm dog 4\n falcon 2\ncold fish 0\n spider 8\nName: legs, dtype: int64\n\n>>> s.{stat_func}()\n{default_output}\n\n{verb} using level names, as well as indices.\n\n>>> s.{stat_func}(level='blooded')\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\n\n>>> s.{stat_func}(level=0)\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\"\"\"\n\n_sum_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"sum\", verb=\"Sum\", default_output=14, level_output_0=6, level_output_1=8\n)\n\n_sum_examples += \"\"\"\n\nBy default, the sum of an empty or all-NA Series is ``0``.\n\n>>> pd.Series([]).sum() # min_count=0 is the default\n0.0\n\nThis can be controlled with the ``min_count`` parameter. 
For example, if\nyou'd like the sum of an empty series to be NaN, pass ``min_count=1``.\n\n>>> pd.Series([]).sum(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).sum()\n0.0\n\n>>> pd.Series([np.nan]).sum(min_count=1)\nnan\"\"\"\n\n_max_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"max\", verb=\"Max\", default_output=8, level_output_0=4, level_output_1=8\n)\n\n_min_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"min\", verb=\"Min\", default_output=0, level_output_0=2, level_output_1=0\n)\n\n_stat_func_see_also = \"\"\"\n\nSee Also\n--------\nSeries.sum : Return the sum.\nSeries.min : Return the minimum.\nSeries.max : Return the maximum.\nSeries.idxmin : Return the index of the minimum.\nSeries.idxmax : Return the index of the maximum.\nDataFrame.sum : Return the sum over the requested axis.\nDataFrame.min : Return the minimum over the requested axis.\nDataFrame.max : Return the maximum over the requested axis.\nDataFrame.idxmin : Return the index of the minimum over the requested axis.\nDataFrame.idxmax : Return the index of the maximum over the requested axis.\"\"\"\n\n_prod_examples = \"\"\"\n\nExamples\n--------\nBy default, the product of an empty or all-NA Series is ``1``\n\n>>> pd.Series([]).prod()\n1.0\n\nThis can be controlled with the ``min_count`` parameter\n\n>>> pd.Series([]).prod(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).prod()\n1.0\n\n>>> pd.Series([np.nan]).prod(min_count=1)\nnan\"\"\"\n\n_min_count_stub = \"\"\"\\\nmin_count : int, default 0\n The required number of valid values to perform the operation. If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n\n .. versionadded:: 0.22.0\n\n Added with the default being 0. 
This means the sum of an all-NA\n or empty Series is 0, and the product of an all-NA or empty\n Series is 1.\n\"\"\"\n\n\ndef _make_min_count_stat_function(\n cls, name, name1, name2, axis_descr, desc, f, see_also: str = \"\", examples: str = \"\"\n):\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n min_count=_min_count_stub,\n see_also=see_also,\n examples=examples,\n )\n @Appender(_num_doc)\n def stat_func(\n self,\n axis=None,\n skipna=None,\n level=None,\n numeric_only=None,\n min_count=0,\n **kwargs,\n ):\n if name == \"sum\":\n nv.validate_sum(tuple(), kwargs)\n elif name == \"prod\":\n nv.validate_prod(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\n name, axis=axis, level=level, skipna=skipna, min_count=min_count\n )\n return self._reduce(\n f,\n name,\n axis=axis,\n skipna=skipna,\n numeric_only=numeric_only,\n min_count=min_count,\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function(\n cls, name, name1, name2, axis_descr, desc, f, see_also: str = \"\", examples: str = \"\"\n):\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n min_count=\"\",\n see_also=see_also,\n examples=examples,\n )\n @Appender(_num_doc)\n def stat_func(\n self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs\n ):\n if name == \"median\":\n nv.validate_median(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)\n return self._reduce(\n f, name, axis=axis, skipna=skipna, numeric_only=numeric_only\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):\n @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)\n @Appender(_num_ddof_doc)\n def stat_func(\n self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs\n ):\n nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\n name, axis=axis, level=level, skipna=skipna, ddof=ddof\n )\n return self._reduce(\n f, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_cum_function(\n cls,\n name,\n name1,\n name2,\n axis_descr,\n desc,\n accum_func,\n accum_func_name,\n mask_a,\n mask_b,\n examples,\n):\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n accum_func_name=accum_func_name,\n examples=examples,\n )\n @Appender(_cnum_doc)\n def cum_func(self, axis=None, skipna=True, *args, **kwargs):\n skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)\n if axis is None:\n axis = self._stat_axis_number\n else:\n axis = self._get_axis_number(axis)\n\n if axis == 1:\n return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T\n\n def na_accum_func(blk_values):\n # We will be applying this function to block values\n if blk_values.dtype.kind in [\"m\", \"M\"]:\n # GH#30460, GH#29058\n # numpy 1.18 started sorting NaTs at the end instead of beginning,\n # so we need to work around to 
maintain backwards-consistency.\n orig_dtype = blk_values.dtype\n\n # We need to define mask before masking NaTs\n mask = isna(blk_values)\n\n if accum_func == np.minimum.accumulate:\n # Note: the accum_func comparison fails as an \"is\" comparison\n y = blk_values.view(\"i8\")\n y[mask] = np.iinfo(np.int64).max\n changed = True\n else:\n y = blk_values\n changed = False\n\n result = accum_func(y.view(\"i8\"), axis)\n if skipna:\n np.putmask(result, mask, iNaT)\n elif accum_func == np.minimum.accumulate:\n # Restore NaTs that we masked previously\n nz = (~np.asarray(mask)).nonzero()[0]\n if len(nz):\n # everything up to the first non-na entry stays NaT\n result[: nz[0]] = iNaT\n\n if changed:\n # restore NaT elements\n y[mask] = iNaT # TODO: could try/finally for this?\n\n if isinstance(blk_values, np.ndarray):\n result = result.view(orig_dtype)\n else:\n # DatetimeArray\n result = type(blk_values)._from_sequence(result, dtype=orig_dtype)\n\n elif skipna and not issubclass(\n blk_values.dtype.type, (np.integer, np.bool_)\n ):\n vals = blk_values.copy().T\n mask = isna(vals)\n np.putmask(vals, mask, mask_a)\n result = accum_func(vals, axis)\n np.putmask(result, mask, mask_b)\n else:\n result = accum_func(blk_values.T, axis)\n\n # transpose back for ndarray, not for EA\n return result.T if hasattr(result, \"T\") else result\n\n result = self._data.apply(na_accum_func)\n\n d = self._construct_axes_dict()\n d[\"copy\"] = False\n return self._constructor(result, **d).__finalize__(self)\n\n return set_function_name(cum_func, name, cls)\n\n\ndef _make_logical_function(\n cls, name, name1, name2, axis_descr, desc, f, see_also, examples, empty_value\n):\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n see_also=see_also,\n examples=examples,\n empty_value=empty_value,\n )\n @Appender(_bool_doc)\n def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):\n nv.validate_logical_func(tuple(), kwargs, fname=name)\n if level is not None:\n if bool_only is not None:\n raise NotImplementedError(\n \"Option bool_only is not implemented with option level.\"\n )\n return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)\n return self._reduce(\n f,\n name,\n axis=axis,\n skipna=skipna,\n numeric_only=bool_only,\n filter_type=\"bool\",\n )\n\n return set_function_name(logical_func, name, cls)\n"
] |
[
[
"pandas.tseries.frequencies.to_offset",
"pandas.util._validators.validate_bool_kwarg",
"pandas.core.dtypes.inference.is_hashable",
"numpy.unique",
"numpy.asanyarray",
"pandas.core.dtypes.common.is_re_compilable",
"pandas.concat",
"pandas.core.dtypes.common.is_list_like",
"pandas.compat.numpy.function.validate_cum_func_with_skipna",
"pandas.io.pickle.to_pickle",
"numpy.array",
"pandas.core.dtypes.common.is_period_arraylike",
"pandas.io.formats.format.DataFrameFormatter",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.window.Window",
"pandas.core.dtypes.missing.isna",
"pandas.io.sql.to_sql",
"pandas.Series",
"numpy.asarray",
"pandas.io.json.to_json",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"numpy.iinfo",
"pandas.core.common.SettingWithCopyError",
"pandas.compat._optional.import_optional_dependency",
"pandas._config.config.is_nonnegative_int",
"numpy.putmask",
"pandas.core.construction.create_series_with_explicit_dtype",
"pandas._libs.Timestamp",
"pandas.core.dtypes.common.ensure_str",
"pandas.core.indexes.api.Index",
"numpy.errstate",
"pandas.core.computation.parsing.clean_column_name",
"pandas.core.common.random_state",
"pandas.core.dtypes.common.is_integer",
"pandas.core.resample.get_resampler",
"pandas.util._decorators.doc",
"pandas._libs.lib.item_from_zerodim",
"pandas.core.indexes.datetimes.DatetimeIndex",
"pandas.core.window.EWM",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.missing.find_valid_index",
"pandas.core.dtypes.missing.notna",
"pandas.DataFrame",
"pandas.core.common.pipe",
"pandas.core.indexes.api.RangeIndex",
"pandas.core.common.values_from_object",
"pandas.io.pytables.to_hdf",
"pandas.util._decorators.Substitution",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.errors.AbstractMethodError",
"pandas.core.dtypes.common.is_number",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.common.is_dict_like",
"pandas.core.window.Rolling",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"numpy.isnan",
"pandas.core.algorithms.rank",
"pandas.core.missing.mask_missing",
"pandas.core.dtypes.common.is_bool",
"pandas.core.missing.get_fill_func",
"pandas.util._decorators.rewrite_axis_style_signature",
"pandas.core.missing.clean_fill_method",
"pandas.core.common.get_rename_function",
"pandas.core.dtypes.common.is_scalar",
"pandas.io.formats.csvs.CSVFormatter",
"pandas.io.formats.format.format_percentiles",
"pandas.util._validators.validate_fillna_kwargs",
"pandas.core.indexes.api.ensure_index",
"pandas.core.resample.asfreq",
"pandas.compat.numpy.function.validate_clip_with_axis",
"pandas.util._validators.validate_percentile",
"pandas.core.indexes.period.Period",
"numpy.any",
"pandas.io.clipboards.to_clipboard",
"pandas.core.window.Expanding",
"pandas.compat.set_function_name",
"pandas.core.dtypes.common.is_float",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.tools.datetimes.to_datetime",
"pandas.core.common.maybe_make_list",
"pandas._config.config.get_option",
"pandas.core.common.count_not_none",
"numpy.abs",
"pandas.core.ops._align_method_FRAME",
"pandas.io.formats.excel.ExcelFormatter",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.common.apply_if_callable",
"numpy.prod",
"pandas.core.common.index_labels_to_array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
QDucasse/FDIA_simulation
|
[
"bdd0cb072f07b9a96fd82df581c9c7493ae66cbc",
"bdd0cb072f07b9a96fd82df581c9c7493ae66cbc"
] |
[
"fdia_simulation/tests/filters/test_filters_ct.py",
"fdia_simulation/tests/attackers/test_period_attacker.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 22 09:19:47 2019\n\n@author: qde\n\"\"\"\n\nimport unittest\nimport numpy as np\nfrom pprint import pprint\nfrom copy import deepcopy\nfrom math import sqrt,atan2, cos, sin\nfrom nose.tools import raises\nfrom numpy.linalg import inv\nfrom scipy.linalg import block_diag\nfrom fdia_simulation.models import Radar, LabeledMeasurement\nfrom fdia_simulation.filters import RadarFilterCT, MultipleRadarsFilterCT, MultiplePeriodRadarsFilterCT\n\n\nclass RadarFilterCTTestCase(unittest.TestCase):\n def setUp(self):\n self.radar = Radar(x=0,y=0)\n self.q = 10.\n self.filter_ct = RadarFilterCT(dim_x = 9, dim_z = 3, q = self.q,\n radar = self.radar)\n\n\n # ==========================================================================\n # ========================= Initialization tests ===========================\n\n def test_initial_F(self):\n dt = self.filter_ct.dt\n X = self.filter_ct.x\n vx = X[1,0]\n ax = X[2,0]\n vy = X[4,0]\n ay = X[5,0]\n vz = X[7,0]\n az = X[8,0]\n omega = sqrt(ax**2 + ay**2 + az**2)/sqrt(vx**2 + vy**2 + vz**2)\n F_block = np.array([[1, sin(omega*dt)/omega, (1 - cos(omega*dt))/omega**2],\n [0, cos(omega*dt), sin(omega*dt)/omega],\n [0, -omega*sin(omega*dt), cos(omega*dt)]])\n F = block_diag(F_block,F_block,F_block)\n self.assertTrue(np.array_equal(self.filter_ct.F,F))\n\n def test_initial_R(self):\n dt = self.filter_ct.dt\n R = np.array([[1., 0. , 0. ],\n [0., 0.001, 0. ],\n [0., 0. , 0.001]])\n self.assertTrue(np.array_equal(self.filter_ct.R,R))\n\n def test_initial_Q(self):\n dt = self.filter_ct.dt\n q = self.q\n Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0,dt, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0,dt, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,dt]])\n self.assertTrue(np.array_equal(self.filter_ct.Q,Q))\n\n def test_initial_positions(self):\n x0 = self.filter_ct.x[0,0]\n y0 = self.filter_ct.x[3,0]\n z0 = self.filter_ct.x[6,0]\n self.assertEqual(x0, 1e-6)\n self.assertEqual(y0, 1e-6)\n self.assertEqual(z0, 1e-6)\n\n def test_initial_velocities(self):\n vx0 = self.filter_ct.x[1,0]\n vy0 = self.filter_ct.x[4,0]\n vz0 = self.filter_ct.x[7,0]\n self.assertEqual(vx0, 1e-6)\n self.assertEqual(vy0, 1e-6)\n self.assertEqual(vz0, 1e-6)\n\n def test_initial_accelerations(self):\n vx0 = self.filter_ct.x[2,0]\n vy0 = self.filter_ct.x[5,0]\n vz0 = self.filter_ct.x[8,0]\n self.assertEqual(vx0, 1e-6)\n self.assertEqual(vy0, 1e-6)\n self.assertEqual(vz0, 1e-6)\n\n def test_initial_radar_positions(self):\n x_rad = self.filter_ct.x_rad\n y_rad = self.filter_ct.y_rad\n z_rad = self.filter_ct.z_rad\n self.assertEqual(x_rad, 0.)\n self.assertEqual(y_rad, 0.)\n self.assertEqual(z_rad, 0.)\n\n # ==========================================================================\n # ========================= Q/F generation tests ===========================\n\n def test_F_computing(self):\n dt = 5.\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n self.filter_ct.dt = dt\n self.filter_ct.x = X\n vx = X[1,0]\n ax = X[2,0]\n vy = X[4,0]\n ay = X[5,0]\n vz = X[7,0]\n az = X[8,0]\n omega = sqrt(ax**2 + ay**2 + az**2)/sqrt(vx**2 + vy**2 + vz**2)\n F_block = np.array([[1, sin(omega*dt)/omega, (1 - cos(omega*dt))/omega**2],\n [0, cos(omega*dt), sin(omega*dt)/omega],\n [0, -omega*sin(omega*dt), cos(omega*dt)]])\n F = block_diag(F_block,F_block,F_block)\n\n computed_F = 
self.filter_ct.compute_F(self.filter_ct.x)\n self.assertTrue(np.array_equal(self.filter_ct.F,F))\n self.assertTrue(np.array_equal(computed_F,F))\n\n def test_Q_computing(self):\n dt = 5.\n q = 20.\n Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0,dt, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0,dt, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,dt]])\n self.filter_ct.dt = dt\n computed_Q = self.filter_ct.compute_Q(q)\n self.assertTrue(np.array_equal(self.filter_ct.Q,Q))\n self.assertTrue(np.array_equal(computed_Q,Q))\n\n # ==========================================================================\n # ========================= hx/HJacob tests ================================\n def test_HJacob_computing(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n x = X[0,0]\n y = X[3,0]\n z = X[6,0]\n H = np.array([[x/sqrt(x**2 + y**2 + z**2), 0, 0, y/sqrt(x**2 + y**2 + z**2), 0, 0, z/sqrt(x**2 + y**2 + z**2),0 ,0],\n [-y/(x**2 + y**2), 0, 0, x/(x**2 + y**2), 0, 0, 0, 0, 0],\n [-x*z/(sqrt(x**2 + y**2)*(x**2 + y**2 + z**2)), 0, 0, -y*z/(sqrt(x**2 + y**2)*(x**2 + y**2 + z**2)), 0, 0, sqrt(x**2 + y**2)/(x**2 + y**2 + z**2), 0, 0]])\n\n computed_H = self.filter_ct.HJacob(X)\n self.assertTrue(np.array_equal(computed_H,H))\n\n def test_hx_computing(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n x = X[0,0]\n y = X[3,0]\n z = X[6,0]\n r = sqrt(x**2 + y**2 + z**2)\n theta = atan2(y,x)\n phi = atan2(z,sqrt(x**2 + y**2))\n Zk = np.array([[r,theta,phi]]).T\n computed_Zk = self.filter_ct.hx(X)\n self.assertTrue(np.array_equal(Zk,computed_Zk))\n\n # ==========================================================================\n # ========================= predict/update cycle tests =====================\n\n def test_residual_of(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n X_prior = np.array([[2000, 200, 20, 2000, 200, 20, 8000, 2, 10]]).T\n z = np.array([[200, 10, 10]]).T\n computed_resid = z - self.filter_ct.HJacob(X)@X_prior\n\n self.filter_ct.x = X\n self.filter_ct.x_prior = X_prior\n resid = self.filter_ct.residual_of(z)\n\n self.assertTrue(np.array_equal(computed_resid,resid))\n\n def test_predict(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n filt = self.filter_ct\n filt.x = X\n pre_F = deepcopy(filt.F)\n F = filt.compute_F(X)\n P = filt.P\n Q = filt.Q\n predicted_X = F@X\n predicted_P = F@[email protected] + Q\n\n filt.F = pre_F # Needed to keep F unaltered as before the predict step\n filt.predict()\n self.assertTrue(np.array_equal(predicted_X,filt.x))\n self.assertTrue(np.array_equal(predicted_P,filt.P))\n self.assertTrue(np.array_equal(predicted_X,filt.x_prior))\n self.assertTrue(np.array_equal(predicted_P,filt.P_prior))\n\n\n def test_update(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n z = np.array([[200, 10, 10]]).T\n filt = self.filter_ct\n filt.x = X\n filt.predict()\n H = filt.HJacob(filt.x)\n S = [email protected]@H.T + filt.R\n K = [email protected]@inv(S)\n\n hx = filt.hx(filt.x)\n y = z - hx\n new_X = filt.x + K@y\n IKH = (filt._I - K@H)\n new_P = ([email protected])@IKH.T + ([email protected])@K.T\n\n filt.update(z)\n self.assertTrue(np.array_equal(filt.P,new_P))\n self.assertTrue(np.array_equal(filt.x,new_X))\n\n\nclass MultipleRadarsCTTestCase(unittest.TestCase):\n def setUp(self):\n self.radar1 = Radar(x=800,y=800)\n self.radar2 = 
Radar(x=200,y=200)\n radars = [self.radar1,self.radar2]\n self.q = 10.\n self.multiple_ct = MultipleRadarsFilterCT(dim_x = 9, dim_z = 3, q = self.q, radars = radars,\n x0 = 100, y0 = 100)\n\n # ==========================================================================\n # ========================= Initialization tests ===========================\n def test_initial_radar_positions(self):\n positions = [[self.radar1.x,self.radar1.y,self.radar1.z],[self.radar2.x,self.radar2.y,self.radar2.z]]\n computed_positions = self.multiple_ct.radar_positions\n self.assertEqual(computed_positions,positions)\n\n def test_initial_R(self):\n dt = self.multiple_ct.dt\n R = np.array([[1., 0. , 0. , 0., 0. , 0. ],\n [0., 0.001, 0. , 0., 0. , 0. ],\n [0., 0. , 0.001, 0., 0. , 0. ],\n [0., 0. , 0. , 1., 0. , 0. ],\n [0., 0. , 0. , 0., 0.001, 0. ],\n [0., 0. , 0. , 0., 0. , 0.001]])\n self.assertTrue(np.array_equal(self.multiple_ct.R,R))\n\n def test_initial_F(self):\n dt = self.multiple_ct.dt\n X = self.multiple_ct.x\n vx = X[1,0]\n ax = X[2,0]\n vy = X[4,0]\n ay = X[5,0]\n vz = X[7,0]\n az = X[8,0]\n omega = sqrt(ax**2 + ay**2 + az**2)/sqrt(vx**2 + vy**2 + vz**2)\n F_block = np.array([[1, sin(omega*dt)/omega, (1 - cos(omega*dt))/omega**2],\n [0, cos(omega*dt), sin(omega*dt)/omega],\n [0, -omega*sin(omega*dt), cos(omega*dt)]])\n F = block_diag(F_block,F_block,F_block)\n self.assertTrue(np.array_equal(self.multiple_ct.F,F))\n\n def test_initial_Q(self):\n dt = self.multiple_ct.dt\n q = self.q\n Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0,dt, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0,dt, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,dt]])\n self.assertTrue(np.array_equal(self.multiple_ct.Q,Q))\n\n\n # ==========================================================================\n # ========================= Q/F generation tests ===========================\n\n def test_F_computing(self):\n dt = 5.\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n self.multiple_ct.x = X\n self.multiple_ct.dt = dt\n vx = X[1,0]\n ax = X[2,0]\n vy = X[4,0]\n ay = X[5,0]\n vz = X[7,0]\n az = X[8,0]\n omega = sqrt(ax**2 + ay**2 + az**2)/sqrt(vx**2 + vy**2 + vz**2)\n F_block = np.array([[1, sin(omega*dt)/omega, (1 - cos(omega*dt))/omega**2],\n [0, cos(omega*dt), sin(omega*dt)/omega],\n [0, -omega*sin(omega*dt), cos(omega*dt)]])\n F = block_diag(F_block,F_block,F_block)\n\n computed_F = self.multiple_ct.compute_F(self.multiple_ct.x)\n self.assertTrue(np.array_equal(self.multiple_ct.F,F))\n self.assertTrue(np.array_equal(computed_F,F))\n\n def test_Q_computing(self):\n dt = 5.\n q = 20.\n Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0,dt, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0,dt, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,dt]])\n self.multiple_ct.dt = dt\n computed_Q = self.multiple_ct.compute_Q(q)\n self.assertTrue(np.array_equal(self.multiple_ct.Q,Q))\n self.assertTrue(np.array_equal(computed_Q,Q))\n\n # ==========================================================================\n # ============================= HJacob/hx generation =======================\n\n def test_HJacob_computing(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n x1 = X[0,0] - self.radar1.x\n y1 = X[3,0] - self.radar1.y\n z1 = X[6,0] - 
self.radar1.z\n x2 = X[0,0] - self.radar2.x\n y2 = X[3,0] - self.radar2.y\n z2 = X[6,0] - self.radar2.z\n H = np.array([[x1/sqrt(x1**2 + y1**2 + z1**2), 0, 0, y1/sqrt(x1**2 + y1**2 + z1**2), 0, 0, z1/sqrt(x1**2 + y1**2 + z1**2),0 ,0],\n [-y1/(x1**2 + y1**2), 0, 0, x1/(x1**2 + y1**2), 0, 0, 0, 0, 0],\n [-x1*z1/(sqrt(x1**2 + y1**2)*(x1**2 + y1**2 + z1**2)), 0, 0, -y1*z1/(sqrt(x1**2 + y1**2)*(x1**2 + y1**2 + z1**2)), 0, 0, sqrt(x1**2 + y1**2)/(x1**2 + y1**2 + z1**2), 0, 0],\n [x2/sqrt(x2**2 + y2**2 + z2**2), 0, 0, y2/sqrt(x2**2 + y2**2 + z2**2), 0, 0, z2/sqrt(x2**2 + y2**2 + z2**2),0 ,0],\n [-y2/(x2**2 + y2**2), 0, 0, x2/(x2**2 + y2**2), 0, 0, 0, 0, 0],\n [-x2*z2/(sqrt(x2**2 + y2**2)*(x2**2 + y2**2 + z2**2)), 0, 0, -y2*z2/(sqrt(x2**2 + y2**2)*(x2**2 + y2**2 + z2**2)), 0, 0, sqrt(x2**2 + y2**2)/(x2**2 + y2**2 + z2**2), 0, 0]])\n\n computed_H = self.multiple_ct.HJacob(X)\n self.assertTrue(np.array_equal(computed_H,H))\n\n def test_hx_computing(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n x1 = X[0,0] - self.radar1.x\n y1 = X[3,0] - self.radar1.y\n z1 = X[6,0] - self.radar1.z\n x2 = X[0,0] - self.radar2.x\n y2 = X[3,0] - self.radar2.y\n z2 = X[6,0] - self.radar2.z\n r1 = sqrt(x1**2 + y1**2 + z1**2)\n theta1 = atan2(y1,x1)\n phi1 = atan2(z1,sqrt(x1**2 + y1**2))\n r2 = sqrt(x2**2 + y2**2 + z2**2)\n theta2 = atan2(y2,x2)\n phi2 = atan2(z2,sqrt(x2**2 + y2**2))\n Zk = np.array([[r1,theta1,phi1,r2,theta2,phi2]]).T\n computed_Zk = self.multiple_ct.hx(X)\n self.assertTrue(np.array_equal(Zk,computed_Zk))\n\n # ==========================================================================\n # ========================= predict/update cycle tests =====================\n\n def test_residual_of(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n X_prior = np.array([[2000, 200, 20, 2000, 200, 20, 8000, 2, 10]]).T\n z = np.array([[200, 10, 10, 210, 9, 8]]).T\n computed_resid = z - self.multiple_ct.HJacob(X)@X_prior\n\n self.multiple_ct.x = X\n self.multiple_ct.x_prior = X_prior\n resid = self.multiple_ct.residual_of(z)\n\n self.assertTrue(np.array_equal(computed_resid,resid))\n\n def test_predict(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n filt = self.multiple_ct\n filt.x = X\n pre_F = deepcopy(filt.F)\n F = filt.compute_F(X)\n P = filt.P\n Q = filt.Q\n predicted_X = F@X\n predicted_P = F@[email protected] + Q\n\n filt.F = pre_F # Needed to keep F unaltered as before the predict step\n filt.predict()\n self.assertTrue(np.array_equal(predicted_X,filt.x))\n self.assertTrue(np.array_equal(predicted_P,filt.P))\n self.assertTrue(np.array_equal(predicted_X,filt.x_prior))\n self.assertTrue(np.array_equal(predicted_P,filt.P_prior))\n\n\n def test_update(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n z = np.array([[200, 10, 10, 210, 9, 8]]).T\n filt = self.multiple_ct\n filt.x = X\n filt.predict()\n H = filt.HJacob(filt.x)\n S = [email protected]@H.T + filt.R\n K = [email protected]@inv(S)\n\n hx = filt.hx(filt.x)\n y = z - hx\n new_X = filt.x + K@y\n IKH = (filt._I - K@H)\n new_P = ([email protected])@IKH.T + ([email protected])@K.T\n\n filt.update(z)\n print(filt.P-new_P)\n self.assertTrue(np.allclose(filt.P,new_P))\n self.assertTrue(np.allclose(filt.x,new_X))\n\nclass MultiplePeriodRadarsCTTestCase(unittest.TestCase):\n def setUp(self):\n self.radar1 = Radar(x=800,y=800)\n self.radar2 = Radar(x=200,y=200)\n radars = [self.radar1,self.radar2]\n self.q = 10.\n self.multiplef_ct = MultiplePeriodRadarsFilterCT(dim_x = 9, 
dim_z = 3, q = self.q,\n radars = radars,\n x0 = 100, y0 = 100)\n\n # ==========================================================================\n # ========================= Initialization tests ===========================\n\n def test_initial_radar_positions(self):\n positions = [[self.radar1.x,self.radar1.y,self.radar1.z],[self.radar2.x,self.radar2.y,self.radar2.z]]\n computed_positions = self.multiplef_ct.radar_positions\n self.assertEqual(computed_positions,positions)\n\n def test_initial_R(self):\n dt = self.multiplef_ct.dt\n R = np.array([[1., 0. , 0. , 0., 0. , 0. ],\n [0., 0.001, 0. , 0., 0. , 0. ],\n [0., 0. , 0.001, 0., 0. , 0. ],\n [0., 0. , 0. , 1., 0. , 0. ],\n [0., 0. , 0. , 0., 0.001, 0. ],\n [0., 0. , 0. , 0., 0. , 0.001]])\n self.assertTrue(np.array_equal(self.multiplef_ct.R,R))\n\n def test_initial_F(self):\n dt = self.multiplef_ct.dt\n X = self.multiplef_ct.x\n vx = X[1,0]\n ax = X[2,0]\n vy = X[4,0]\n ay = X[5,0]\n vz = X[7,0]\n az = X[8,0]\n omega = sqrt(ax**2 + ay**2 + az**2)/sqrt(vx**2 + vy**2 + vz**2)\n F_block = np.array([[1, sin(omega*dt)/omega, (1 - cos(omega*dt))/omega**2],\n [0, cos(omega*dt), sin(omega*dt)/omega],\n [0, -omega*sin(omega*dt), cos(omega*dt)]])\n F = block_diag(F_block,F_block,F_block)\n self.assertTrue(np.array_equal(self.multiplef_ct.F,F))\n\n def test_initial_Q(self):\n dt = self.multiplef_ct.dt\n q = self.q\n Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0,dt, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0,dt, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,dt]])\n self.assertTrue(np.array_equal(self.multiplef_ct.Q,Q))\n\n def test_tag_radars(self):\n self.assertEqual(self.radar1.tag, 0)\n self.assertEqual(self.radar2.tag, 1)\n\n # ==========================================================================\n # ========================= Q/F generation tests ===========================\n\n def test_F_computing(self):\n dt = 5.\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n self.multiplef_ct.x = X\n self.multiplef_ct.dt = dt\n vx = X[1,0]\n ax = X[2,0]\n vy = X[4,0]\n ay = X[5,0]\n vz = X[7,0]\n az = X[8,0]\n omega = sqrt(ax**2 + ay**2 + az**2)/sqrt(vx**2 + vy**2 + vz**2)\n F_block = np.array([[1, sin(omega*dt)/omega, (1 - cos(omega*dt))/omega**2],\n [0, cos(omega*dt), sin(omega*dt)/omega],\n [0, -omega*sin(omega*dt), cos(omega*dt)]])\n F = block_diag(F_block,F_block,F_block)\n\n computed_F = self.multiplef_ct.compute_F(self.multiplef_ct.x)\n self.assertTrue(np.array_equal(self.multiplef_ct.F,F))\n self.assertTrue(np.array_equal(computed_F,F))\n\n def test_Q_computing(self):\n dt = 5.\n q = 20.\n Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0,dt, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0,dt, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,dt]])\n self.multiplef_ct.dt = dt\n computed_Q = self.multiplef_ct.compute_Q(q)\n self.assertTrue(np.array_equal(self.multiplef_ct.Q,Q))\n self.assertTrue(np.array_equal(computed_Q,Q))\n\n\n # ==========================================================================\n # ============================= HJacob/hx generation =======================\n\n def test_HJacob_computing_tag_is_0(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n tag = 0\n x2 = X[0,0] - self.radar2.x\n y2 = X[3,0] - 
self.radar2.y\n z2 = X[6,0] - self.radar2.z\n H = np.array([[x1/sqrt(x1**2 + y1**2 + z1**2), 0, 0, y1/sqrt(x1**2 + y1**2 + z1**2), 0, 0, z1/sqrt(x1**2 + y1**2 + z1**2),0 ,0],\n [-y1/(x1**2 + y1**2), 0, 0, x1/(x1**2 + y1**2), 0, 0, 0, 0, 0],\n [-x1*z1/(sqrt(x1**2 + y1**2)*(x1**2 + y1**2 + z1**2)), 0, 0, -y1*z1/(sqrt(x1**2 + y1**2)*(x1**2 + y1**2 + z1**2)), 0, 0, sqrt(x1**2 + y1**2)/(x1**2 + y1**2 + z1**2), 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0]])\n\n computed_H = self.multiplef_ct.HJacob(X,tag = tag)\n self.assertTrue(np.array_equal(computed_H,H))\n\n def test_HJacob_computing_tag_is_0(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n tag = 1\n x2 = X[0,0] - self.radar2.x\n y2 = X[3,0] - self.radar2.y\n z2 = X[6,0] - self.radar2.z\n H = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [x2/sqrt(x2**2 + y2**2 + z2**2), 0, 0, y2/sqrt(x2**2 + y2**2 + z2**2), 0, 0, z2/sqrt(x2**2 + y2**2 + z2**2),0 ,0],\n [-y2/(x2**2 + y2**2), 0, 0, x2/(x2**2 + y2**2), 0, 0, 0, 0, 0],\n [-x2*z2/(sqrt(x2**2 + y2**2)*(x2**2 + y2**2 + z2**2)), 0, 0, -y2*z2/(sqrt(x2**2 + y2**2)*(x2**2 + y2**2 + z2**2)), 0, 0, sqrt(x2**2 + y2**2)/(x2**2 + y2**2 + z2**2), 0, 0]])\n computed_H = self.multiplef_ct.HJacob(X,tag = tag)\n self.assertTrue(np.array_equal(computed_H,H))\n\n def test_hx_computing_tag_is_0(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n tag = 0\n x1 = X[0,0] - self.radar1.x\n y1 = X[3,0] - self.radar1.y\n z1 = X[6,0] - self.radar1.z\n r1 = sqrt(x1**2 + y1**2 + z1**2)\n theta1 = atan2(y1,x1)\n phi1 = atan2(z1,sqrt(x1**2 + y1**2))\n r2 = 0\n theta2 = 0\n phi2 = 0\n Zk = np.array([[r1,theta1,phi1,r2,theta2,phi2]]).T\n computed_Zk = self.multiplef_ct.hx(X, tag = tag)\n self.assertTrue(np.array_equal(Zk,computed_Zk))\n\n def test_hx_computing_tag_is_1(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n tag = 1\n x2 = X[0,0] - self.radar2.x\n y2 = X[3,0] - self.radar2.y\n z2 = X[6,0] - self.radar2.z\n r1 = 0\n theta1 = 0\n phi1 = 0\n r2 = sqrt(x2**2 + y2**2 + z2**2)\n theta2 = atan2(y2,x2)\n phi2 = atan2(z2,sqrt(x2**2 + y2**2))\n Zk = np.array([[r1,theta1,phi1,r2,theta2,phi2]]).T\n computed_Zk = self.multiplef_ct.hx(X, tag = tag)\n self.assertTrue(np.array_equal(Zk,computed_Zk))\n\n# ==========================================================================\n# ========================= predict/update cycle tests =====================\n\n def test_residual_of(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n X_prior = np.array([[2000, 200, 20, 2000, 200, 20, 8000, 2, 10]]).T\n z = np.array([[200, 10, 10]]).T\n tag = 0\n z_input = self.multiplef_ct.gen_complete_measurement(tag = tag, z = z)\n computed_resid = z_input - self.multiplef_ct.HJacob(X,tag = 0)@X_prior\n\n self.multiplef_ct.x = X\n self.multiplef_ct.x_prior = X_prior\n resid = self.multiplef_ct.residual_of(z = z, tag = tag)\n\n self.assertTrue(np.array_equal(computed_resid,resid))\n\n def test_predict(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n filt = self.multiplef_ct\n filt.x = X\n pre_F = deepcopy(filt.F)\n F = filt.compute_F(X)\n P = filt.P\n Q = filt.Q\n predicted_X = F@X\n predicted_P = F@[email protected] + Q\n\n filt.F = pre_F # Needed to keep F unaltered as before the predict step\n filt.predict()\n self.assertTrue(np.array_equal(predicted_X,filt.x))\n self.assertTrue(np.array_equal(predicted_P,filt.P))\n 
self.assertTrue(np.array_equal(predicted_X,filt.x_prior))\n self.assertTrue(np.array_equal(predicted_P,filt.P_prior))\n\n def test_update_times(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n tag = 1\n time = 1.\n z = np.array([[210, 9, 8]]).T\n labeled_z = LabeledMeasurement(tag = tag, time = 1., value = z)\n filt = self.multiplef_ct\n filt.x = X\n filt._last_t = 0.5\n\n dt = time - filt._last_t\n new_last_t = time\n filt.predict()\n filt.update(labeled_z)\n\n self.assertEqual(new_last_t, filt._last_t)\n self.assertEqual(dt, filt.dt)\n\n def test_update(self):\n X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T\n tag = 0\n z = np.array([[200, 10, 10]]).T\n labeled_z = LabeledMeasurement(tag = tag, value = z, time = 1.)\n filt = self.multiplef_ct\n filt.x = X\n filt.predict()\n H = filt.HJacob(filt.x, tag = tag)\n S = [email protected]@H.T + filt.R\n K = [email protected]@inv(S)\n\n hx = filt.hx(filt.x, tag = tag)\n z_input = filt.gen_complete_measurement(tag = tag, z = z)\n y = z_input - hx\n new_X = filt.x + K@y\n IKH = (filt._I - K@H)\n new_P = ([email protected])@IKH.T + ([email protected])@K.T\n\n filt.update(labeled_z)\n self.assertTrue(np.allclose(filt.P,new_P))\n self.assertTrue(np.allclose(filt.x,new_X))\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 31 14:37:53 2019\n\n@author: qde\n\"\"\"\n\nimport unittest\nimport numpy as np\nfrom nose.tools import raises\nfrom filterpy.kalman import KalmanFilter,ExtendedKalmanFilter\nfrom fdia_simulation.models import Radar, PeriodRadar, LabeledMeasurement\nfrom fdia_simulation.attackers import PeriodAttacker, DOSPeriodAttacker, DriftPeriodAttacker\n\nclass PeriodAttackerTestCase(unittest.TestCase):\n def setUp(self):\n # Simulated filter for 2 radars (2*3 measurements)\n self.filter = ExtendedKalmanFilter(dim_x = 9, dim_z = 6)\n self.gamma = np.eye(3)\n self.mag_vector = np.array([[-10, -10, -10]]).T\n self.t0 = 10\n self.time = 50\n self.radar = Radar(x=10,y=10)\n self.radar_pos = 1\n self.attacker = PeriodAttacker(filter = self.filter,\n radar = self.radar, radar_pos = self.radar_pos,\n gamma = self.gamma,mag_vector = self.mag_vector,\n t0 = self.t0, time = self.time)\n\n # ==========================================================================\n # ========================== Initialization tests ==========================\n\n def test_initialization_no_errors(self):\n self.assertTrue(np.array_equal(self.attacker.gamma,self.gamma))\n self.assertTrue(np.array_equal(self.attacker.mag_vector,self.mag_vector))\n self.assertEqual(self.t0,10)\n self.assertEqual(self.time,50)\n\n def test_initialization_wrong_mag_vector(self):\n with self.assertRaises(ValueError):\n mag_vector = np.array([[-10,-10]])\n attacker = PeriodAttacker(filter = self.filter,\n radar = self.radar, radar_pos = self.radar_pos,\n gamma = self.gamma,mag_vector = mag_vector,\n t0 = self.t0, time = self.time)\n\n def test_initialization_wrong_gamma(self):\n with self.assertRaises(ValueError):\n filter = ExtendedKalmanFilter(dim_x = 9, dim_z = 6)\n gamma = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n attacker = PeriodAttacker(filter = self.filter,\n radar = self.radar, radar_pos = self.radar_pos,\n gamma = gamma,mag_vector = self.mag_vector,\n t0 = self.t0, time = self.time)\n\n def test_initialization_attack_no_effect(self):\n with self.assertWarns(Warning):\n mag_vector = np.array([[0, 0, 0]]).T\n att = PeriodAttacker(filter = self.filter,\n radar = self.radar, radar_pos = self.radar_pos,\n gamma = self.gamma, mag_vector = mag_vector,\n t0 = self.t0, time = self.time)\n\n def test_initialization_given_pos(self):\n # Expected results\n gamma = np.array([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]])\n mag_vector = np.array([[1,1,1]]).T\n radar_pos = 0\n\n #Generation\n att = PeriodAttacker(filter = self.filter, radar_pos = radar_pos,\n radar = self.radar,t0 = self.t0, time = self.time)\n computed_gamma = att.gamma\n computed_mag_vector = att.mag_vector\n\n # Comparison\n self.assertTrue(np.array_equal(gamma,computed_gamma))\n self.assertTrue(np.array_equal(mag_vector,computed_mag_vector))\n # =======================================================\n # ========================== Listening/Attack tests ========================\n\n def test_listen_measurement_increments_time(self):\n measurements = [np.ones((3,1))*i for i in range(100)]\n tags = [1]*100\n times = [i/10 for i in range(100)]\n labeled_measurements = [LabeledMeasurement(tag,time,value) for tag,time,value in zip(tags,times,measurements)]\n for i,labeled_measurement in enumerate(labeled_measurements):\n self.assertEqual(self.attacker.current_time,i)\n self.attacker.listen_measurement(labeled_measurement)\n\n def test_listen_measurement_1_step_attack(self):\n measurement = np.array([[10,10,10]]).T\n 
tag = 1\n time = 1\n labeled_measurement = LabeledMeasurement(tag = tag, time = time, value = measurement)\n\n modified_measurement = np.array([[0,0,0]]).T\n tag = 1\n time = 1\n modified_labeled_measurement = LabeledMeasurement(tag = tag, time = time, value = modified_measurement)\n\n self.attacker.t0 = 0\n computed_measurement = self.attacker.listen_measurement(labeled_measurement)\n self.assertEqual(modified_labeled_measurement,computed_measurement)\n\n def test_listen_measurement_1_step_no_attack(self):\n measurement = np.array([[10,10,10]]).T\n tag = 1\n time = 1\n labeled_measurement = LabeledMeasurement(tag = tag, time = time, value = measurement)\n computed_measurement = self.attacker.listen_measurement(labeled_measurement)\n self.assertEqual(labeled_measurement,computed_measurement)\n\n def test_unattacked_vectors(self):\n measurements = [np.ones((3,1))*i for i in range(100)]\n tags = [1]*100\n times = [i/10 for i in range(100)]\n labeled_measurements = [LabeledMeasurement(tag,time,value) for tag,time,value in zip(tags,times,measurements)]\n\n modified_measurements = []\n for i,labeled_measurement in enumerate(labeled_measurements):\n mod_meas = self.attacker.listen_measurement(labeled_measurement)\n modified_measurements.append(mod_meas)\n\n # Unattacked measurements from 0 to 10\n measurements = [np.ones((3,1))*i for i in range(10)]\n tags = [1]*10\n times = [i/10 for i in range(10)]\n unattacked_measurements1 = [LabeledMeasurement(tag,time,value) for tag,time,value in zip(tags,times,measurements)]\n\n # Unattacked measurements from 60 to 100\n measurements = [np.ones((3,1))*i for i in range(60,100)]\n tags = [1]*40\n times = [i/10 for i in range(60,100)]\n unattacked_measurements2 = [LabeledMeasurement(tag,time,value) for tag,time,value in zip(tags,times,measurements)]\n\n comparison_list_1 = zip(unattacked_measurements1, modified_measurements[0:10])\n comparison_list_2 = zip(unattacked_measurements2, modified_measurements[60:100])\n self.assertTrue(all((meas == mod_meas) for meas, mod_meas in comparison_list_1))\n self.assertTrue(all((meas == mod_meas) for meas, mod_meas in comparison_list_2))\n\n # def test_attacked_vectors(self):\n # measurements = [np.ones((3,1))*i for i in range(100)]\n # tags = [1]*100\n # times = [i/10 for i in range(100)]\n # labeled_measurements = [LabeledMeasurement(tag,time,value) for tag,time,value in zip(tags,times,measurements)]\n #\n # modified_measurements = []\n # for i,labeled_measurement in enumerate(labeled_measurements):\n # mod_meas = self.attacker.listen_measurement(labeled_measurement)\n # modified_measurements.append(mod_meas)\n #\n # attacked_meas = [np.subtract(np.array([[i,i,i,i,i,i]]).T,np.array([[0,0,0,10,10,10]]).T) for i in range(10,60)]\n # comparison_list = zip(attacked_meas, modified_measurements[10:60])\n # self.assertTrue(all([np.allclose(meas, mod_meas) for meas, mod_meas in comparison_list]))\n\n\nclass DOSPeriodAttackerTestCase(PeriodAttackerTestCase):\n def setUp(self):\n PeriodAttackerTestCase.setUp(self)\n self.mag = 1e6\n self.attacker = DOSPeriodAttacker(filter = self.filter, mag = self.mag,\n gamma = self.gamma,mag_vector = self.mag_vector,\n radar_pos = self.radar_pos, radar = self.radar,\n t0 = self.t0, time = self.time)\n\n def test_initialization_no_errors(self):\n self.assertTrue(np.array_equal(self.attacker.gamma,self.gamma))\n self.assertTrue(np.array_equal(self.attacker.mag_vector,self.mag_vector*self.mag))\n self.assertEqual(self.t0,10)\n self.assertEqual(self.time,50)\n\n def 
test_listen_measurement_1_step_attack(self):\n measurement = np.array([[10,10,10]]).T\n tag = 1\n time = 1\n labeled_measurement = LabeledMeasurement(tag = tag, time = time, value = measurement)\n\n modified_measurement = np.array([[-9.99999e6,-9.99999e6,-9.99999e6]]).T\n tag = 1\n time = 1\n modified_labeled_measurement = LabeledMeasurement(tag = tag, time = time, value = modified_measurement)\n\n\n\n self.attacker.t0 = 0\n computed_measurement = self.attacker.listen_measurement(labeled_measurement)\n self.assertEqual(modified_labeled_measurement,computed_measurement)\n\n# def test_attacked_vectors(self):\n# pass\n\nclass DriftPeriodAttackerTestCase(PeriodAttackerTestCase):\n def setUp(self):\n self.radar_position = 1\n PeriodAttackerTestCase.setUp(self)\n self.attacker = DriftPeriodAttacker(filter = self.filter,\n radar = self.radar, radar_pos = self.radar_position,\n mag_vector = self.mag_vector,\n t0 = self.t0, time = self.time)\n\n def test_initialization_no_errors(self):\n self.assertTrue(np.array_equal(self.attacker.gamma,self.gamma))\n self.assertTrue(np.array_equal(self.attacker.mag_vector,self.mag_vector))\n self.assertEqual(self.t0,10)\n self.assertEqual(self.time,50)\n self.assertEqual(self.radar, Radar(x=10,y=10))\n self.assertTrue(np.array_equal(self.attacker.attack_drift,np.array([[0,0,10]]).T))\n\n def test_listen_measurement_1_step_attack(self):\n measurement = np.array([[10,10,10]]).T\n tag = 1\n time = 1\n labeled_measurement = LabeledMeasurement(tag = tag, time = time, value = measurement)\n\n modified_measurement = np.array([[9.,0.,0.]]).T\n tag = 1\n time = 1\n modified_labeled_measurement = LabeledMeasurement(tag = tag, time = time, value = modified_measurement)\n\n self.attacker.t0 = 0\n computed_measurement = self.attacker.listen_measurement(labeled_measurement)\n print(computed_measurement)\n self.assertEqual(modified_labeled_measurement,computed_measurement)\n\n# def test_attacked_vectors(self):\n# pass\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.allclose",
"numpy.array_equal",
"scipy.linalg.block_diag",
"numpy.linalg.inv",
"numpy.array"
],
[
"numpy.eye",
"numpy.array",
"numpy.array_equal",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Yosshi999/tcav-TF2
|
[
"cb785502bb58d5bd9278424bce1fab6275a9c3b1"
] |
[
"tcav/cav_test.py"
] |
[
"\"\"\"\nCopyright 2018 Google LLC\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n https://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport pickle\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn import linear_model\nfrom six.moves import range\nfrom tcav.cav import CAV, get_or_train_cav\nfrom tcav.utils import HParams\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.platform import googletest\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string(name='tcav_test_tmpdir', default='/tmp',\n help='Temporary directory for test files')\n\nclass CavTest(googletest.TestCase):\n\n def setUp(self):\n \"\"\"Makes a cav instance and writes it to tmp direcotry.\n\n The cav instance uses preset values.\n \"\"\"\n self.hparams = HParams(\n model_type='linear', alpha=.01, max_iter=1000, tol=1e-3)\n self.concepts = ['concept1', 'concept2']\n self.bottleneck = 'bottleneck'\n self.accuracies = {'concept1': 0.8, 'concept2': 0.5, 'overall': 0.65}\n self.cav_vecs = [[1, 2, 3], [4, 5, 6]]\n\n self.test_subdirectory = os.path.join(FLAGS.tcav_test_tmpdir, 'test')\n self.cav_dir = self.test_subdirectory\n self.cav_file_name = CAV.cav_key(self.concepts, self.bottleneck,\n self.hparams.model_type,\n self.hparams.alpha) + '.pkl'\n self.save_path = os.path.join(self.cav_dir, self.cav_file_name)\n self.cav = CAV(self.concepts, self.bottleneck, self.hparams)\n # pretend that it was trained and cavs are stored\n self.cav.cavs = np.array(self.cav_vecs)\n shape = (1, 3)\n self.acts = {\n concept: {\n self.bottleneck: np.tile(i * np.ones(shape), (4, 1))\n } for i, concept in enumerate(self.concepts)\n }\n\n if os.path.exists(self.cav_dir):\n shutil.rmtree(self.cav_dir)\n os.mkdir(self.cav_dir)\n with tf.io.gfile.GFile(self.save_path, 'w') as pkl_file:\n pickle.dump({\n 'concepts': self.concepts,\n 'bottleneck': self.bottleneck,\n 'hparams': self.hparams,\n 'accuracies': self.accuracies,\n 'cavs': self.cav_vecs,\n 'saved_path': self.save_path\n }, pkl_file)\n\n def test_default_hparams(self):\n hparam = CAV.default_hparams()\n self.assertEqual(hparam.alpha, 0.01)\n self.assertEqual(hparam.model_type, 'linear')\n\n def test_load_cav(self):\n \"\"\"Load up the cav file written in setup function and check values.\n \"\"\"\n cav_instance = CAV.load_cav(self.save_path)\n self.assertEqual(cav_instance.concepts, self.concepts)\n self.assertEqual(cav_instance.cavs, self.cav_vecs)\n\n def test_cav_key(self):\n self.assertEqual(\n self.cav.cav_key(self.concepts, self.bottleneck,\n self.hparams.model_type, self.hparams.alpha),\n '-'.join(self.concepts) + '-' + self.bottleneck + '-' +\n self.hparams.model_type + '-' + str(self.hparams.alpha))\n\n def test_check_cav_exists(self):\n exists = self.cav.check_cav_exists(self.cav_dir, self.concepts,\n self.bottleneck, self.hparams)\n self.assertTrue(exists)\n\n def test__create_cav_training_set(self):\n x, labels, labels2text = self.cav._create_cav_training_set(\n self.concepts, self.bottleneck, 
self.acts)\n # check values of some elements.\n self.assertEqual(x[0][0], 0)\n self.assertEqual(x[5][0], 1)\n self.assertEqual(labels[0], 0)\n self.assertEqual(labels[5], 1)\n self.assertEqual(labels2text[0], 'concept1')\n\n def test_perturb_act(self):\n perturbed = self.cav.perturb_act(\n np.array([1., 0, 1.]), 'concept1', operation=np.add, alpha=1.0)\n self.assertEqual(2., perturbed[0])\n self.assertEqual(2., perturbed[1])\n self.assertEqual(4., perturbed[2])\n\n def test_get_key(self):\n self.assertEqual(\n CAV.cav_key(self.concepts, self.bottleneck, self.hparams.model_type,\n self.hparams.alpha),\n '-'.join([str(c) for c in self.concepts]) + '-' + self.bottleneck + '-'\n + self.hparams.model_type + '-' + str(self.hparams.alpha))\n\n def test_get_direction(self):\n idx_concept1 = self.cav.concepts.index('concept1')\n cav_directly_from_member = self.cav.cavs[idx_concept1]\n cav_via_get_direction = self.cav.get_direction('concept1')\n for i in range(len(cav_directly_from_member)):\n self.assertEqual(cav_directly_from_member[i], cav_via_get_direction[i])\n\n def test_train(self):\n self.cav.train({c: self.acts[c] for c in self.concepts})\n # check values of some elements.\n # the two coefficients of the classifier must be negative.\n self.assertLess(self.cav.cavs[0][0] * self.cav.cavs[1][0], 0)\n\n def test__train_lm(self):\n lm = linear_model.SGDClassifier(alpha=self.hparams.alpha)\n acc = self.cav._train_lm(lm, np.array([[0], [0], [0], [1], [1], [1]]),\n np.array([0, 0, 0, 1, 1, 1]), {\n 0: 0,\n 1: 1\n })\n # the given data is so easy it should get this almost perfect.\n self.assertGreater(acc[0], 0.99)\n self.assertGreater(acc[1], 0.99)\n\n def test_get_or_train_cav_save_test(self):\n cav_instance = get_or_train_cav(\n self.concepts,\n self.bottleneck,\n self.acts,\n cav_dir=self.cav_dir,\n cav_hparams=self.hparams)\n # check values of some elements.\n self.assertEqual(cav_instance.cavs[0][0], self.cav_vecs[0][0])\n self.assertEqual(cav_instance.cavs[1][2], self.cav_vecs[1][2])\n\n\nif __name__ == '__main__':\n googletest.main()\n"
] |
[
[
"tensorflow.io.gfile.GFile",
"numpy.ones",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.platform.flags.DEFINE_string",
"numpy.array",
"sklearn.linear_model.SGDClassifier"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"0.12",
"1.0",
"1.2",
"1.4"
]
}
] |
markus583/alibi
|
[
"ee709d6296b0d803707bce2ed8a47488cd9e9cee"
] |
[
"alibi/utils/visualization.py"
] |
[
"import numpy as np\nimport warnings\nfrom enum import Enum\nfrom matplotlib import pyplot as plt\nfrom matplotlib.pyplot import figure, axis\nfrom matplotlib.figure import Figure\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom numpy import ndarray\nfrom typing import Union, Tuple\n\n\n# the following code was borrowed from the captum library in\n# https://github.com/pytorch/captum/blob/master/captum/attr/_utils/visualization.py\nclass ImageVisualizationMethod(Enum):\n heat_map = 1\n blended_heat_map = 2\n original_image = 3\n masked_image = 4\n alpha_scaling = 5\n\n\nclass VisualizeSign(Enum):\n positive = 1\n absolute_value = 2\n negative = 3\n all = 4\n\n\ndef _prepare_image(attr_visual: ndarray):\n return np.clip(attr_visual.astype(int), 0, 255)\n\n\ndef _normalize_scale(attr: ndarray, scale_factor: float):\n if abs(scale_factor) < 1e-5:\n warnings.warn(\n \"Attempting to normalize by value approximately 0, skipping normalization.\"\n \"This likely means that attribution values are all close to 0.\"\n )\n return np.clip(attr, -1, 1)\n attr_norm = attr / scale_factor\n return np.clip(attr_norm, -1, 1)\n\n\ndef _cumulative_sum_threshold(values: ndarray, percentile: Union[int, float]):\n # given values should be non-negative\n assert percentile >= 0 and percentile <= 100, (\n \"Percentile for thresholding must be \" \"between 0 and 100 inclusive.\"\n )\n sorted_vals = np.sort(values.flatten())\n cum_sums = np.cumsum(sorted_vals)\n threshold_id = np.where(cum_sums >= cum_sums[-1] * 0.01 * percentile)[0][0]\n return sorted_vals[threshold_id]\n\n\ndef _normalize_image_attr(\n attr: ndarray, sign: str, outlier_perc: Union[int, float] = 2\n):\n attr_combined = np.sum(attr, axis=2)\n # Choose appropriate signed values and rescale, removing given outlier percentage.\n if VisualizeSign[sign] == VisualizeSign.all:\n threshold = _cumulative_sum_threshold(np.abs(attr_combined), 100 - outlier_perc)\n elif VisualizeSign[sign] == VisualizeSign.positive:\n attr_combined = (attr_combined > 0) * attr_combined\n threshold = _cumulative_sum_threshold(attr_combined, 100 - outlier_perc) # type: ignore\n elif VisualizeSign[sign] == VisualizeSign.negative:\n attr_combined = (attr_combined < 0) * attr_combined\n threshold = -1 * _cumulative_sum_threshold(\n np.abs(attr_combined), 100 - outlier_perc\n )\n elif VisualizeSign[sign] == VisualizeSign.absolute_value:\n attr_combined = np.abs(attr_combined)\n threshold = _cumulative_sum_threshold(attr_combined, 100 - outlier_perc) # type: ignore\n else:\n raise AssertionError(\"Visualize Sign type is not valid.\")\n return _normalize_scale(attr_combined, threshold) # type: ignore\n\n\ndef visualize_image_attr(\n attr: ndarray,\n original_image: Union[None, ndarray] = None,\n method: str = \"heat_map\",\n sign: str = \"absolute_value\",\n plt_fig_axis: Union[None, Tuple[figure, axis]] = None,\n outlier_perc: Union[int, float] = 2,\n cmap: Union[None, str] = None,\n alpha_overlay: float = 0.5,\n show_colorbar: bool = False,\n title: Union[None, str] = None,\n fig_size: Tuple[int, int] = (6, 6),\n use_pyplot: bool = True,\n):\n r\"\"\"\n Visualizes attribution for a given image by normalizing attribution values\n of the desired sign (positive, negative, absolute value, or all) and displaying\n them using the desired mode in a matplotlib figure.\n\n Parameters\n ----------\n\n attr\n Numpy array corresponding to attributions to be\n visualized. 
Shape must be in the form (H, W, C), with\n channels as last dimension. Shape must also match that of\n the original image if provided.\n original_image\n Numpy array corresponding to\n original image. Shape must be in the form (H, W, C), with\n channels as the last dimension. Image can be provided either\n with float values in range 0-1 or int values between 0-255.\n This is a necessary argument for any visualization method\n which utilizes the original image.\n method\n Chosen method for visualizing attribution.\n Supported options are:\n 1. `heat_map` - Display heat map of chosen attributions\n 2. `blended_heat_map` - Overlay heat map over greyscale\n version of original image. Parameter alpha_overlay\n corresponds to alpha of heat map.\n 3. `original_image` - Only display original image.\n 4. `masked_image` - Mask image (pixel-wise multiply)\n by normalized attribution values.\n 5. `alpha_scaling` - Sets alpha channel of each pixel\n to be equal to normalized attribution value.\n Default: `heat_map`\n sign\n Chosen sign of attributions to visualize. Supported\n options are:\n 1. `positive` - Displays only positive pixel attributions.\n 2. `absolute_value` - Displays absolute value of\n attributions.\n 3. `negative` - Displays only negative pixel attributions.\n 4. `all` - Displays both positive and negative attribution\n values. This is not supported for `masked_image` or\n `alpha_scaling` modes, since signed information cannot\n be represented in these modes.\n\n plt_fig_axis\n Tuple of matplotlib.pyplot.figure and axis\n on which to visualize. If None is provided, then a new figure\n and axis are created.\n\n outlier_perc\n Top attribution values which\n correspond to a total of outlier_perc percentage of the\n total attribution are set to 1 and scaling is performed\n using the minimum of these values. For sign=`all`, outliers a\n nd scale value are computed using absolute value of\n attributions.\n\n cmap\n String corresponding to desired colormap for\n heatmap visualization. This defaults to \"Reds\" for negative\n sign, \"Blues\" for absolute value, \"Greens\" for positive sign,\n and a spectrum from red to green for all. Note that this\n argument is only used for visualizations displaying heatmaps.\n\n alpha_overlay\n Alpha to set for heatmap when using\n `blended_heat_map` visualization mode, which overlays the\n heat map over the greyscaled original image.\n\n show_colorbar\n Displays colorbar for heatmap below\n the visualization. If given method does not use a heatmap,\n then a colormap axis is created and hidden. This is\n necessary for appropriate alignment when visualizing\n multiple plots, some with colorbars and some without.\n\n title\n Title string for plot. If None, no title is set.\n\n fig_size\n Size of figure created.\n\n use_pyplot\n If true, uses pyplot to create and show\n figure and displays the figure after creating. If False,\n uses Matplotlib object oriented API and simply returns a\n figure object without showing.\n\n Returns\n -------\n 2-element tuple of **figure**, **axis**:\n - **figure** (*matplotlib.pyplot.figure*):\n Figure object on which visualization\n is created. If plt_fig_axis argument is given, this is the\n same figure provided.\n - **axis** (*matplotlib.pyplot.axis*):\n Axis object on which visualization\n is created. 
If plt_fig_axis argument is given, this is the\n same axis provided.\n\n \"\"\"\n # Create plot if figure, axis not provided\n if plt_fig_axis is not None:\n plt_fig, plt_axis = plt_fig_axis\n else:\n if use_pyplot:\n plt_fig, plt_axis = plt.subplots(figsize=fig_size)\n else:\n plt_fig = Figure(figsize=fig_size)\n plt_axis = plt_fig.subplots()\n\n if original_image is not None:\n if np.max(original_image) <= 1.0:\n original_image = _prepare_image(original_image * 255)\n else:\n assert (\n ImageVisualizationMethod[method] == ImageVisualizationMethod.heat_map\n ), \"Original Image must be provided for any visualization other than heatmap.\"\n\n # Remove ticks and tick labels from plot.\n plt_axis.xaxis.set_ticks_position(\"none\")\n plt_axis.yaxis.set_ticks_position(\"none\")\n plt_axis.set_yticklabels([])\n plt_axis.set_xticklabels([])\n\n heat_map = None\n # Show original image\n if ImageVisualizationMethod[method] == ImageVisualizationMethod.original_image:\n plt_axis.imshow(original_image)\n else:\n # Choose appropriate signed attributions and normalize.\n norm_attr = _normalize_image_attr(attr, sign, outlier_perc)\n\n # Set default colormap and bounds based on sign.\n if VisualizeSign[sign] == VisualizeSign.all:\n default_cmap = LinearSegmentedColormap.from_list(\n \"RdWhGn\", [\"red\", \"white\", \"green\"]\n )\n vmin, vmax = -1, 1\n elif VisualizeSign[sign] == VisualizeSign.positive:\n default_cmap = \"Greens\"\n vmin, vmax = 0, 1\n elif VisualizeSign[sign] == VisualizeSign.negative:\n default_cmap = \"Reds\"\n vmin, vmax = 0, 1\n elif VisualizeSign[sign] == VisualizeSign.absolute_value:\n default_cmap = \"Blues\"\n vmin, vmax = 0, 1\n else:\n raise AssertionError(\"Visualize Sign type is not valid.\")\n cmap = cmap if cmap is not None else default_cmap\n\n # Show appropriate image visualization.\n if ImageVisualizationMethod[method] == ImageVisualizationMethod.heat_map:\n heat_map = plt_axis.imshow(norm_attr, cmap=cmap, vmin=vmin, vmax=vmax)\n elif (\n ImageVisualizationMethod[method]\n == ImageVisualizationMethod.blended_heat_map\n ):\n plt_axis.imshow(np.mean(original_image, axis=2), cmap=\"gray\")\n heat_map = plt_axis.imshow(\n norm_attr, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha_overlay\n )\n elif ImageVisualizationMethod[method] == ImageVisualizationMethod.masked_image:\n assert VisualizeSign[sign] != VisualizeSign.all, (\n \"Cannot display masked image with both positive and negative \"\n \"attributions, choose a different sign option.\"\n )\n plt_axis.imshow(\n _prepare_image(original_image * np.expand_dims(norm_attr, 2))\n )\n elif ImageVisualizationMethod[method] == ImageVisualizationMethod.alpha_scaling:\n assert VisualizeSign[sign] != VisualizeSign.all, (\n \"Cannot display alpha scaling with both positive and negative \"\n \"attributions, choose a different sign option.\"\n )\n plt_axis.imshow(\n np.concatenate(\n [\n original_image,\n _prepare_image(np.expand_dims(norm_attr, 2) * 255),\n ],\n axis=2,\n )\n )\n else:\n raise AssertionError(\"Visualize Method type is not valid.\")\n\n # Add colorbar. If given method is not a heatmap and no colormap is relevant,\n # then a colormap axis is created and hidden. 
This is necessary for appropriate\n # alignment when visualizing multiple plots, some with heatmaps and some\n # without.\n if show_colorbar:\n axis_separator = make_axes_locatable(plt_axis)\n colorbar_axis = axis_separator.append_axes(\"bottom\", size=\"5%\", pad=0.1)\n if heat_map:\n plt_fig.colorbar(heat_map, orientation=\"horizontal\", cax=colorbar_axis)\n else:\n colorbar_axis.axis(\"off\")\n if title:\n plt_axis.set_title(title)\n\n if use_pyplot:\n plt.show()\n\n return plt_fig, plt_axis\n"
] |
[
[
"numpy.expand_dims",
"numpy.abs",
"numpy.clip",
"matplotlib.figure.Figure",
"numpy.cumsum",
"matplotlib.pyplot.subplots",
"numpy.max",
"numpy.mean",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.show",
"numpy.where",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MarkMoretto/python-examples-main
|
[
"37b8c41d2f175029f4536ca970f037ff19b4e951",
"37b8c41d2f175029f4536ca970f037ff19b4e951"
] |
[
"clustering/old_patient_clustering_script.py",
"notebook-samples/unsupervised/pred_electricity_consumption.py"
] |
[
"\n\"\"\"\nTopic: Claims Grouping Analysis Exercise\nAuthor: Mark Moretto\nDate Created: 05/10/2018\n\"\"\"\n\n\nimport gc\nimport os\nimport sys\nimport pyodbc\nimport random\nimport textwrap\nimport warnings\nimport numpy as np\nimport pandas as pd\nfrom time import time\nfrom time import sleep\nfrom copy import deepcopy\nfrom datetime import timedelta\nfrom contextlib import suppress\n\nfrom subprocess import call\nfrom subprocess import check_output\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.exceptions import DataConversionWarning\n\nhomeDir = r'C:\\path\\to\\project\\dir'\nsys.path.append(homeDir)\n\nfrom sklearn.cluster import KMeans\nimport BIC\n\ndef checkForBIC():\n modName = r'BIC'\n if modName not in sys.modules:\n try:\n from ..HelperScripts import BIC\n except:\n from .HelperScripts import BIC\n if modName not in sys.modules:\n try:\n from MyModules.Stats import BIC\n except ImportError:\n print('Warning! Module {} not imported!'.format(modName))\n\n\npd.options.mode.chained_assignment = None\npd.set_option('display.max_colwidth', -1)\npd.set_option('display.float_format', lambda x: '%.4f' % x)\ngc.enable()\n\nmainDir = r'C:\\path\\to\\project\\dir' # Folder for main project\ntempOutPath = r'C:\\path\\to\\temporary\\dir' # Folder for holding temporary files.\neLog = r'C:\\path\\to\\project\\dir\\errors.txt'\n\nmySchema = r'MyTestDB'\ntblName = 'DataClusters'\n\nquery1 = textwrap.dedent(\"\"\"\n\"\"\")\n\n\ndef getSqlData(input_query=query1):\n size = 0\n chunksize = 100000\n chunkList = []\n print('Reading data from SQL Server...')\n server = 'my_server'\n database = 'MyTestDB'\n conn = pyodbc.connect((r'DRIVER={SQL Server Native Client 11.0};SERVER={server=};DATABASE={database=};MARS_Connection=Yes;Trusted_Connection=yes'))\n conn.autocommit = True\n cursor = conn.cursor()\n cursor.execute(input_query)\n conn.commit()\n query2 = \"SELECT * FROM ##_temp_table\"\n print('Current progress:')\n for chunk in pd.read_sql_query(query2, conn, chunksize=chunksize):\n size += chunksize\n print('\\t~{:,} rows read'.format(size))\n chunkList.append(chunk)\n df = pd.concat(chunkList, axis=0)\n conn.close()\n return df\n\n\ndef timeConversion(float_time):\n if type(float_time) in [float, int]:\n float_time = np.float(float_time)\n timeMain = str(timedelta(seconds=float_time))\n t_h, t_m, t_s = timeMain.split('.')[0].split(':')\n if t_h == '0':\n print('\\nTime elapsed: {} minutes, {} seconds'.format(int(t_m), int(t_s)))\n elif t_m == '0':\n print('\\nTime elapsed: {} seconds'.format(int(t_s)))\n elif t_s == '0':\n print('\\nError! Issue with data import!')\n else:\n print('\\nTime elapsed: {} hours, {} minutes, {} seconds'.format(int(t_h), int(t_m), int(t_s)))\n else:\n print('\\nError! 
Time needs to be in float format!')\n\n\ndef trincateClusterTable(table_path):\n truncateTblSql = r'TRUNCATE TABLE {}'.format(table_path)\n server = 'my_server'\n database = 'MyTestDB'\n conn = pyodbc.connect((r'DRIVER={SQL Server Native Client 11.0};SERVER={server=};DATABASE={database=};MARS_Connection=Yes;Trusted_Connection=yes'))\n conn.autocommit = True\n cursor = conn.cursor()\n cursor.execute(truncateTblSql)\n conn.commit()\n conn.close()\n\n\ndef importData():\n t_start = time()\n data = getSqlData()\n t_end = time() - t_start\n timeConversion(t_end)\n return data\n\n\ndef cleanData():\n\n from sklearn.preprocessing import Imputer\n impute = Imputer(missing_values='NaN', strategy='median', axis=0)\n\n mainDF = importData()\n # dataDF = getSqlData()\n listDict = {k:v for k, v in enumerate(mainDF)}\n toExclude = [0]\n df = mainDF[[listDict[k] for k in listDict if k not in toExclude]]\n\n X_array = impute.fit_transform(df)\n return X_array, mainDF\n\ndef runGMM(X):\n\n GMM_cov_types = [\n 'spherical', \n 'full'\n ]\n scoreMetrics = [\n 'braycurtis',\n 'canberra',\n 'chebyshev',\n 'correlation',\n 'cosine',\n 'euclidean',\n 'hamming',\n 'l1',\n 'l2',\n 'kulsinski',\n 'manhattan',\n 'minkowski',\n 'rogerstanimoto',\n 'sqeuclidean'\n ]\n metDict = {k + 1: v for k, v in enumerate(scoreMetrics)}\n randMetrics = random.sample(range((len(metDict.keys())) + 1), 5)\n # print('\\nThe randomly selected evaluation metrics are: \\n\\t\\'{}\\''.format('\\', \\n\\t\\''.join([metDict[k] for k in metDict.keys() if k in randMetrics])))\n\n eval_list = []\n lowBic = np.infty\n lowScore = np.infty\n\n dd = dict()\n ed = dict()\n\n def warnList():\n warnings.warn(\"ConversionWarning\", DataConversionWarning)\n warnings.warn(\"ConvergeWarning\", ConvergenceWarning)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n warnList()\n np.seterr(divide='ignore', invalid='ignore')\n\n for item in GMM_cov_types:\n print('\\nCurrently evaluating with covariance \\'{}\\''.format(item))\n dd[item] = dict()\n ed[item] = dict()\n t_start_o = time()\n gmm = GaussianMixture(n_components=10, covariance_type=item, max_iter=100)\n gmm.fit(X)\n # tempLabel = gmm.predict(X)\n # gc.collect(2)\n # for k in metDict.keys():\n # if k in randMetrics:\n # dd[item][metDict[k]] = dict()\n # silScore = silhouette_score(X, labels=tempLabel, metric = str(metDict[k]), sample_size = 1000)\n # print(\"\\tThe avg. 
silhouette score for metric \\'{}\\' is: {:0.3f}\".format(metDict[k], silScore))\n # dd[item][metDict[k]] = silScore\n vBic = gmm.bic(X)\n vAIC = gmm.aic(X)\n print('The BIC for \\'{}\\' is: {:<35.6f}'.format(item, vBic))\n eval_list.append(vBic)\n ed[item] = vBic\n dd[item]['bic'] = vBic\n dd[item]['aic'] = vAIC\n t_end_o = time() - t_start_o\n timeConversion(t_end_o)\n if eval_list[-1] < lowBic:\n lowBic = eval_list[-1]\n bestModel = gmm\n gc.collect(2)\n\n # labelsGMM = bestModel.predict(X)\n # labels = labelsGMM + 1\n return bestModel, dd, ed\n\n\ndef runKMeans(X):\n\n _bic = BIC.compute_bic\n\n eval_list = []\n lowBIC = np.infty\n\n thresholds = [0.0001, 0.0002, 0.0005]\n dd = dict()\n ed = dict()\n\n for thresh in thresholds:\n dd[thresh] = dict()\n ed[thresh] = dict()\n t_start = time()\n print('\\nNow running K-Means with threshold \\'{}\\''.format(thresh))\n kmm = KMeans(n_clusters=10, max_iter=100, tol=thresh)\n kmm.fit(X)\n vBIC = _bic(kmm, X)\n print('\\tThe BIC for threshold \\'{}\\' is: {:<20.6f}'.format(thresh, vBIC))\n eval_list.append(vBIC)\n ed[thresh] = vBIC\n dd[thresh]['bic'] = dict()\n dd[thresh]['bic'] = vBIC\n t_end = time() - t_start\n timeConversion(t_end)\n if eval_list[-1] < lowBIC:\n lowBIC = eval_list[-1]\n bestModel = kmm\n\n return bestModel, dd, ed\n\n\ndef modelCompare():\n\n X, df_orig = cleanData()\n ### Capture the model and relvant dictionary from each run\n bestGMM, _, eDictGMM = runGMM(X)\n bestKMM, _, eDictKMM = runKMeans(X)\n\n ### Get minimum BIC values from each model\n Key_GMM = min(eDictGMM, key=eDictGMM.get)\n Key_KMM = min(eDictKMM, key=eDictKMM.get)\n\n ### Determine the lowest BIC and make that the selected model\n if eDictGMM[Key_GMM] < eDictKMM[Key_KMM]:\n bestModel = bestGMM\n modelName = 'Gaussian Mixed'\n else:\n bestModel = bestKMM\n modelName = 'K-Means'\n\n ### Run the model to get the new labels and add 1 to bring them\n ### into a range of 1 - 10\n vLabels = bestModel.predict(X)\n labelOut = vLabels + 1\n\n ### Return the reformatted labels and model name\n return labelOut, modelName, df_orig\n\n\ndef reformatAndLoad():\n\n labels, topModelName, main_df = modelCompare()\n\n main_df['Groups'] = labels.ravel()[:]\n df_out = main_df[['DurableKey','Groups']]\n df_out['DurableKey'] = df_out['DurableKey'].apply(lambda x: str(x).strip())\n\n return df_out, topModelName\n\ndef loadToSSMS(df, schema=mySchema, tableName=tblName, path_out=tempOutPath):\n\n server = r'ClaritySNAP'\n database = r'EDWUsersDB'\n\n shortTable = '{}.{}'.format(schema, tableName)\n tableNameFull = '{}.{}.{}'.format(database, schema, tableName)\n tableNameBracket = '[{}].[{}].[{}]'.format(database, schema, tableName)\n\n cFormatFile = r'C:\\Users\\mmorett1\\Desktop\\formatFiles\\{}_Format.fmt'.format(tableName)\n call('bcp {} format nul -T -c -t` -S {} -f \"{}\"'.format(tableNameFull, server, cFormatFile))\n\n tempFileName = '{}_temp.txt'.format(tableName)\n outPathFull = os.path.join(path_out, tempFileName)\n df.to_csv(outPathFull, sep='`', index=False)\n\n print('Clearing out old data...')\n trincateClusterTable(tableNameBracket)\n\n print('Loading new data...')\n g = call('bcp {} in \"{}\" -S {} -T -t` -d {} -f \"{}\" -e \"{}\" -F 2 -r \"0x0a\"'.format(shortTable, outPathFull, server, database, cFormatFile, eLog))\n if g != 0:\n raise BaseException(\"Failed to load data!\")\n \n\n print('\\nData load complete!')\n return outPathFull\n\ndef main():\n checkForBIC()\n # X, df = cleanData()\n # labl, modName, df = modelCompare()\n df, modName = reformatAndLoad()\n 
outPath = loadToSSMS(df)\n print('{} refresh complete!'.format(tblName))\n print('Today\\'s best model was: \\'{}\\''.format(modName))\n with suppress(FileNotFoundError):\n os.remove(outPath)\n input('Press any key to exit.')\n sleep(1)\n print('Goodbye!')\n sleep(1)\n sys.exit(0)\n\n\nif __name__ == '__main__':\n main()\n",
"\n\"\"\"\nPurpose: Unsupervised learning sampler\nDate created: 2020-11-06\n\nRef repo: https://github.com/White-Link/UnsupervisedScalableRepresentationLearningTimeSeries\n\nLocal folder: C:/Users/Work1/Desktop/Info/GitHub/python-examples-main/notebook-samples/unsupervised\n\nContributor(s):\n Mark M.\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\n# Set local folder if developing/debugging\nmyuser = os.environ[\"username\"]\nPROJECT_FOLDER = Path(rf\"C:\\Users\\{myuser}\\Desktop\\Info\\GitHub\\python-examples-main\\notebook-samples\\unsupervised\")\nos.chdir(PROJECT_FOLDER)\n\n\nfrom UnsupervisedTSRepo import scikit_wrappers\n\nimport gc\nimport zipfile\nimport requests\nfrom io import BytesIO, StringIO\n\n# Data sci and dat processing imports\nimport scipy as sp\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nimport sklearn\nfrom sklearn import cluster\nfrom sklearn import neighbors\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n\npd.set_option(\"mode.chained_assignment\", None)\npd.set_option(\"display.width\", 120)\npd.set_option(\"display.date_yearfirst\", True)\npd.set_option(\"display.max_colwidth\", None)\npd.set_option(\"display.max_columns\", None)\npd.set_option(\"display.max_info_rows\", 10000)\n\ngc.enable()\n\n# Check for CUDA\nCUDA_TF: bool = False\nif torch.cuda.is_available():\n print(\"Using CUDA...\")\n CUDA_TF = True\n\nGPU = 0\n\n\n\nzip_url = \"https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip\"\n\ndef import_zipfile_data(URL = zip_url):\n with requests.Session() as s:\n tmp = s.get(URL)\n with zipfile.ZipFile(BytesIO(tmp.content)) as zfo:\n with zfo.open(\"household_power_consumption.txt\") as zfi:\n tmp = StringIO(zfi.read().decode(\"utf-8\"))\n data_ = pd.read_csv(tmp, sep=\";\", decimal=\",\", header=0, low_memory=False)\n del tmp\n return data_\n\n\ndata = import_zipfile_data(zip_url)\n\ndata.loc[:, \"Date\"] = pd.to_datetime(data.loc[:, \"Date\"], yearfirst=True)\ndata.loc[:, \"Time\"] = pd.to_datetime(data.loc[:, \"Time\"], format=\"%H:%M:%S\").dt.time\n\n\n#dataset = data.transpose(pd.array(data))[2].reshape(1, 1, -1)\n\n\n# Update missing values with the \"last seen\" value.\n# This probably works better for timeseries than other data\n# since order is important here.\ndataset = np.transpose(np.array(data))[2].reshape(1, 1, -1)\nfor idx in range(np.shape(dataset)[2]):\n if dataset[0, 0, idx] == \"?\":\n dataset[0, 0, idx] = dataset[0, 0, idx - 1]\ndataset = dataset.astype(np.float32)\n\n\n# Create training and testing sets\ntrain = dataset[:, :, :500000]\ntest = dataset[:, :, 500000:]\n\n# Normalization\n\nmu_ = np.mean(dataset)\nsigma_ = np.std(dataset)\n\nnormalize = lambda d, mean, sigma: (d - mean) / sigma\n\ndataset = normalize(dataset, mu_, sigma_)\ntrain = normalize(train, mu_, sigma_)\ntest = normalize(test, mu_, sigma_)\n\nprint(f\"Normalized data set metrics:\\n\\tMean: {np.mean(dataset)}\\n\\tVariance: {np.var(dataset)}\")\n\n# Feature learning\n\n# Train new model?\ntraining = True\n\nmodel_path = PROJECT_FOLDER.joinpath(r\"data\\HouseholdPowerConsumption_yearly\")\n\n# hyperparams = {\n# \"batch_size\": 1,\n# \"channels\": 30,\n# \"compared_length\": None,\n# \"depth\": 10,\n# \"nb_steps\": 400,\n# \"in_channels\": 1,\n# \"kernel_size\": 3,\n# \"penalty\": None,\n# \"early_stopping\": None,\n# \"lr\": 0.001,\n# \"nb_random_samples\": 10,\n# \"negative_penalty\": 1,\n# \"out_channels\": 160,\n# \"reduced_size\": 80,\n# 
\"cuda\": CUDA_TF,\n# \"gpu\": GPU\n# }\n\n# encoder_yearly = scikit_wrappers.CausalCNNEncoderClassifier()\n# encoder_yearly.set_params(**hyperparams)\n\n\n# if training:\n# encoder_yearly.fit_encoder(train, save_memory=True, verbose=True)\n# encoder_yearly.save_encoder(model_path.as_posix())\n# else:\n# encoder_yearly.load_encoder(model_path.as_posix())\n\n\n\ntorch.cuda.empty_cache()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"\" For local zipfile data\nfrom io import StringIO\nwith zipfile.ZipFile(\"household_power_consumption.zip\") as zfo:\n with zfo.open(\"household_power_consumption.txt\") as zfi:\n tmp = StringIO(zfi.read().decode(\"utf-8\"))\n data = pd.read_csv(tmp, sep=\";\", decimal=\",\", header=0, low_memory=False)\n del tmp\n\"\"\"\n\n\"\"\"\nimport hmac\nimport pickle\nimport hashlib\nimport binascii\ndef create_sha256_signature(key, message):\n byte_key = binascii.unhexlify(key)\n message = message.encode()\n return hmac.new(byte_key, message, hashlib.sha256).hexdigest().upper()\n\n\ncreate_sha256_signature(\"E49756B4C8FAB4E48222A3E7F3B97CC3\", \"TEST STRING\")\n\"\"\"\n"
] |
[
[
"pandas.concat",
"pandas.read_sql_query",
"sklearn.cluster.KMeans",
"sklearn.preprocessing.Imputer",
"numpy.seterr",
"sklearn.mixture.GaussianMixture",
"pandas.set_option",
"numpy.float"
],
[
"pandas.to_datetime",
"pandas.read_csv",
"torch.cuda.empty_cache",
"numpy.std",
"numpy.mean",
"numpy.shape",
"torch.cuda.is_available",
"numpy.var",
"pandas.set_option",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ijufumi/demo-python
|
[
"b48bdebde172ca581a48346a77b12c30ff202e73"
] |
[
"result_of_deep-learning-from-scratch/chap4/4_4_2.py"
] |
[
"import sys, os\nsys.path.append(os.pardir)\nimport numpy as np\nfrom common.functions import softmax, cross_entropy_error\nfrom common.gradient import numerical_gradient\n\nclass simpleNet:\n def __init__(self):\n self.W = np.random.randn(2, 3)\n\n def predict(self, x):\n return np.dot(x, self.W)\n\n def loss(self, x, t):\n z = self.predict(x)\n y = softmax(z)\n loss = cross_entropy_error(y, t)\n\n return loss\n\n\nnet = simpleNet()\nprint(net.W)\n\nx = np.array([0.6, 0.9])\np = net.predict(x)\nprint(p)\n\nmax = np.argmax(p)\nprint(max)\n\nt = np.array([0, 0, 1])\nloss = net.loss(x, t)\nprint(loss)\n\ndef f(W):\n return net.loss(x, t)\n\ndw = numerical_gradient(f, net.W)\nprint(dw)"
] |
[
[
"numpy.dot",
"numpy.array",
"numpy.random.randn",
"numpy.argmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZeusNightBolt/DIGITS
|
[
"3450cc683143415418af5ecdb1b17b02da3e2c79"
] |
[
"tools/create_db.py"
] |
[
"#!/usr/bin/env python2\n# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.\n\nimport sys\nimport os\nimport time\nimport argparse\nimport logging\nimport re\nimport shutil\nimport math\nimport random\nfrom collections import Counter\nimport threading\nimport Queue\n\n# Add path for DIGITS package\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport digits.config\ndigits.config.load_config()\nfrom digits import utils, log\n\nimport numpy as np\nimport PIL.Image\nimport lmdb\nimport h5py\nfrom cStringIO import StringIO\n# must call digits.config.load_config() before caffe to set the path\nimport caffe.io\nimport caffe_pb2\n\nlogger = logging.getLogger('digits.tools.create_db')\n\nclass Error(Exception):\n pass\n\nclass BadInputFileError(Error):\n \"\"\"Input file is empty\"\"\"\n pass\n\nclass ParseLineError(Error):\n \"\"\"Failed to parse a line in the input file\"\"\"\n pass\n\nclass LoadError(Error):\n \"\"\"Failed to load image[s]\"\"\"\n pass\n\nclass WriteError(Error):\n \"\"\"Failed to write image[s]\"\"\"\n pass\n\nclass Hdf5DatasetExtendError(Error):\n \"\"\"Failed to extend an hdf5 dataset\"\"\"\n pass\n\n\nclass DbWriter(object):\n \"\"\"\n Abstract class for writing to databases\n \"\"\"\n\n def __init__(self, output_dir, image_height, image_width, image_channels):\n self._dir = output_dir\n os.makedirs(output_dir)\n self._image_height = image_height\n self._image_width = image_width\n self._image_channels = image_channels\n self._count = 0\n\n def write_batch(self, batch):\n raise NotImplementedError\n\n def count(self):\n return self._count\n\nclass LmdbWriter(DbWriter):\n # TODO\n pass\n\nclass Hdf5Writer(DbWriter):\n \"\"\"\n A class for writing to HDF5 files\n \"\"\"\n LIST_FILENAME = 'list.txt'\n DTYPE = 'float32'\n\n def __init__(self, **kwargs):\n \"\"\"\n Keyword arguments:\n compression -- the type of dataset compression\n dset_limit -- the dataset size limit\n \"\"\"\n self._compression = kwargs.pop('compression', None)\n self._dset_limit = kwargs.pop('dset_limit', None)\n super(Hdf5Writer, self).__init__(**kwargs)\n self._db = None\n\n if self._dset_limit is not None:\n self._max_count = self._dset_limit / (\n self._image_height * self._image_width * self._image_channels)\n else:\n self._max_count = None\n\n def write_batch(self, batch):\n # convert batch to numpy arrays\n if batch[0][0].ndim == 2:\n # add channel axis for grayscale images\n data_batch = np.array([i[0][...,np.newaxis] for i in batch])\n else:\n data_batch = np.array([i[0] for i in batch])\n # Transpose to (channels, height, width)\n data_batch = data_batch.transpose((0,3,1,2))\n label_batch = np.array([i[1] for i in batch])\n\n\n # first batch\n if self._db is None:\n self._create_new_file(len(batch))\n self._db['data'][:] = data_batch\n self._db['label'][:] = label_batch\n self._count += len(batch)\n return\n\n current_count = self._db['data'].len()\n\n # will fit in current dataset\n if current_count + len(batch) <= self._max_count:\n self._db['data'].resize(current_count+len(batch),axis=0)\n self._db['label'].resize(current_count+len(batch),axis=0)\n self._db['data'][-len(batch):] = data_batch\n self._db['label'][-len(batch):] = label_batch\n self._count += len(batch)\n return\n\n # calculate how many will fit in current dataset\n split = self._max_count - current_count\n\n if split > 0:\n # put what we can into the current dataset\n self._db['data'].resize(self._max_count,axis=0)\n self._db['label'].resize(self._max_count,axis=0)\n 
self._db['data'][-split:] = data_batch[:split]\n self._db['label'][-split:] = label_batch[:split]\n self._count += split\n\n self._create_new_file(len(batch) - split)\n self._db['data'][:] = data_batch[split:]\n self._db['label'][:] = label_batch[split:]\n self._count += len(batch) - split\n\n def _create_new_file(self, initial_count):\n assert self._max_count is None or initial_count <= self._max_count, \\\n 'Your batch size is too large for your dataset limit - %d vs %d' % \\\n (initial_count, self._max_count)\n\n # close the old file\n if self._db is not None:\n self._db.close()\n mode = 'a'\n else:\n mode = 'w'\n\n # get the filename\n filename = self._new_filename()\n logger.info('Creating HDF5 database at \"%s\" ...' %\n os.path.join(*filename.split(os.sep)[-2:]))\n\n # update the list\n with open(self._list_filename(), mode) as outfile:\n outfile.write('%s\\n' % filename)\n\n # create the new file\n self._db = h5py.File(os.path.join(self._dir, filename), 'w')\n\n # initialize the datasets\n self._db.create_dataset('data',\n (initial_count,self._image_channels,\n self._image_height,self._image_width),\n maxshape=(self._max_count,self._image_channels,\n self._image_height,self._image_width),\n chunks=True, compression=self._compression, dtype=self.DTYPE)\n self._db.create_dataset('label',\n (initial_count,),\n maxshape=(self._max_count,),\n chunks=True, compression=self._compression, dtype=self.DTYPE)\n\n\n def _list_filename(self):\n return os.path.join(self._dir, self.LIST_FILENAME)\n\n def _new_filename(self):\n return '%s.h5' % self.count()\n\n\ndef create_db(input_file, output_dir,\n image_width, image_height, image_channels,\n backend,\n resize_mode = None,\n image_folder = None,\n shuffle = True,\n mean_files = None,\n **kwargs):\n \"\"\"\n Create a database of images from a list of image paths\n Raises exceptions on errors\n\n Arguments:\n input_file -- a textfile containing labelled image paths\n output_dir -- the location to store the created database\n image_width -- image resize width\n image_height -- image resize height\n image_channels -- image channels\n backend -- the DB format (lmdb/hdf5)\n\n Keyword arguments:\n resize_mode -- passed to utils.image.resize_image()\n shuffle -- if True, shuffle the images in the list before creating\n mean_files -- a list of mean files to save\n \"\"\"\n ### Validate arguments\n\n if not os.path.exists(input_file):\n raise ValueError('input_file does not exist')\n if os.path.exists(output_dir):\n logger.warning('removing existing database')\n if os.path.isdir(output_dir):\n shutil.rmtree(output_dir, ignore_errors=True)\n else:\n os.remove(output_dir)\n if image_width <= 0:\n raise ValueError('invalid image width')\n if image_height <= 0:\n raise ValueError('invalid image height')\n if image_channels not in [1,3]:\n raise ValueError('invalid number of channels')\n if resize_mode not in [None, 'crop', 'squash', 'fill', 'half_crop']:\n raise ValueError('invalid resize_mode')\n if image_folder is not None and not os.path.exists(image_folder):\n raise ValueError('image_folder does not exist')\n if mean_files:\n for mean_file in mean_files:\n if os.path.exists(mean_file):\n logger.warning('overwriting existing mean file \"%s\"!' 
% mean_file)\n else:\n dirname = os.path.dirname(mean_file)\n if not dirname:\n dirname = '.'\n if not os.path.exists(dirname):\n raise ValueError('Cannot save mean file at \"%s\"' % mean_file)\n compute_mean = bool(mean_files)\n\n ### Load lines from input_file into a load_queue\n\n load_queue = Queue.Queue()\n image_count = _fill_load_queue(input_file, load_queue, shuffle)\n\n # Start some load threads\n\n batch_size = _calculate_batch_size(image_count,\n bool(backend=='hdf5'), kwargs.get('hdf5_dset_limit'),\n image_channels, image_height, image_width)\n num_threads = _calculate_num_threads(batch_size, shuffle)\n write_queue = Queue.Queue(2*batch_size)\n summary_queue = Queue.Queue()\n\n for _ in xrange(num_threads):\n p = threading.Thread(target=_load_thread,\n args=(load_queue, write_queue, summary_queue,\n image_width, image_height, image_channels,\n resize_mode, image_folder, compute_mean)\n )\n p.daemon = True\n p.start()\n\n start = time.time()\n\n if backend == 'lmdb':\n _create_lmdb(image_count, write_queue, batch_size, output_dir,\n summary_queue, num_threads,\n mean_files, **kwargs)\n elif backend == 'hdf5':\n _create_hdf5(image_count, write_queue, batch_size, output_dir,\n image_width, image_height, image_channels,\n summary_queue, num_threads,\n mean_files, **kwargs)\n else:\n raise ValueError('invalid backend')\n\n logger.info('Database created after %d seconds.' % (time.time() - start))\n\ndef _create_lmdb(image_count, write_queue, batch_size, output_dir,\n summary_queue, num_threads,\n mean_files = None,\n encoding = None,\n lmdb_map_size = None,\n **kwargs):\n \"\"\"\n Create an LMDB\n\n Keyword arguments:\n encoding -- image encoding format\n lmdb_map_size -- the initial LMDB map size\n \"\"\"\n wait_time = time.time()\n threads_done = 0\n images_loaded = 0\n images_written = 0\n image_sum = None\n batch = []\n compute_mean = bool(mean_files)\n\n db = lmdb.open(output_dir,\n map_size=lmdb_map_size,\n map_async=True,\n max_dbs=0)\n\n while (threads_done < num_threads) or not write_queue.empty():\n\n # Send update every 2 seconds\n if time.time() - wait_time > 2:\n logger.debug('Processed %d/%d' % (images_written, image_count))\n wait_time = time.time()\n\n processed_something = False\n\n if not summary_queue.empty():\n result_count, result_sum = summary_queue.get()\n images_loaded += result_count\n # Update total_image_sum\n if compute_mean and result_count > 0 and result_sum is not None:\n if image_sum is None:\n image_sum = result_sum\n else:\n image_sum += result_sum\n threads_done += 1\n processed_something = True\n\n if not write_queue.empty():\n image, label = write_queue.get()\n datum = _array_to_datum(image, label, encoding)\n batch.append(datum)\n\n if len(batch) == batch_size:\n _write_batch_lmdb(db, batch, images_written)\n images_written += len(batch)\n batch = []\n processed_something = True\n\n if not processed_something:\n time.sleep(0.2)\n\n if len(batch) > 0:\n _write_batch_lmdb(db, batch, images_written)\n images_written += len(batch)\n\n if images_loaded == 0:\n raise LoadError('no images loaded from input file')\n logger.debug('%s images loaded' % images_loaded)\n\n if images_written == 0:\n raise WriteError('no images written to database')\n logger.info('%s images written to database' % images_written)\n\n if compute_mean:\n _save_means(image_sum, images_written, mean_files)\n\n db.close()\n\ndef _create_hdf5(image_count, write_queue, batch_size, output_dir,\n image_width, image_height, image_channels,\n summary_queue, num_threads,\n mean_files = 
None,\n compression = None,\n hdf5_dset_limit = None,\n **kwargs):\n \"\"\"\n Create an HDF5 file\n\n Keyword arguments:\n compression -- dataset compression format\n \"\"\"\n wait_time = time.time()\n threads_done = 0\n images_loaded = 0\n images_written = 0\n image_sum = None\n batch = []\n compute_mean = bool(mean_files)\n\n writer = Hdf5Writer(\n output_dir = output_dir,\n image_height = image_height,\n image_width = image_width,\n image_channels = image_channels,\n dset_limit = hdf5_dset_limit,\n compression = compression,\n )\n\n while (threads_done < num_threads) or not write_queue.empty():\n\n # Send update every 2 seconds\n if time.time() - wait_time > 2:\n logger.debug('Processed %d/%d' % (images_written, image_count))\n wait_time = time.time()\n\n processed_something = False\n\n if not summary_queue.empty():\n result_count, result_sum = summary_queue.get()\n images_loaded += result_count\n # Update total_image_sum\n if compute_mean and result_count > 0 and result_sum is not None:\n if image_sum is None:\n image_sum = result_sum\n else:\n image_sum += result_sum\n threads_done += 1\n processed_something = True\n\n if not write_queue.empty():\n batch.append(write_queue.get())\n\n if len(batch) == batch_size:\n writer.write_batch(batch)\n images_written += len(batch)\n batch = []\n processed_something = True\n\n if not processed_something:\n time.sleep(0.2)\n\n if len(batch) > 0:\n writer.write_batch(batch)\n images_written += len(batch)\n\n assert images_written == writer.count()\n\n if images_loaded == 0:\n raise LoadError('no images loaded from input file')\n logger.debug('%s images loaded' % images_loaded)\n\n if images_written == 0:\n raise WriteError('no images written to database')\n logger.info('%s images written to database' % images_written)\n\n if compute_mean:\n _save_means(image_sum, images_written, mean_files)\n\ndef _fill_load_queue(filename, queue, shuffle):\n \"\"\"\n Fill the queue with data from the input file\n Print the category distribution\n Returns the number of lines added to the queue\n\n NOTE: This can be slow on a large input file, but we need the total image\n count in order to report the progress, so we might as well read it all\n \"\"\"\n total_lines = 0\n valid_lines = 0\n distribution = Counter()\n\n with open(filename) as infile:\n if shuffle:\n lines = infile.readlines() # less memory efficient\n random.shuffle(lines)\n for line in lines:\n total_lines += 1\n try:\n result = _parse_line(line, distribution)\n valid_lines += 1\n queue.put(result)\n except ParseLineError:\n pass\n else:\n for line in infile: # more memory efficient\n total_lines += 1\n try:\n result = _parse_line(line, distribution)\n valid_lines += 1\n queue.put(result)\n except ParseLineError:\n pass\n\n logger.debug('%s total lines in file' % total_lines)\n if valid_lines == 0:\n raise BadInputFileError('No valid lines in input file')\n logger.info('%s valid lines in file' % valid_lines)\n\n for key in sorted(distribution):\n logger.debug('Category %s has %d images.' 
% (key, distribution[key]))\n\n return valid_lines\n\ndef _parse_line(line, distribution):\n \"\"\"\n Parse a line in the input file into (path, label)\n \"\"\"\n line = line.strip()\n if not line:\n raise ParseLineError\n\n # Expect format - [/]path/to/file.jpg 123\n match = re.match(r'(.+)\\s+(\\d+)\\s*$', line)\n if match is None:\n raise ParseLineError\n\n path = match.group(1)\n label = int(match.group(2))\n\n distribution[label] += 1\n\n return path, label\n\ndef _calculate_batch_size(image_count, is_hdf5=False, hdf5_dset_limit=None,\n image_channels=None, image_height=None, image_width=None):\n \"\"\"\n Calculates an appropriate batch size for creating this database\n \"\"\"\n if is_hdf5 and hdf5_dset_limit is not None:\n return min(100, image_count, hdf5_dset_limit/(image_channels*image_height*image_width))\n else:\n return min(100, image_count)\n\ndef _calculate_num_threads(batch_size, shuffle):\n \"\"\"\n Calculates an appropriate number of threads for creating this database\n \"\"\"\n if shuffle:\n return min(10, int(round(math.sqrt(batch_size))))\n else:\n #XXX This is the only way to preserve order for now\n # This obviously hurts performance considerably\n return 1\n\ndef _load_thread(load_queue, write_queue, summary_queue,\n image_width, image_height, image_channels,\n resize_mode, image_folder, compute_mean):\n \"\"\"\n Consumes items in load_queue\n Produces items to write_queue\n Stores cumulative results in summary_queue\n \"\"\"\n images_added = 0\n if compute_mean:\n image_sum = _initial_image_sum(image_width, image_height, image_channels)\n else:\n image_sum = None\n\n while not load_queue.empty():\n try:\n path, label = load_queue.get(True, 0.05)\n except Queue.Empty:\n continue\n\n # prepend path with image_folder, if appropriate\n if not utils.is_url(path) and image_folder and not os.path.isabs(path):\n path = os.path.join(image_folder, path)\n\n try:\n image = utils.image.load_image(path)\n except utils.errors.LoadImageError as e:\n logger.warning('[%s] %s: %s' % (path, type(e).__name__, e) )\n continue\n\n image = utils.image.resize_image(image,\n image_height, image_width,\n channels = image_channels,\n resize_mode = resize_mode,\n )\n\n if compute_mean:\n image_sum += image\n\n write_queue.put((image, label))\n images_added += 1\n\n summary_queue.put((images_added, image_sum))\n\ndef _initial_image_sum(width, height, channels):\n \"\"\"\n Returns an array of zeros that will be used to store the accumulated sum of images\n \"\"\"\n if channels == 1:\n return np.zeros((height, width), np.float64)\n else:\n return np.zeros((height, width, channels), np.float64)\n\ndef _array_to_datum(image, label, encoding):\n \"\"\"\n Create a caffe Datum from a numpy.ndarray\n \"\"\"\n if not encoding:\n # Transform to caffe's format requirements\n if image.ndim == 3:\n # Transpose to (channels, height, width)\n image = image.transpose((2,0,1))\n if image.shape[0] == 3:\n # channel swap\n # XXX see issue #59\n image = image[[2,1,0],...]\n elif image.ndim == 2:\n # Add a channels axis\n image = image[np.newaxis,:,:]\n else:\n raise Exception('Image has unrecognized shape: \"%s\"' % image.shape)\n datum = caffe.io.array_to_datum(image, label)\n else:\n datum = caffe_pb2.Datum()\n if image.ndim == 3:\n datum.channels = image.shape[2]\n else:\n datum.channels = 1\n datum.height = image.shape[0]\n datum.width = image.shape[1]\n datum.label = label\n\n s = StringIO()\n if encoding == 'png':\n PIL.Image.fromarray(image).save(s, format='PNG')\n elif encoding == 'jpg':\n 
PIL.Image.fromarray(image).save(s, format='JPEG', quality=90)\n else:\n raise ValueError('Invalid encoding type')\n datum.data = s.getvalue()\n datum.encoded = True\n return datum\n\ndef _write_batch_lmdb(db, batch, image_count):\n \"\"\"\n Write a batch to an LMDB database\n \"\"\"\n try:\n with db.begin(write=True) as lmdb_txn:\n for i, datum in enumerate(batch):\n key = '%08d_%d' % (image_count + i, datum.label)\n lmdb_txn.put(key, datum.SerializeToString())\n\n except lmdb.MapFullError:\n # double the map_size\n curr_limit = db.info()['map_size']\n new_limit = curr_limit*2\n logger.debug('Doubling LMDB map size to %sMB ...' % (new_limit>>20,))\n try:\n db.set_mapsize(new_limit) # double it\n except AttributeError as e:\n version = tuple(int(x) for x in lmdb.__version__.split('.'))\n if version < (0,87):\n raise Error('py-lmdb is out of date (%s vs 0.87)' % lmdb.__version__)\n else:\n raise e\n # try again\n _write_batch_lmdb(db, batch, image_count)\n\ndef _save_means(image_sum, image_count, mean_files):\n \"\"\"\n Save mean[s] to file\n \"\"\"\n mean = np.around(image_sum / image_count).astype(np.uint8)\n for mean_file in mean_files:\n if mean_file.lower().endswith('.npy'):\n np.save(mean_file, mean)\n elif mean_file.lower().endswith('.binaryproto'):\n data = mean\n # Transform to caffe's format requirements\n if data.ndim == 3:\n # Transpose to (channels, height, width)\n data = data.transpose((2,0,1))\n if data.shape[0] == 3:\n # channel swap\n # XXX see issue #59\n data = data[[2,1,0],...]\n elif mean.ndim == 2:\n # Add a channels axis\n data = data[np.newaxis,:,:]\n\n blob = caffe_pb2.BlobProto()\n blob.num = 1\n blob.channels, blob.height, blob.width = data.shape\n blob.data.extend(data.astype(float).flat)\n\n with open(mean_file, 'wb') as outfile:\n outfile.write(blob.SerializeToString())\n elif mean_file.lower().endswith(('.jpg', '.jpeg', '.png')):\n image = PIL.Image.fromarray(mean)\n image.save(mean_file)\n else:\n logger.warning('Unrecognized file extension for mean file: \"%s\"' % mean_file)\n continue\n\n logger.info('Mean saved at \"%s\"' % mean_file)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Create-Db tool - DIGITS')\n\n ### Positional arguments\n\n parser.add_argument('input_file',\n help='An input file of labeled images')\n parser.add_argument('output_dir',\n help='Path to the output database')\n parser.add_argument('width',\n type=int,\n help='width of resized images'\n )\n parser.add_argument('height',\n type=int,\n help='height of resized images'\n )\n\n ### Optional arguments\n\n parser.add_argument('-c', '--channels',\n type=int,\n default=3,\n help='channels of resized images (1 for grayscale, 3 for color [default])'\n )\n parser.add_argument('-r', '--resize_mode',\n help='resize mode for images (must be \"crop\", \"squash\" [default], \"fill\" or \"half_crop\")'\n )\n parser.add_argument('-m', '--mean_file', action='append',\n help=\"location to output the image mean (doesn't save mean if not specified)\")\n parser.add_argument('-f', '--image_folder',\n help='folder containing the images (if the paths in input_file are not absolute)')\n parser.add_argument('-s', '--shuffle',\n action='store_true',\n help='Shuffle images before saving'\n )\n parser.add_argument('-e', '--encoding',\n help = 'Image encoding format (jpg/png)'\n )\n parser.add_argument('-C', '--compression',\n help = 'Database compression format (gzip)'\n )\n parser.add_argument('-b', '--backend',\n default='lmdb',\n help = 'The database backend - lmdb[default] or 
hdf5')\n parser.add_argument('--lmdb_map_size',\n type=int,\n help = 'The initial map size for LMDB (in MB)')\n parser.add_argument('--hdf5_dset_limit',\n type=int,\n default=2**31,\n help = 'The size limit for HDF5 datasets')\n\n args = vars(parser.parse_args())\n\n if args['lmdb_map_size']:\n # convert from MB to B\n args['lmdb_map_size'] <<= 20\n\n try:\n create_db(args['input_file'], args['output_dir'],\n args['width'], args['height'], args['channels'],\n args['backend'],\n resize_mode = args['resize_mode'],\n image_folder = args['image_folder'],\n shuffle = args['shuffle'],\n mean_files = args['mean_file'],\n encoding = args['encoding'],\n compression = args['compression'],\n lmdb_map_size = args['lmdb_map_size'],\n hdf5_dset_limit = args['hdf5_dset_limit'],\n )\n except Exception as e:\n logger.error('%s: %s' % (type(e).__name__, e.message))\n raise\n\n"
] |
[
[
"numpy.around",
"numpy.array",
"numpy.zeros",
"numpy.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
henriquesimoes/humpback
|
[
"ba687a71f95ef9c9c30426eefae11a69efd6f942",
"ba687a71f95ef9c9c30426eefae11a69efd6f942"
] |
[
"solutions/2nd-place/bbox_model/utils.py",
"solutions/2nd-place/utils.py"
] |
[
"import torch\nimport cv2\nimport numpy as np\n\nimport sys\nimport os\nimport numpy as np\nimport torch\n\nclass Logger(object):\n def __init__(self, logfile):\n self.terminal = sys.stdout\n self.log = open(logfile, \"a\")\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n # this handles the flush command by doing nothing.\n # you might want to specify some extra behavior here.\n pass\n\ndef split4(data, max_stride, margin):\n splits = []\n data = torch.Tensor.numpy(data)\n _, c, z, h, w = data.shape\n\n w_width = np.ceil(float(w / 2 + margin) / max_stride).astype('int') * max_stride\n h_width = np.ceil(float(h / 2 + margin) / max_stride).astype('int') * max_stride\n pad = int(np.ceil(float(z) / max_stride) * max_stride) - z\n leftpad = pad / 2\n pad = [[0, 0], [0, 0], [leftpad, pad - leftpad], [0, 0], [0, 0]]\n data = np.pad(data, pad, 'constant', constant_values=-1)\n data = torch.from_numpy(data)\n splits.append(data[:, :, :, :h_width, :w_width])\n splits.append(data[:, :, :, :h_width, -w_width:])\n splits.append(data[:, :, :, -h_width:, :w_width])\n splits.append(data[:, :, :, -h_width:, -w_width:])\n\n return torch.cat(splits, 0)\n\n\ndef combine4(output, h, w):\n splits = []\n for i in range(len(output)):\n splits.append(output[i])\n\n output = np.zeros((\n splits[0].shape[0],\n h,\n w,\n splits[0].shape[3],\n splits[0].shape[4]), np.float32)\n\n h0 = output.shape[1] / 2\n h1 = output.shape[1] - h0\n w0 = output.shape[2] / 2\n w1 = output.shape[2] - w0\n\n splits[0] = splits[0][:, :h0, :w0, :, :]\n output[:, :h0, :w0, :, :] = splits[0]\n\n splits[1] = splits[1][:, :h0, -w1:, :, :]\n output[:, :h0, -w1:, :, :] = splits[1]\n\n splits[2] = splits[2][:, -h1:, :w0, :, :]\n output[:, -h1:, :w0, :, :] = splits[2]\n\n splits[3] = splits[3][:, -h1:, -w1:, :, :]\n output[:, -h1:, -w1:, :, :] = splits[3]\n\n return output\n\n\ndef split8(data, max_stride, margin):\n splits = []\n if isinstance(data, np.ndarray):\n c, z, h, w = data.shape\n else:\n _, c, z, h, w = data.size()\n\n z_width = np.ceil(float(z / 2 + margin) / max_stride).astype('int') * max_stride\n w_width = np.ceil(float(w / 2 + margin) / max_stride).astype('int') * max_stride\n h_width = np.ceil(float(h / 2 + margin) / max_stride).astype('int') * max_stride\n for zz in [[0, z_width], [-z_width, None]]:\n for hh in [[0, h_width], [-h_width, None]]:\n for ww in [[0, w_width], [-w_width, None]]:\n if isinstance(data, np.ndarray):\n splits.append(data[np.newaxis, :, zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1]])\n else:\n splits.append(data[:, :, zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1]])\n\n if isinstance(data, np.ndarray):\n return np.concatenate(splits, 0)\n else:\n return torch.cat(splits, 0)\n\n\ndef combine8(output, z, h, w):\n splits = []\n for i in range(len(output)):\n splits.append(output[i])\n\n output = np.zeros((\n z,\n h,\n w,\n splits[0].shape[3],\n splits[0].shape[4]), np.float32)\n\n z_width = z / 2\n h_width = h / 2\n w_width = w / 2\n i = 0\n for zz in [[0, z_width], [z_width - z, None]]:\n for hh in [[0, h_width], [h_width - h, None]]:\n for ww in [[0, w_width], [w_width - w, None]]:\n output[zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1], :, :] = splits[i][zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1],\n :, :]\n i = i + 1\n\n return output\n\n\ndef split16(data, max_stride, margin):\n splits = []\n _, c, z, h, w = data.size()\n\n z_width = np.ceil(float(z / 4 + margin) / max_stride).astype('int') * max_stride\n z_pos = [z 
* 3 / 8 - z_width / 2,\n z * 5 / 8 - z_width / 2]\n h_width = np.ceil(float(h / 2 + margin) / max_stride).astype('int') * max_stride\n w_width = np.ceil(float(w / 2 + margin) / max_stride).astype('int') * max_stride\n for zz in [[0, z_width], [z_pos[0], z_pos[0] + z_width], [z_pos[1], z_pos[1] + z_width], [-z_width, None]]:\n for hh in [[0, h_width], [-h_width, None]]:\n for ww in [[0, w_width], [-w_width, None]]:\n splits.append(data[:, :, zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1]])\n\n return torch.cat(splits, 0)\n\n\ndef combine16(output, z, h, w):\n splits = []\n for i in range(len(output)):\n splits.append(output[i])\n\n output = np.zeros((\n z,\n h,\n w,\n splits[0].shape[3],\n splits[0].shape[4]), np.float32)\n\n z_width = z / 4\n h_width = h / 2\n w_width = w / 2\n splitzstart = splits[0].shape[0] / 2 - z_width / 2\n z_pos = [z * 3 / 8 - z_width / 2,\n z * 5 / 8 - z_width / 2]\n i = 0\n for zz, zz2 in zip([[0, z_width], [z_width, z_width * 2], [z_width * 2, z_width * 3], [z_width * 3 - z, None]],\n [[0, z_width], [splitzstart, z_width + splitzstart], [splitzstart, z_width + splitzstart],\n [z_width * 3 - z, None]]):\n for hh in [[0, h_width], [h_width - h, None]]:\n for ww in [[0, w_width], [w_width - w, None]]:\n output[zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1], :, :] = splits[i][zz2[0]:zz2[1], hh[0]:hh[1], ww[0]:ww[1],\n :, :]\n i = i + 1\n\n return output\n\n\ndef split32(data, max_stride, margin):\n splits = []\n _, c, z, h, w = data.size()\n\n z_width = np.ceil(float(z / 2 + margin) / max_stride).astype('int') * max_stride\n w_width = np.ceil(float(w / 4 + margin) / max_stride).astype('int') * max_stride\n h_width = np.ceil(float(h / 4 + margin) / max_stride).astype('int') * max_stride\n\n w_pos = [w * 3 / 8 - w_width / 2,\n w * 5 / 8 - w_width / 2]\n h_pos = [h * 3 / 8 - h_width / 2,\n h * 5 / 8 - h_width / 2]\n\n for zz in [[0, z_width], [-z_width, None]]:\n for hh in [[0, h_width], [h_pos[0], h_pos[0] + h_width], [h_pos[1], h_pos[1] + h_width], [-h_width, None]]:\n for ww in [[0, w_width], [w_pos[0], w_pos[0] + w_width], [w_pos[1], w_pos[1] + w_width], [-w_width, None]]:\n splits.append(data[:, :, zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1]])\n\n return torch.cat(splits, 0)\n\n\ndef combine32(splits, z, h, w):\n output = np.zeros((\n z,\n h,\n w,\n splits[0].shape[3],\n splits[0].shape[4]), np.float32)\n\n z_width = int(np.ceil(float(z) / 2))\n h_width = int(np.ceil(float(h) / 4))\n w_width = int(np.ceil(float(w) / 4))\n splithstart = splits[0].shape[1] / 2 - h_width / 2\n splitwstart = splits[0].shape[2] / 2 - w_width / 2\n\n i = 0\n for zz in [[0, z_width], [z_width - z, None]]:\n\n for hh, hh2 in zip([[0, h_width], [h_width, h_width * 2], [h_width * 2, h_width * 3], [h_width * 3 - h, None]],\n [[0, h_width], [splithstart, h_width + splithstart], [splithstart, h_width + splithstart],\n [h_width * 3 - h, None]]):\n\n for ww, ww2 in zip(\n [[0, w_width], [w_width, w_width * 2], [w_width * 2, w_width * 3], [w_width * 3 - w, None]],\n [[0, w_width], [splitwstart, w_width + splitwstart], [splitwstart, w_width + splitwstart],\n [w_width * 3 - w, None]]):\n output[zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1], :, :] = splits[i][zz[0]:zz[1], hh2[0]:hh2[1],\n ww2[0]:ww2[1], :, :]\n i = i + 1\n\n return output\n\n\ndef split64(data, max_stride, margin):\n splits = []\n _, c, z, h, w = data.size()\n\n z_width = np.ceil(float(z / 4 + margin) / max_stride).astype('int') * max_stride\n w_width = np.ceil(float(w / 4 + margin) / max_stride).astype('int') * max_stride\n h_width = np.ceil(float(h / 4 + margin) 
/ max_stride).astype('int') * max_stride\n\n z_pos = [z * 3 / 8 - z_width / 2,\n z * 5 / 8 - z_width / 2]\n w_pos = [w * 3 / 8 - w_width / 2,\n w * 5 / 8 - w_width / 2]\n h_pos = [h * 3 / 8 - h_width / 2,\n h * 5 / 8 - h_width / 2]\n\n for zz in [[0, z_width], [z_pos[0], z_pos[0] + z_width], [z_pos[1], z_pos[1] + z_width], [-z_width, None]]:\n for hh in [[0, h_width], [h_pos[0], h_pos[0] + h_width], [h_pos[1], h_pos[1] + h_width], [-h_width, None]]:\n for ww in [[0, w_width], [w_pos[0], w_pos[0] + w_width], [w_pos[1], w_pos[1] + w_width], [-w_width, None]]:\n splits.append(data[:, :, zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1]])\n\n return torch.cat(splits, 0)\n\n\ndef combine64(output, z, h, w):\n splits = []\n for i in range(len(output)):\n splits.append(output[i])\n\n output = np.zeros((\n z,\n h,\n w,\n splits[0].shape[3],\n splits[0].shape[4]), np.float32)\n\n z_width = int(np.ceil(float(z) / 4))\n h_width = int(np.ceil(float(h) / 4))\n w_width = int(np.ceil(float(w) / 4))\n splitzstart = splits[0].shape[0] / 2 - z_width / 2\n splithstart = splits[0].shape[1] / 2 - h_width / 2\n splitwstart = splits[0].shape[2] / 2 - w_width / 2\n\n i = 0\n for zz, zz2 in zip([[0, z_width], [z_width, z_width * 2], [z_width * 2, z_width * 3], [z_width * 3 - z, None]],\n [[0, z_width], [splitzstart, z_width + splitzstart], [splitzstart, z_width + splitzstart],\n [z_width * 3 - z, None]]):\n\n for hh, hh2 in zip([[0, h_width], [h_width, h_width * 2], [h_width * 2, h_width * 3], [h_width * 3 - h, None]],\n [[0, h_width], [splithstart, h_width + splithstart], [splithstart, h_width + splithstart],\n [h_width * 3 - h, None]]):\n\n for ww, ww2 in zip(\n [[0, w_width], [w_width, w_width * 2], [w_width * 2, w_width * 3], [w_width * 3 - w, None]],\n [[0, w_width], [splitwstart, w_width + splitwstart], [splitwstart, w_width + splitwstart],\n [w_width * 3 - w, None]]):\n output[zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1], :, :] = splits[i][zz2[0]:zz2[1], hh2[0]:hh2[1],\n ww2[0]:ww2[1], :, :]\n i = i + 1\n\n return output\n\n\ndef bbox_iou(anchor_bboxes, gt_bboxes):\n '''\n :param anchor_bboxes: [x1, y1, x2, y2] \n :param gt_bboxes: [x1, y1, x2, y2]\n :return: \n '''\n N = anchor_bboxes.size(0)\n M = gt_bboxes.size(0)\n lb = torch.max(anchor_bboxes[:, None, :2], gt_bboxes[:, :2]) # [N, M, 2]\n rb = torch.min(anchor_bboxes[:, None, 2:], gt_bboxes[:, 2:]) # [N, M, 2]\n wh = (rb - lb + 1).clamp(min=0) # [N, M, 2]\n intersection = wh[:, :, 0] * wh[:, :, 1] # [N, M]\n area1 = ((anchor_bboxes[:, 2] - anchor_bboxes[:, 0]) + 1) * ((anchor_bboxes[:, 3] - anchor_bboxes[:, 1]) + 1) # [N,]\n area2 = ((gt_bboxes[:, 2] - gt_bboxes[:, 0]) + 1) * ((gt_bboxes[:, 3] - gt_bboxes[:, 1]) + 1) # [M,]\n iou = intersection / (area1[:, None] + area2 - intersection)\n return iou\n\ndef bbox_nms(bboxes, scores, threshold=0.5, mode='union', topk=5):\n '''Non maximum suppression.\n Args:\n bboxes: (tensor) bounding boxes, sized [N,4].\n scores: (tensor) bbox scores, sized [N,].\n threshold: (float) overlap threshold.\n mode: (str) 'union' or 'min'.\n Returns:\n keep: (tensor) selected indices.\n Reference:\n https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py\n '''\n x1 = bboxes[:,0]\n y1 = bboxes[:,1]\n x2 = bboxes[:,2]\n y2 = bboxes[:,3]\n\n areas = (x2-x1+1) * (y2-y1+1)\n _, order = scores.sort(0, descending=True)\n\n keep = []\n while order.numel() > 0:\n i = order[0]\n keep.append(i)\n\n if order.numel() == 1:\n break\n\n xx1 = x1[order[1:]].clamp(min=x1[i])\n yy1 = y1[order[1:]].clamp(min=y1[i])\n xx2 = 
x2[order[1:]].clamp(max=x2[i])\n yy2 = y2[order[1:]].clamp(max=y2[i])\n\n w = (xx2-xx1+1).clamp(min=0)\n h = (yy2-yy1+1).clamp(min=0)\n inter = w*h\n\n if mode == 'union':\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n elif mode == 'min':\n ovr = inter / areas[order[1:]].clamp(max=areas[i])\n else:\n raise TypeError('Unknown nms mode: %s.' % mode)\n\n ids = (ovr<=threshold).nonzero().squeeze()\n if ids.numel() == 0:\n break\n order = order[ids+1]\n keep = keep[:topk]\n return torch.LongTensor(keep)\n\ndef draw_bbox(image, bboxes, probs, save_path, gt_bboxes=None):\n '''\n :param image: \n :param bboxes: [[x1, y1, x2, y2], ...]\n :param labels: string list\n :param probs: float array\n :param save_path: string end with file name\n :return: \n '''\n alpha = 0.5\n color = (0, 255, 0)\n thick = 1\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_scale = 0.5\n overlay = image.copy()\n for bbox, prob in zip(bboxes, probs):\n label_txt = 'Prob: %.2f'%prob\n x1, y1, x2, y2 = np.round(bbox).astype(np.int)\n overlay = cv2.rectangle(overlay, (x1, y1), (x2, y2), color, thick)\n txt_size = cv2.getTextSize(label_txt, font, font_scale, thick)\n overlay = cv2.rectangle(overlay, (x1, y1-txt_size[0][1]), (x1+txt_size[0][0], y1), color, cv2.FILLED)\n overlay = cv2.putText(overlay, label_txt, (x1, y1), font, font_scale, (255, 255, 255), thick, cv2.LINE_AA)\n if gt_bboxes is not None:\n for bbox in gt_bboxes:\n x1, y1, x2, y2 = np.round(bbox).astype(np.int)\n overlay = cv2.rectangle(overlay, (x1, y1), (x2, y2), (0, 0, 255), thick)\n image = cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0)\n cv2.imwrite(save_path, image)\n\ndef draw_keypoint_with_caption(image, keypoint, text):\n '''\n :param image: \n :param keypoint: [x, y]\n :param text: string\n :return: image\n '''\n alpha = 0.5\n color1 = (0, 255, 0)\n thick = 2\n l = 5\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_scale = 0.5\n overlay = image.copy()\n x, y = keypoint\n overlay = cv2.line(overlay, (x - l, y - l), (x + l, y + l), color1, thick)\n overlay = cv2.line(overlay, (x - l, y + l), (x + l, y - l), color1, thick)\n overlay = cv2.putText(overlay, text, (0, image.shape[0]), font, font_scale, (0, 0, 0), thick, cv2.LINE_AA)\n image = cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0)\n return image\n\ncolor_palette = [(136, 112, 246),\n (49, 136, 219),\n (49, 156, 173),\n (49, 170, 119),\n (122, 176, 51),\n (164, 172, 53),\n (197, 168, 56),\n (244, 154, 110),\n (244, 121, 204),\n (204, 101, 245)] # husl\n\ndef draw_keypoints(image, keypoints, gt_keypoints=None):\n '''\n :param image: \n :param keypoints: [[x, y, v], ...]\n :return: \n '''\n alpha = 0.8\n color1 = (0, 255, 0)\n color2 = (0, 0, 255)\n thick = 2\n l = 5\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_scale = 0.5\n overlay = image.copy()\n\n if gt_keypoints is None:\n for i, kpt in enumerate(keypoints):\n x, y, v = kpt\n if v > 0:\n overlay = cv2.line(overlay, (x-l, y-l), (x+l, y+l), color_palette[i%len(color_palette)], thick)\n overlay = cv2.line(overlay, (x-l, y+l), (x+l, y-l), color_palette[i%len(color_palette)], thick)\n\n if gt_keypoints is not None:\n for k in range(len(keypoints)):\n gtx, gty, gtv = gt_keypoints[k]\n x, y, v = keypoints[k]\n if gtv > 0:\n overlay = cv2.line(overlay, (x - l, y - l), (x + l, y + l), color1, thick)\n overlay = cv2.line(overlay, (x - l, y + l), (x + l, y - l), color1, thick)\n overlay = cv2.putText(overlay, str(k), (x, y), font, font_scale, color1, thick, cv2.LINE_AA)\n overlay = cv2.line(overlay, (gtx - l, gty - l), (gtx + l, gty + l), color2, thick)\n 
overlay = cv2.line(overlay, (gtx - l, gty + l), (gtx + l, gty - l), color2, thick)\n overlay = cv2.putText(overlay, str(k), (gtx, gty), font, font_scale, color2, thick, cv2.LINE_AA)\n image = cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0)\n return image\n\ndef draw_heatmap(image, heatmap):\n '''\n :param image: \n :param heatmap: \n :param save_path: \n :return: \n '''\n hp_max = np.amax(heatmap)\n scale = 1\n if hp_max != 0:\n scale = 255 // hp_max\n heatmap = (heatmap * scale).astype(np.uint8)\n alpha = 0.7\n heatmap_img = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n fin = cv2.addWeighted(heatmap_img, alpha, image, 1 - alpha, 0)\n return fin\n\ndef normalized_error(preds, targets, widths):\n '''\n :param preds: [[x, y, v], ...]\n :param targets: [[x, y, v], ...]\n :param widths: [[w1], [w2], ...]\n :return: \n '''\n dist = preds[:, :2] - targets[:, :2]\n dist = np.sqrt(dist[:, 0]**2 + dist[:, 1]**2)\n targets = np.copy(targets)\n targets[targets<0] = 0\n if np.sum(targets[:, 2]) == 0:\n return 0\n ne = np.sum(dist/widths * targets[:, 2]) / np.sum(targets[:, 2])\n return ne",
"from include import *\nfrom torch.autograd import Variable\nimport torch\nimport numpy as np\nfrom process.data_helper import *\n\ndef save(list_or_dict,name):\n f = open(name, 'w')\n f.write(str(list_or_dict))\n f.close()\n\ndef load(name):\n f = open(name, 'r')\n a = f.read()\n tmp = eval(a)\n f.close()\n return tmp\n\ndef dot_numpy(vector1 , vector2,emb_size = 512):\n vector1 = vector1.reshape([-1, emb_size])\n vector2 = vector2.reshape([-1, emb_size])\n vector2 = vector2.transpose(1,0)\n\n cosV12 = np.dot(vector1, vector2)\n return cosV12\n\ndef to_var(x, volatile=False):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x, volatile=volatile)\n\ndef metric(prob, label, thres = 0.5):\n shape = prob.shape\n prob_tmp = np.ones([shape[0], shape[1] + 1]) * thres\n prob_tmp[:, :shape[1]] = prob\n precision , top5 = top_n_np(prob_tmp, label)\n return precision, top5\n\ndef top_n_np(preds, labels):\n n = 5\n predicted = np.fliplr(preds.argsort(axis=1)[:, -n:])\n top5 = []\n\n re = 0\n for i in range(len(preds)):\n predicted_tmp = predicted[i]\n labels_tmp = labels[i]\n for n_ in range(5):\n re += np.sum(labels_tmp == predicted_tmp[n_]) / (n_ + 1.0)\n\n re = re / len(preds)\n for i in range(n):\n top5.append(np.sum(labels == predicted[:, i])/ (1.0*len(labels)))\n return re, top5\n\n\ndef prob_to_csv_top5(prob, key_id, name):\n CLASS_NAME,_ = load_CLASS_NAME()\n\n prob = np.asarray(prob)\n print(prob.shape)\n\n top = np.argsort(-prob,1)[:,:5]\n word = []\n index = 0\n\n rs = []\n\n for (t0,t1,t2,t3,t4) in top:\n word.append(\n CLASS_NAME[t0] + ' ' + \\\n CLASS_NAME[t1] + ' ' + \\\n CLASS_NAME[t2])\n\n top_k_label_name = r''\n label = CLASS_NAME[t0]\n score = prob[index][t0]\n top_k_label_name += label + ' ' + str(score) + ' '\n\n label = CLASS_NAME[t1]\n score = prob[index][t1]\n top_k_label_name += label + ' ' + str(score) + ' '\n\n label = CLASS_NAME[t2]\n score = prob[index][t2]\n top_k_label_name += label + ' ' + str(score) + ' '\n\n label = CLASS_NAME[t3]\n score = prob[index][t3]\n top_k_label_name += label + ' ' + str(score) + ' '\n\n label = CLASS_NAME[t4]\n score = prob[index][t4]\n top_k_label_name += label + ' ' + str(score) + ' '\n\n rs.append(top_k_label_name)\n index += 1\n\n pd.DataFrame({'key_id':key_id, 'word':rs}).to_csv( '{}.csv'.format(name), index=None)\n"
] |
[
[
"torch.LongTensor",
"numpy.amax",
"numpy.pad",
"torch.max",
"torch.cat",
"numpy.sqrt",
"torch.min",
"torch.from_numpy",
"torch.Tensor.numpy",
"numpy.concatenate",
"numpy.round",
"numpy.copy",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.dot",
"numpy.asarray",
"numpy.ones",
"torch.cuda.is_available",
"numpy.argsort",
"numpy.sum",
"torch.autograd.Variable"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tushar-deepsource/pandas
|
[
"700be617eb567fb4ab82aa8151d5c4ee02c22b95"
] |
[
"pandas/tests/frame/test_query_eval.py"
] |
[
"import operator\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.computation.check import NUMEXPR_INSTALLED\n\nPARSERS = \"python\", \"pandas\"\nENGINES = \"python\", pytest.param(\"numexpr\", marks=td.skip_if_no_ne)\n\n\[email protected](params=PARSERS, ids=lambda x: x)\ndef parser(request):\n return request.param\n\n\[email protected](params=ENGINES, ids=lambda x: x)\ndef engine(request):\n return request.param\n\n\ndef skip_if_no_pandas_parser(parser):\n if parser != \"pandas\":\n pytest.skip(f\"cannot evaluate with parser {repr(parser)}\")\n\n\nclass TestCompat:\n def setup_method(self, method):\n self.df = DataFrame({\"A\": [1, 2, 3]})\n self.expected1 = self.df[self.df.A > 0]\n self.expected2 = self.df.A + 1\n\n def test_query_default(self):\n\n # GH 12749\n # this should always work, whether NUMEXPR_INSTALLED or not\n df = self.df\n result = df.query(\"A>0\")\n tm.assert_frame_equal(result, self.expected1)\n result = df.eval(\"A+1\")\n tm.assert_series_equal(result, self.expected2, check_names=False)\n\n def test_query_None(self):\n\n df = self.df\n result = df.query(\"A>0\", engine=None)\n tm.assert_frame_equal(result, self.expected1)\n result = df.eval(\"A+1\", engine=None)\n tm.assert_series_equal(result, self.expected2, check_names=False)\n\n def test_query_python(self):\n\n df = self.df\n result = df.query(\"A>0\", engine=\"python\")\n tm.assert_frame_equal(result, self.expected1)\n result = df.eval(\"A+1\", engine=\"python\")\n tm.assert_series_equal(result, self.expected2, check_names=False)\n\n def test_query_numexpr(self):\n\n df = self.df\n if NUMEXPR_INSTALLED:\n result = df.query(\"A>0\", engine=\"numexpr\")\n tm.assert_frame_equal(result, self.expected1)\n result = df.eval(\"A+1\", engine=\"numexpr\")\n tm.assert_series_equal(result, self.expected2, check_names=False)\n else:\n msg = (\n r\"'numexpr' is not installed or an unsupported version. 
\"\n r\"Cannot use engine='numexpr' for query/eval if 'numexpr' is \"\n r\"not installed\"\n )\n with pytest.raises(ImportError, match=msg):\n df.query(\"A>0\", engine=\"numexpr\")\n with pytest.raises(ImportError, match=msg):\n df.eval(\"A+1\", engine=\"numexpr\")\n\n\nclass TestDataFrameEval:\n\n # smaller hits python, larger hits numexpr\n @pytest.mark.parametrize(\"n\", [4, 4000])\n @pytest.mark.parametrize(\n \"op_str,op,rop\",\n [\n (\"+\", \"__add__\", \"__radd__\"),\n (\"-\", \"__sub__\", \"__rsub__\"),\n (\"*\", \"__mul__\", \"__rmul__\"),\n (\"/\", \"__truediv__\", \"__rtruediv__\"),\n ],\n )\n def test_ops(self, op_str, op, rop, n):\n\n # tst ops and reversed ops in evaluation\n # GH7198\n\n df = DataFrame(1, index=range(n), columns=list(\"abcd\"))\n df.iloc[0] = 2\n m = df.mean()\n\n base = DataFrame( # noqa\n np.tile(m.values, n).reshape(n, -1), columns=list(\"abcd\")\n )\n\n expected = eval(f\"base {op_str} df\")\n\n # ops as strings\n result = eval(f\"m {op_str} df\")\n tm.assert_frame_equal(result, expected)\n\n # these are commutative\n if op in [\"+\", \"*\"]:\n result = getattr(df, op)(m)\n tm.assert_frame_equal(result, expected)\n\n # these are not\n elif op in [\"-\", \"/\"]:\n result = getattr(df, rop)(m)\n tm.assert_frame_equal(result, expected)\n\n def test_dataframe_sub_numexpr_path(self):\n # GH7192: Note we need a large number of rows to ensure this\n # goes through the numexpr path\n df = DataFrame({\"A\": np.random.randn(25000)})\n df.iloc[0:5] = np.nan\n expected = 1 - np.isnan(df.iloc[0:25])\n result = (1 - np.isnan(df)).iloc[0:25]\n tm.assert_frame_equal(result, expected)\n\n def test_query_non_str(self):\n # GH 11485\n df = DataFrame({\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"b\"]})\n\n msg = \"expr must be a string to be evaluated\"\n with pytest.raises(ValueError, match=msg):\n df.query(lambda x: x.B == \"b\")\n\n with pytest.raises(ValueError, match=msg):\n df.query(111)\n\n def test_query_empty_string(self):\n # GH 13139\n df = DataFrame({\"A\": [1, 2, 3]})\n\n msg = \"expr cannot be an empty string\"\n with pytest.raises(ValueError, match=msg):\n df.query(\"\")\n\n def test_eval_resolvers_as_list(self):\n # GH 14095\n df = DataFrame(np.random.randn(10, 2), columns=list(\"ab\"))\n dict1 = {\"a\": 1}\n dict2 = {\"b\": 2}\n assert df.eval(\"a + b\", resolvers=[dict1, dict2]) == dict1[\"a\"] + dict2[\"b\"]\n assert pd.eval(\"a + b\", resolvers=[dict1, dict2]) == dict1[\"a\"] + dict2[\"b\"]\n\n def test_eval_object_dtype_binop(self):\n # GH#24883\n df = DataFrame({\"a1\": [\"Y\", \"N\"]})\n res = df.eval(\"c = ((a1 == 'Y') & True)\")\n expected = DataFrame({\"a1\": [\"Y\", \"N\"], \"c\": [True, False]})\n tm.assert_frame_equal(res, expected)\n\n\nclass TestDataFrameQueryWithMultiIndex:\n def test_query_with_named_multiindex(self, parser, engine):\n skip_if_no_pandas_parser(parser)\n a = np.random.choice([\"red\", \"green\"], size=10)\n b = np.random.choice([\"eggs\", \"ham\"], size=10)\n index = MultiIndex.from_arrays([a, b], names=[\"color\", \"food\"])\n df = DataFrame(np.random.randn(10, 2), index=index)\n ind = Series(\n df.index.get_level_values(\"color\").values, index=index, name=\"color\"\n )\n\n # equality\n res1 = df.query('color == \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" == color', parser=parser, engine=engine)\n exp = df[ind == \"red\"]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('color != \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" != 
color', parser=parser, engine=engine)\n exp = df[ind != \"red\"]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('color == [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] == color', parser=parser, engine=engine)\n exp = df[ind.isin([\"red\"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n res1 = df.query('color != [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] != color', parser=parser, engine=engine)\n exp = df[~ind.isin([\"red\"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('[\"red\"] in color', parser=parser, engine=engine)\n res2 = df.query('\"red\" in color', parser=parser, engine=engine)\n exp = df[ind.isin([\"red\"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n res1 = df.query('[\"red\"] not in color', parser=parser, engine=engine)\n res2 = df.query('\"red\" not in color', parser=parser, engine=engine)\n exp = df[~ind.isin([\"red\"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n def test_query_with_unnamed_multiindex(self, parser, engine):\n skip_if_no_pandas_parser(parser)\n a = np.random.choice([\"red\", \"green\"], size=10)\n b = np.random.choice([\"eggs\", \"ham\"], size=10)\n index = MultiIndex.from_arrays([a, b])\n df = DataFrame(np.random.randn(10, 2), index=index)\n ind = Series(df.index.get_level_values(0).values, index=index)\n\n res1 = df.query('ilevel_0 == \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" == ilevel_0', parser=parser, engine=engine)\n exp = df[ind == \"red\"]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('ilevel_0 != \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" != ilevel_0', parser=parser, engine=engine)\n exp = df[ind != \"red\"]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('ilevel_0 == [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] == ilevel_0', parser=parser, engine=engine)\n exp = df[ind.isin([\"red\"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n res1 = df.query('ilevel_0 != [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] != ilevel_0', parser=parser, engine=engine)\n exp = df[~ind.isin([\"red\"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('[\"red\"] in ilevel_0', parser=parser, engine=engine)\n res2 = df.query('\"red\" in ilevel_0', parser=parser, engine=engine)\n exp = df[ind.isin([\"red\"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n res1 = df.query('[\"red\"] not in ilevel_0', parser=parser, engine=engine)\n res2 = df.query('\"red\" not in ilevel_0', parser=parser, engine=engine)\n exp = df[~ind.isin([\"red\"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # ## LEVEL 1\n ind = Series(df.index.get_level_values(1).values, index=index)\n res1 = df.query('ilevel_1 == \"eggs\"', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" == ilevel_1', parser=parser, engine=engine)\n exp = df[ind == \"eggs\"]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('ilevel_1 != \"eggs\"', parser=parser, engine=engine)\n res2 = 
df.query('\"eggs\" != ilevel_1', parser=parser, engine=engine)\n exp = df[ind != \"eggs\"]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('ilevel_1 == [\"eggs\"]', parser=parser, engine=engine)\n res2 = df.query('[\"eggs\"] == ilevel_1', parser=parser, engine=engine)\n exp = df[ind.isin([\"eggs\"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n res1 = df.query('ilevel_1 != [\"eggs\"]', parser=parser, engine=engine)\n res2 = df.query('[\"eggs\"] != ilevel_1', parser=parser, engine=engine)\n exp = df[~ind.isin([\"eggs\"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('[\"eggs\"] in ilevel_1', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" in ilevel_1', parser=parser, engine=engine)\n exp = df[ind.isin([\"eggs\"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n res1 = df.query('[\"eggs\"] not in ilevel_1', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" not in ilevel_1', parser=parser, engine=engine)\n exp = df[~ind.isin([\"eggs\"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n def test_query_with_partially_named_multiindex(self, parser, engine):\n skip_if_no_pandas_parser(parser)\n a = np.random.choice([\"red\", \"green\"], size=10)\n b = np.arange(10)\n index = MultiIndex.from_arrays([a, b])\n index.names = [None, \"rating\"]\n df = DataFrame(np.random.randn(10, 2), index=index)\n res = df.query(\"rating == 1\", parser=parser, engine=engine)\n ind = Series(\n df.index.get_level_values(\"rating\").values, index=index, name=\"rating\"\n )\n exp = df[ind == 1]\n tm.assert_frame_equal(res, exp)\n\n res = df.query(\"rating != 1\", parser=parser, engine=engine)\n ind = Series(\n df.index.get_level_values(\"rating\").values, index=index, name=\"rating\"\n )\n exp = df[ind != 1]\n tm.assert_frame_equal(res, exp)\n\n res = df.query('ilevel_0 == \"red\"', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values(0).values, index=index)\n exp = df[ind == \"red\"]\n tm.assert_frame_equal(res, exp)\n\n res = df.query('ilevel_0 != \"red\"', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values(0).values, index=index)\n exp = df[ind != \"red\"]\n tm.assert_frame_equal(res, exp)\n\n def test_query_multiindex_get_index_resolvers(self):\n df = tm.makeCustomDataframe(\n 10, 3, r_idx_nlevels=2, r_idx_names=[\"spam\", \"eggs\"]\n )\n resolvers = df._get_index_resolvers()\n\n def to_series(mi, level):\n level_values = mi.get_level_values(level)\n s = level_values.to_series()\n s.index = mi\n return s\n\n col_series = df.columns.to_series()\n expected = {\n \"index\": df.index,\n \"columns\": col_series,\n \"spam\": to_series(df.index, \"spam\"),\n \"eggs\": to_series(df.index, \"eggs\"),\n \"C0\": col_series,\n }\n for k, v in resolvers.items():\n if isinstance(v, Index):\n assert v.is_(expected[k])\n elif isinstance(v, Series):\n tm.assert_series_equal(v, expected[k])\n else:\n raise AssertionError(\"object must be a Series or Index\")\n\n\[email protected]_if_no_ne\nclass TestDataFrameQueryNumExprPandas:\n @classmethod\n def setup_class(cls):\n cls.engine = \"numexpr\"\n cls.parser = \"pandas\"\n\n @classmethod\n def teardown_class(cls):\n del cls.engine, cls.parser\n\n def test_date_query_with_attribute_access(self):\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n df = 
DataFrame(np.random.randn(5, 3))\n df[\"dates1\"] = date_range(\"1/1/2012\", periods=5)\n df[\"dates2\"] = date_range(\"1/1/2013\", periods=5)\n df[\"dates3\"] = date_range(\"1/1/2014\", periods=5)\n res = df.query(\n \"@df.dates1 < 20130101 < @df.dates3\", engine=engine, parser=parser\n )\n expec = df[(df.dates1 < \"20130101\") & (\"20130101\" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_query_no_attribute_access(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(np.random.randn(5, 3))\n df[\"dates1\"] = date_range(\"1/1/2012\", periods=5)\n df[\"dates2\"] = date_range(\"1/1/2013\", periods=5)\n df[\"dates3\"] = date_range(\"1/1/2014\", periods=5)\n res = df.query(\"dates1 < 20130101 < dates3\", engine=engine, parser=parser)\n expec = df[(df.dates1 < \"20130101\") & (\"20130101\" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(np.random.randn(n, 3))\n df[\"dates1\"] = date_range(\"1/1/2012\", periods=n)\n df[\"dates2\"] = date_range(\"1/1/2013\", periods=n)\n df[\"dates3\"] = date_range(\"1/1/2014\", periods=n)\n df.loc[np.random.rand(n) > 0.5, \"dates1\"] = pd.NaT\n df.loc[np.random.rand(n) > 0.5, \"dates3\"] = pd.NaT\n res = df.query(\"dates1 < 20130101 < dates3\", engine=engine, parser=parser)\n expec = df[(df.dates1 < \"20130101\") & (\"20130101\" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_index_query(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(np.random.randn(n, 3))\n df[\"dates1\"] = date_range(\"1/1/2012\", periods=n)\n df[\"dates3\"] = date_range(\"1/1/2014\", periods=n)\n return_value = df.set_index(\"dates1\", inplace=True, drop=True)\n assert return_value is None\n res = df.query(\"index < 20130101 < dates3\", engine=engine, parser=parser)\n expec = df[(df.index < \"20130101\") & (\"20130101\" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(np.random.randn(n, 3))\n df[\"dates1\"] = date_range(\"1/1/2012\", periods=n)\n df[\"dates3\"] = date_range(\"1/1/2014\", periods=n)\n df.iloc[0, 0] = pd.NaT\n return_value = df.set_index(\"dates1\", inplace=True, drop=True)\n assert return_value is None\n res = df.query(\"index < 20130101 < dates3\", engine=engine, parser=parser)\n expec = df[(df.index < \"20130101\") & (\"20130101\" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT_duplicates(self):\n engine, parser = self.engine, self.parser\n n = 10\n d = {}\n d[\"dates1\"] = date_range(\"1/1/2012\", periods=n)\n d[\"dates3\"] = date_range(\"1/1/2014\", periods=n)\n df = DataFrame(d)\n df.loc[np.random.rand(n) > 0.5, \"dates1\"] = pd.NaT\n return_value = df.set_index(\"dates1\", inplace=True, drop=True)\n assert return_value is None\n res = df.query(\"dates1 < 20130101 < dates3\", engine=engine, parser=parser)\n expec = df[(df.index.to_series() < \"20130101\") & (\"20130101\" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_query_with_non_date(self):\n engine, parser = self.engine, self.parser\n\n n = 10\n df = DataFrame(\n {\"dates\": date_range(\"1/1/2012\", periods=n), \"nondate\": np.arange(n)}\n )\n\n result = df.query(\"dates == nondate\", parser=parser, engine=engine)\n assert len(result) == 0\n\n result = df.query(\"dates != nondate\", parser=parser, engine=engine)\n tm.assert_frame_equal(result, 
df)\n\n msg = r\"Invalid comparison between dtype=datetime64\\[ns\\] and ndarray\"\n for op in [\"<\", \">\", \"<=\", \">=\"]:\n with pytest.raises(TypeError, match=msg):\n df.query(f\"dates {op} nondate\", parser=parser, engine=engine)\n\n def test_query_syntax_error(self):\n engine, parser = self.engine, self.parser\n df = DataFrame({\"i\": range(10), \"+\": range(3, 13), \"r\": range(4, 14)})\n msg = \"invalid syntax\"\n with pytest.raises(SyntaxError, match=msg):\n df.query(\"i - +\", engine=engine, parser=parser)\n\n def test_query_scope(self):\n from pandas.core.computation.ops import UndefinedVariableError\n\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n\n df = DataFrame(np.random.randn(20, 2), columns=list(\"ab\"))\n\n a, b = 1, 2 # noqa\n res = df.query(\"a > b\", engine=engine, parser=parser)\n expected = df[df.a > df.b]\n tm.assert_frame_equal(res, expected)\n\n res = df.query(\"@a > b\", engine=engine, parser=parser)\n expected = df[a > df.b]\n tm.assert_frame_equal(res, expected)\n\n # no local variable c\n with pytest.raises(\n UndefinedVariableError, match=\"local variable 'c' is not defined\"\n ):\n df.query(\"@a > b > @c\", engine=engine, parser=parser)\n\n # no column named 'c'\n with pytest.raises(UndefinedVariableError, match=\"name 'c' is not defined\"):\n df.query(\"@a > b > c\", engine=engine, parser=parser)\n\n def test_query_doesnt_pickup_local(self):\n from pandas.core.computation.ops import UndefinedVariableError\n\n engine, parser = self.engine, self.parser\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list(\"abc\"))\n\n # we don't pick up the local 'sin'\n with pytest.raises(UndefinedVariableError, match=\"name 'sin' is not defined\"):\n df.query(\"sin > 5\", engine=engine, parser=parser)\n\n def test_query_builtin(self):\n from pandas.core.computation.engines import NumExprClobberingError\n\n engine, parser = self.engine, self.parser\n\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list(\"abc\"))\n\n df.index.name = \"sin\"\n msg = \"Variables in expression.+\"\n with pytest.raises(NumExprClobberingError, match=msg):\n df.query(\"sin > 5\", engine=engine, parser=parser)\n\n def test_query(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(np.random.randn(10, 3), columns=[\"a\", \"b\", \"c\"])\n\n tm.assert_frame_equal(\n df.query(\"a < b\", engine=engine, parser=parser), df[df.a < df.b]\n )\n tm.assert_frame_equal(\n df.query(\"a + b > b * c\", engine=engine, parser=parser),\n df[df.a + df.b > df.b * df.c],\n )\n\n def test_query_index_with_name(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(\n np.random.randint(10, size=(10, 3)),\n index=Index(range(10), name=\"blob\"),\n columns=[\"a\", \"b\", \"c\"],\n )\n res = df.query(\"(blob < 5) & (a < b)\", engine=engine, parser=parser)\n expec = df[(df.index < 5) & (df.a < df.b)]\n tm.assert_frame_equal(res, expec)\n\n res = df.query(\"blob < b\", engine=engine, parser=parser)\n expec = df[df.index < df.b]\n\n tm.assert_frame_equal(res, expec)\n\n def test_query_index_without_name(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(\n np.random.randint(10, size=(10, 3)),\n index=range(10),\n columns=[\"a\", \"b\", \"c\"],\n )\n\n # \"index\" should refer to the index\n res = df.query(\"index < b\", engine=engine, parser=parser)\n expec = df[df.index < df.b]\n tm.assert_frame_equal(res, expec)\n\n # test against a scalar\n res = df.query(\"index < 5\", engine=engine, 
parser=parser)\n expec = df[df.index < 5]\n tm.assert_frame_equal(res, expec)\n\n def test_nested_scope(self):\n engine = self.engine\n parser = self.parser\n\n skip_if_no_pandas_parser(parser)\n\n df = DataFrame(np.random.randn(5, 3))\n df2 = DataFrame(np.random.randn(5, 3))\n expected = df[(df > 0) & (df2 > 0)]\n\n result = df.query(\"(@df > 0) & (@df2 > 0)\", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expected)\n\n result = pd.eval(\"df[df > 0 and df2 > 0]\", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expected)\n\n result = pd.eval(\n \"df[df > 0 and df2 > 0 and df[df > 0] > 0]\", engine=engine, parser=parser\n )\n expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]\n tm.assert_frame_equal(result, expected)\n\n result = pd.eval(\"df[(df>0) & (df2>0)]\", engine=engine, parser=parser)\n expected = df.query(\"(@df>0) & (@df2>0)\", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expected)\n\n def test_nested_raises_on_local_self_reference(self):\n from pandas.core.computation.ops import UndefinedVariableError\n\n df = DataFrame(np.random.randn(5, 3))\n\n # can't reference ourself b/c we're a local so @ is necessary\n with pytest.raises(UndefinedVariableError, match=\"name 'df' is not defined\"):\n df.query(\"df > 0\", engine=self.engine, parser=self.parser)\n\n def test_local_syntax(self):\n skip_if_no_pandas_parser(self.parser)\n\n engine, parser = self.engine, self.parser\n df = DataFrame(np.random.randn(100, 10), columns=list(\"abcdefghij\"))\n b = 1\n expect = df[df.a < b]\n result = df.query(\"a < @b\", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expect)\n\n expect = df[df.a < df.b]\n result = df.query(\"a < b\", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expect)\n\n def test_chained_cmp_and_in(self):\n skip_if_no_pandas_parser(self.parser)\n engine, parser = self.engine, self.parser\n cols = list(\"abc\")\n df = DataFrame(np.random.randn(100, len(cols)), columns=cols)\n res = df.query(\n \"a < b < c and a not in b not in c\", engine=engine, parser=parser\n )\n ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)\n expec = df[ind]\n tm.assert_frame_equal(res, expec)\n\n def test_local_variable_with_in(self):\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n a = Series(np.random.randint(3, size=15), name=\"a\")\n b = Series(np.random.randint(10, size=15), name=\"b\")\n df = DataFrame({\"a\": a, \"b\": b})\n\n expected = df.loc[(df.b - 1).isin(a)]\n result = df.query(\"b - 1 in a\", engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n b = Series(np.random.randint(10, size=15), name=\"b\")\n expected = df.loc[(b - 1).isin(a)]\n result = df.query(\"@b - 1 in a\", engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n def test_at_inside_string(self):\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n c = 1 # noqa\n df = DataFrame({\"a\": [\"a\", \"a\", \"b\", \"b\", \"@c\", \"@c\"]})\n result = df.query('a == \"@c\"', engine=engine, parser=parser)\n expected = df[df.a == \"@c\"]\n tm.assert_frame_equal(result, expected)\n\n def test_query_undefined_local(self):\n from pandas.core.computation.ops import UndefinedVariableError\n\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n\n df = DataFrame(np.random.rand(10, 2), columns=list(\"ab\"))\n with pytest.raises(\n UndefinedVariableError, match=\"local variable 'c' is not defined\"\n ):\n df.query(\"a 
== @c\", engine=engine, parser=parser)\n\n def test_index_resolvers_come_after_columns_with_the_same_name(self):\n n = 1 # noqa\n a = np.r_[20:101:20]\n\n df = DataFrame({\"index\": a, \"b\": np.random.randn(a.size)})\n df.index.name = \"index\"\n result = df.query(\"index > 5\", engine=self.engine, parser=self.parser)\n expected = df[df[\"index\"] > 5]\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({\"index\": a, \"b\": np.random.randn(a.size)})\n result = df.query(\"ilevel_0 > 5\", engine=self.engine, parser=self.parser)\n expected = df.loc[df.index[df.index > 5]]\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({\"a\": a, \"b\": np.random.randn(a.size)})\n df.index.name = \"a\"\n result = df.query(\"a > 5\", engine=self.engine, parser=self.parser)\n expected = df[df.a > 5]\n tm.assert_frame_equal(result, expected)\n\n result = df.query(\"index > 5\", engine=self.engine, parser=self.parser)\n expected = df.loc[df.index[df.index > 5]]\n tm.assert_frame_equal(result, expected)\n\n def test_inf(self):\n n = 10\n df = DataFrame({\"a\": np.random.rand(n), \"b\": np.random.rand(n)})\n df.loc[::2, 0] = np.inf\n d = {\"==\": operator.eq, \"!=\": operator.ne}\n for op, f in d.items():\n q = f\"a {op} inf\"\n expected = df[f(df.a, np.inf)]\n result = df.query(q, engine=self.engine, parser=self.parser)\n tm.assert_frame_equal(result, expected)\n\n def test_check_tz_aware_index_query(self, tz_aware_fixture):\n # https://github.com/pandas-dev/pandas/issues/29463\n tz = tz_aware_fixture\n df_index = date_range(\n start=\"2019-01-01\", freq=\"1d\", periods=10, tz=tz, name=\"time\"\n )\n expected = DataFrame(index=df_index)\n df = DataFrame(index=df_index)\n result = df.query('\"2018-01-03 00:00:00+00\" < time')\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame(df_index)\n result = df.reset_index().query('\"2018-01-03 00:00:00+00\" < time')\n tm.assert_frame_equal(result, expected)\n\n def test_method_calls_in_query(self):\n # https://github.com/pandas-dev/pandas/issues/22435\n n = 10\n df = DataFrame({\"a\": 2 * np.random.rand(n), \"b\": np.random.rand(n)})\n expected = df[df[\"a\"].astype(\"int\") == 0]\n result = df.query(\n \"a.astype('int') == 0\", engine=self.engine, parser=self.parser\n )\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame(\n {\n \"a\": np.where(np.random.rand(n) < 0.5, np.nan, np.random.randn(n)),\n \"b\": np.random.randn(n),\n }\n )\n expected = df[df[\"a\"].notnull()]\n result = df.query(\"a.notnull()\", engine=self.engine, parser=self.parser)\n tm.assert_frame_equal(result, expected)\n\n\[email protected]_if_no_ne\nclass TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):\n @classmethod\n def setup_class(cls):\n super().setup_class()\n cls.engine = \"numexpr\"\n cls.parser = \"python\"\n\n def test_date_query_no_attribute_access(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(np.random.randn(5, 3))\n df[\"dates1\"] = date_range(\"1/1/2012\", periods=5)\n df[\"dates2\"] = date_range(\"1/1/2013\", periods=5)\n df[\"dates3\"] = date_range(\"1/1/2014\", periods=5)\n res = df.query(\n \"(dates1 < 20130101) & (20130101 < dates3)\", engine=engine, parser=parser\n )\n expec = df[(df.dates1 < \"20130101\") & (\"20130101\" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(np.random.randn(n, 3))\n df[\"dates1\"] = date_range(\"1/1/2012\", periods=n)\n df[\"dates2\"] = date_range(\"1/1/2013\", 
periods=n)\n df[\"dates3\"] = date_range(\"1/1/2014\", periods=n)\n df.loc[np.random.rand(n) > 0.5, \"dates1\"] = pd.NaT\n df.loc[np.random.rand(n) > 0.5, \"dates3\"] = pd.NaT\n res = df.query(\n \"(dates1 < 20130101) & (20130101 < dates3)\", engine=engine, parser=parser\n )\n expec = df[(df.dates1 < \"20130101\") & (\"20130101\" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_index_query(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(np.random.randn(n, 3))\n df[\"dates1\"] = date_range(\"1/1/2012\", periods=n)\n df[\"dates3\"] = date_range(\"1/1/2014\", periods=n)\n return_value = df.set_index(\"dates1\", inplace=True, drop=True)\n assert return_value is None\n res = df.query(\n \"(index < 20130101) & (20130101 < dates3)\", engine=engine, parser=parser\n )\n expec = df[(df.index < \"20130101\") & (\"20130101\" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(np.random.randn(n, 3))\n df[\"dates1\"] = date_range(\"1/1/2012\", periods=n)\n df[\"dates3\"] = date_range(\"1/1/2014\", periods=n)\n df.iloc[0, 0] = pd.NaT\n return_value = df.set_index(\"dates1\", inplace=True, drop=True)\n assert return_value is None\n res = df.query(\n \"(index < 20130101) & (20130101 < dates3)\", engine=engine, parser=parser\n )\n expec = df[(df.index < \"20130101\") & (\"20130101\" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT_duplicates(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(np.random.randn(n, 3))\n df[\"dates1\"] = date_range(\"1/1/2012\", periods=n)\n df[\"dates3\"] = date_range(\"1/1/2014\", periods=n)\n df.loc[np.random.rand(n) > 0.5, \"dates1\"] = pd.NaT\n return_value = df.set_index(\"dates1\", inplace=True, drop=True)\n assert return_value is None\n msg = r\"'BoolOp' nodes are not implemented\"\n with pytest.raises(NotImplementedError, match=msg):\n df.query(\"index < 20130101 < dates3\", engine=engine, parser=parser)\n\n def test_nested_scope(self):\n from pandas.core.computation.ops import UndefinedVariableError\n\n engine = self.engine\n parser = self.parser\n # smoke test\n x = 1 # noqa\n result = pd.eval(\"x + 1\", engine=engine, parser=parser)\n assert result == 2\n\n df = DataFrame(np.random.randn(5, 3))\n df2 = DataFrame(np.random.randn(5, 3))\n\n # don't have the pandas parser\n msg = r\"The '@' prefix is only supported by the pandas parser\"\n with pytest.raises(SyntaxError, match=msg):\n df.query(\"(@df>0) & (@df2>0)\", engine=engine, parser=parser)\n\n with pytest.raises(UndefinedVariableError, match=\"name 'df' is not defined\"):\n df.query(\"(df>0) & (df2>0)\", engine=engine, parser=parser)\n\n expected = df[(df > 0) & (df2 > 0)]\n result = pd.eval(\"df[(df > 0) & (df2 > 0)]\", engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]\n result = pd.eval(\n \"df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]\", engine=engine, parser=parser\n )\n tm.assert_frame_equal(expected, result)\n\n\nclass TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):\n @classmethod\n def setup_class(cls):\n super().setup_class()\n cls.engine = \"python\"\n cls.parser = \"pandas\"\n\n def test_query_builtin(self):\n engine, parser = self.engine, self.parser\n\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list(\"abc\"))\n\n df.index.name = \"sin\"\n expected = 
df[df.index > 5]\n result = df.query(\"sin > 5\", engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n\nclass TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):\n @classmethod\n def setup_class(cls):\n super().setup_class()\n cls.engine = cls.parser = \"python\"\n\n def test_query_builtin(self):\n engine, parser = self.engine, self.parser\n\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list(\"abc\"))\n\n df.index.name = \"sin\"\n expected = df[df.index > 5]\n result = df.query(\"sin > 5\", engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n\nclass TestDataFrameQueryStrings:\n def test_str_query_method(self, parser, engine):\n df = DataFrame(np.random.randn(10, 1), columns=[\"b\"])\n df[\"strings\"] = Series(list(\"aabbccddee\"))\n expect = df[df.strings == \"a\"]\n\n if parser != \"pandas\":\n col = \"strings\"\n lst = '\"a\"'\n\n lhs = [col] * 2 + [lst] * 2\n rhs = lhs[::-1]\n\n eq, ne = \"==\", \"!=\"\n ops = 2 * ([eq] + [ne])\n msg = r\"'(Not)?In' nodes are not implemented\"\n\n for lhs, op, rhs in zip(lhs, ops, rhs):\n ex = f\"{lhs} {op} {rhs}\"\n with pytest.raises(NotImplementedError, match=msg):\n df.query(\n ex,\n engine=engine,\n parser=parser,\n local_dict={\"strings\": df.strings},\n )\n else:\n res = df.query('\"a\" == strings', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n\n res = df.query('strings == \"a\"', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n tm.assert_frame_equal(res, df[df.strings.isin([\"a\"])])\n\n expect = df[df.strings != \"a\"]\n res = df.query('strings != \"a\"', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n\n res = df.query('\"a\" != strings', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n tm.assert_frame_equal(res, df[~df.strings.isin([\"a\"])])\n\n def test_str_list_query_method(self, parser, engine):\n df = DataFrame(np.random.randn(10, 1), columns=[\"b\"])\n df[\"strings\"] = Series(list(\"aabbccddee\"))\n expect = df[df.strings.isin([\"a\", \"b\"])]\n\n if parser != \"pandas\":\n col = \"strings\"\n lst = '[\"a\", \"b\"]'\n\n lhs = [col] * 2 + [lst] * 2\n rhs = lhs[::-1]\n\n eq, ne = \"==\", \"!=\"\n ops = 2 * ([eq] + [ne])\n msg = r\"'(Not)?In' nodes are not implemented\"\n\n for lhs, op, rhs in zip(lhs, ops, rhs):\n ex = f\"{lhs} {op} {rhs}\"\n with pytest.raises(NotImplementedError, match=msg):\n df.query(ex, engine=engine, parser=parser)\n else:\n res = df.query('strings == [\"a\", \"b\"]', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n\n res = df.query('[\"a\", \"b\"] == strings', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n\n expect = df[~df.strings.isin([\"a\", \"b\"])]\n\n res = df.query('strings != [\"a\", \"b\"]', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n\n res = df.query('[\"a\", \"b\"] != strings', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n\n def test_query_with_string_columns(self, parser, engine):\n df = DataFrame(\n {\n \"a\": list(\"aaaabbbbcccc\"),\n \"b\": list(\"aabbccddeeff\"),\n \"c\": np.random.randint(5, size=12),\n \"d\": np.random.randint(9, size=12),\n }\n )\n if parser == \"pandas\":\n res = df.query(\"a in b\", parser=parser, engine=engine)\n expec = df[df.a.isin(df.b)]\n tm.assert_frame_equal(res, expec)\n\n res = df.query(\"a in b and c < d\", parser=parser, engine=engine)\n expec = df[df.a.isin(df.b) & (df.c < df.d)]\n tm.assert_frame_equal(res, 
expec)\n else:\n msg = r\"'(Not)?In' nodes are not implemented\"\n with pytest.raises(NotImplementedError, match=msg):\n df.query(\"a in b\", parser=parser, engine=engine)\n\n msg = r\"'BoolOp' nodes are not implemented\"\n with pytest.raises(NotImplementedError, match=msg):\n df.query(\"a in b and c < d\", parser=parser, engine=engine)\n\n def test_object_array_eq_ne(self, parser, engine):\n df = DataFrame(\n {\n \"a\": list(\"aaaabbbbcccc\"),\n \"b\": list(\"aabbccddeeff\"),\n \"c\": np.random.randint(5, size=12),\n \"d\": np.random.randint(9, size=12),\n }\n )\n res = df.query(\"a == b\", parser=parser, engine=engine)\n exp = df[df.a == df.b]\n tm.assert_frame_equal(res, exp)\n\n res = df.query(\"a != b\", parser=parser, engine=engine)\n exp = df[df.a != df.b]\n tm.assert_frame_equal(res, exp)\n\n def test_query_with_nested_strings(self, parser, engine):\n skip_if_no_pandas_parser(parser)\n events = [\n f\"page {n} {act}\" for n in range(1, 4) for act in [\"load\", \"exit\"]\n ] * 2\n stamps1 = date_range(\"2014-01-01 0:00:01\", freq=\"30s\", periods=6)\n stamps2 = date_range(\"2014-02-01 1:00:01\", freq=\"30s\", periods=6)\n df = DataFrame(\n {\n \"id\": np.arange(1, 7).repeat(2),\n \"event\": events,\n \"timestamp\": stamps1.append(stamps2),\n }\n )\n\n expected = df[df.event == '\"page 1 load\"']\n res = df.query(\"\"\"'\"page 1 load\"' in event\"\"\", parser=parser, engine=engine)\n tm.assert_frame_equal(expected, res)\n\n def test_query_with_nested_special_character(self, parser, engine):\n skip_if_no_pandas_parser(parser)\n df = DataFrame({\"a\": [\"a\", \"b\", \"test & test\"], \"b\": [1, 2, 3]})\n res = df.query('a == \"test & test\"', parser=parser, engine=engine)\n expec = df[df.a == \"test & test\"]\n tm.assert_frame_equal(res, expec)\n\n def test_query_lex_compare_strings(self, parser, engine):\n\n a = Series(np.random.choice(list(\"abcde\"), 20))\n b = Series(np.arange(a.size))\n df = DataFrame({\"X\": a, \"Y\": b})\n\n ops = {\"<\": operator.lt, \">\": operator.gt, \"<=\": operator.le, \">=\": operator.ge}\n\n for op, func in ops.items():\n res = df.query(f'X {op} \"d\"', engine=engine, parser=parser)\n expected = df[func(df.X, \"d\")]\n tm.assert_frame_equal(res, expected)\n\n def test_query_single_element_booleans(self, parser, engine):\n columns = \"bid\", \"bidsize\", \"ask\", \"asksize\"\n data = np.random.randint(2, size=(1, len(columns))).astype(bool)\n df = DataFrame(data, columns=columns)\n res = df.query(\"bid & ask\", engine=engine, parser=parser)\n expected = df[df.bid & df.ask]\n tm.assert_frame_equal(res, expected)\n\n def test_query_string_scalar_variable(self, parser, engine):\n skip_if_no_pandas_parser(parser)\n df = DataFrame(\n {\n \"Symbol\": [\"BUD US\", \"BUD US\", \"IBM US\", \"IBM US\"],\n \"Price\": [109.70, 109.72, 183.30, 183.35],\n }\n )\n e = df[df.Symbol == \"BUD US\"]\n symb = \"BUD US\" # noqa\n r = df.query(\"Symbol == @symb\", parser=parser, engine=engine)\n tm.assert_frame_equal(e, r)\n\n\nclass TestDataFrameEvalWithFrame:\n def setup_method(self, method):\n self.frame = DataFrame(np.random.randn(10, 3), columns=list(\"abc\"))\n\n def teardown_method(self, method):\n del self.frame\n\n def test_simple_expr(self, parser, engine):\n res = self.frame.eval(\"a + b\", engine=engine, parser=parser)\n expect = self.frame.a + self.frame.b\n tm.assert_series_equal(res, expect)\n\n def test_bool_arith_expr(self, parser, engine):\n res = self.frame.eval(\"a[a < 1] + b\", engine=engine, parser=parser)\n expect = self.frame.a[self.frame.a < 1] + 
self.frame.b\n tm.assert_series_equal(res, expect)\n\n @pytest.mark.parametrize(\"op\", [\"+\", \"-\", \"*\", \"/\"])\n def test_invalid_type_for_operator_raises(self, parser, engine, op):\n df = DataFrame({\"a\": [1, 2], \"b\": [\"c\", \"d\"]})\n msg = r\"unsupported operand type\\(s\\) for .+: '.+' and '.+'\"\n\n with pytest.raises(TypeError, match=msg):\n df.eval(f\"a {op} b\", engine=engine, parser=parser)\n\n\nclass TestDataFrameQueryBacktickQuoting:\n @pytest.fixture(scope=\"class\")\n def df(self):\n \"\"\"\n Yields a dataframe with strings that may or may not need escaping\n by backticks. The last two columns cannot be escaped by backticks\n and should raise a ValueError.\n \"\"\"\n yield DataFrame(\n {\n \"A\": [1, 2, 3],\n \"B B\": [3, 2, 1],\n \"C C\": [4, 5, 6],\n \"C C\": [7, 4, 3],\n \"C_C\": [8, 9, 10],\n \"D_D D\": [11, 1, 101],\n \"E.E\": [6, 3, 5],\n \"F-F\": [8, 1, 10],\n \"1e1\": [2, 4, 8],\n \"def\": [10, 11, 2],\n \"A (x)\": [4, 1, 3],\n \"B(x)\": [1, 1, 5],\n \"B (x)\": [2, 7, 4],\n \" &^ :!€$?(} > <++*'' \": [2, 5, 6],\n \"\": [10, 11, 1],\n \" A\": [4, 7, 9],\n \" \": [1, 2, 1],\n \"it's\": [6, 3, 1],\n \"that's\": [9, 1, 8],\n \"☺\": [8, 7, 6],\n \"foo#bar\": [2, 4, 5],\n 1: [5, 7, 9],\n }\n )\n\n def test_single_backtick_variable_query(self, df):\n res = df.query(\"1 < `B B`\")\n expect = df[1 < df[\"B B\"]]\n tm.assert_frame_equal(res, expect)\n\n def test_two_backtick_variables_query(self, df):\n res = df.query(\"1 < `B B` and 4 < `C C`\")\n expect = df[(1 < df[\"B B\"]) & (4 < df[\"C C\"])]\n tm.assert_frame_equal(res, expect)\n\n def test_single_backtick_variable_expr(self, df):\n res = df.eval(\"A + `B B`\")\n expect = df[\"A\"] + df[\"B B\"]\n tm.assert_series_equal(res, expect)\n\n def test_two_backtick_variables_expr(self, df):\n res = df.eval(\"`B B` + `C C`\")\n expect = df[\"B B\"] + df[\"C C\"]\n tm.assert_series_equal(res, expect)\n\n def test_already_underscore_variable(self, df):\n res = df.eval(\"`C_C` + A\")\n expect = df[\"C_C\"] + df[\"A\"]\n tm.assert_series_equal(res, expect)\n\n def test_same_name_but_underscores(self, df):\n res = df.eval(\"C_C + `C C`\")\n expect = df[\"C_C\"] + df[\"C C\"]\n tm.assert_series_equal(res, expect)\n\n def test_mixed_underscores_and_spaces(self, df):\n res = df.eval(\"A + `D_D D`\")\n expect = df[\"A\"] + df[\"D_D D\"]\n tm.assert_series_equal(res, expect)\n\n def test_backtick_quote_name_with_no_spaces(self, df):\n res = df.eval(\"A + `C_C`\")\n expect = df[\"A\"] + df[\"C_C\"]\n tm.assert_series_equal(res, expect)\n\n def test_special_characters(self, df):\n res = df.eval(\"`E.E` + `F-F` - A\")\n expect = df[\"E.E\"] + df[\"F-F\"] - df[\"A\"]\n tm.assert_series_equal(res, expect)\n\n def test_start_with_digit(self, df):\n res = df.eval(\"A + `1e1`\")\n expect = df[\"A\"] + df[\"1e1\"]\n tm.assert_series_equal(res, expect)\n\n def test_keyword(self, df):\n res = df.eval(\"A + `def`\")\n expect = df[\"A\"] + df[\"def\"]\n tm.assert_series_equal(res, expect)\n\n def test_unneeded_quoting(self, df):\n res = df.query(\"`A` > 2\")\n expect = df[df[\"A\"] > 2]\n tm.assert_frame_equal(res, expect)\n\n def test_parenthesis(self, df):\n res = df.query(\"`A (x)` > 2\")\n expect = df[df[\"A (x)\"] > 2]\n tm.assert_frame_equal(res, expect)\n\n def test_empty_string(self, df):\n res = df.query(\"`` > 5\")\n expect = df[df[\"\"] > 5]\n tm.assert_frame_equal(res, expect)\n\n def test_multiple_spaces(self, df):\n res = df.query(\"`C C` > 5\")\n expect = df[df[\"C C\"] > 5]\n tm.assert_frame_equal(res, expect)\n\n def 
test_start_with_spaces(self, df):\n res = df.eval(\"` A` + ` `\")\n expect = df[\" A\"] + df[\" \"]\n tm.assert_series_equal(res, expect)\n\n def test_lots_of_operators_string(self, df):\n res = df.query(\"` &^ :!€$?(} > <++*'' ` > 4\")\n expect = df[df[\" &^ :!€$?(} > <++*'' \"] > 4]\n tm.assert_frame_equal(res, expect)\n\n def test_missing_attribute(self, df):\n message = \"module 'pandas' has no attribute 'thing'\"\n with pytest.raises(AttributeError, match=message):\n df.eval(\"@pd.thing\")\n\n def test_failing_quote(self, df):\n msg = r\"(Could not convert ).*( to a valid Python identifier.)\"\n with pytest.raises(SyntaxError, match=msg):\n df.query(\"`it's` > `that's`\")\n\n def test_failing_character_outside_range(self, df):\n msg = r\"(Could not convert ).*( to a valid Python identifier.)\"\n with pytest.raises(SyntaxError, match=msg):\n df.query(\"`☺` > 4\")\n\n def test_failing_hashtag(self, df):\n msg = \"Failed to parse backticks\"\n with pytest.raises(SyntaxError, match=msg):\n df.query(\"`foo#bar` > 4\")\n\n def test_call_non_named_expression(self, df):\n \"\"\"\n Only attributes and variables ('named functions') can be called.\n .__call__() is not an allowed attribute because that would allow\n calling anything.\n https://github.com/pandas-dev/pandas/pull/32460\n \"\"\"\n\n def func(*_):\n return 1\n\n funcs = [func] # noqa\n\n df.eval(\"@func()\")\n\n with pytest.raises(TypeError, match=\"Only named functions are supported\"):\n df.eval(\"@funcs[0]()\")\n\n with pytest.raises(TypeError, match=\"Only named functions are supported\"):\n df.eval(\"@funcs[0].__call__()\")\n"
] |
[
[
"pandas.eval",
"numpy.random.choice",
"numpy.isnan",
"numpy.arange",
"numpy.tile",
"pandas.MultiIndex.from_arrays",
"pandas.DataFrame",
"pandas._testing.makeCustomDataframe",
"numpy.random.randn",
"numpy.random.rand",
"pandas.date_range",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_frame_equal",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mpashchenkov/open_model_zoo
|
[
"ee811e1dc0ac79aba46b82322102a728b9ba5de3"
] |
[
"tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/segnet_background_matting.py"
] |
[
"\"\"\"\nCopyright (c) 2018-2021 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport numpy as np\nfrom .sr_evaluator import SuperResolutionFeedbackEvaluator\nfrom .base_models import BaseCascadeModel, create_model, BaseDLSDKModel, BaseONNXModel, BaseOpenVINOModel\nfrom ...adapters import create_adapter\nfrom ...utils import contains_any, contains_all, generate_layer_name, extract_image_representations\nfrom ...config import ConfigError\n\n\nclass FeedbackModel:\n def set_feedback(self, feedback):\n if np.ndim(feedback) == 2:\n feedback = np.expand_dims(feedback, -1)\n if np.shape(feedback)[0] == 1:\n feedback = np.transpose(feedback, (1, 2, 0))\n if feedback.max() > 1:\n feedback = feedback.astype(np.float32) / 255\n self.feedback = feedback\n self._feedback_shape = feedback.shape\n\n def reset_state(self):\n if self._feedback_shape is None:\n self.feedback = None\n else:\n self.feedback = np.zeros(self._feedback_shape)\n\n\nclass ONNXFeedbackModel(FeedbackModel, BaseONNXModel):\n def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):\n self.feedback = None\n self._feedback_shape = None\n self.adapter = create_adapter(network_info.get('adapter', 'background_matting'))\n super().__init__(network_info, launcher, suffix, delayed_model_loading)\n\n def predict(self, identifiers, input_data):\n raw_results = self.inference_session.run((self.output_blob.name,), self.fit_to_input(input_data))\n results = self.adapter.process([{self.output_blob.name: raw_results[0]}], identifiers, [{}])\n\n return {self.output_blob: raw_results[0]}, results[0]\n\n def fit_to_input(self, input_data):\n if self.feedback is None:\n h, w = input_data.shape[:2]\n self.feedback = np.zeros((h, w, 1), dtype=np.float32)\n return {\n self.input_blob.name: np.expand_dims(\n np.transpose(np.concatenate([input_data, self.feedback], -1), (2, 0, 1)), 0\n ).astype(np.float32)\n }\n\n\nclass DLSDKFeedbackModel(FeedbackModel, BaseDLSDKModel):\n def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):\n self.feedback = None\n self._feedback_shape = None\n self.adapter = create_adapter(network_info.get('adapter', 'background_matting'))\n super().__init__(network_info, launcher, suffix, delayed_model_loading)\n\n def predict(self, identifiers, input_data):\n data = self.fit_to_input(input_data)\n if not self.is_dynamic and self.dynamic_inputs:\n self._reshape_input({key: in_data.shape for key, in_data in data.items()})\n raw_result = self.exec_network.infer(data)\n result = self.adapter.process([raw_result], identifiers, [{}])\n return raw_result, result[0]\n\n def fit_to_input(self, input_data):\n if self.feedback is None:\n h, w = input_data.shape[:2]\n self.feedback = np.zeros((h, w, 1), dtype=np.float32)\n return {self.input_blob: np.expand_dims(\n np.transpose(np.concatenate([input_data, self.feedback], -1), (2, 0, 1)), 0\n )}\n\n def set_input_and_output(self):\n has_info = hasattr(self.exec_network, 'input_info')\n input_info = self.exec_network.input_info 
if has_info else self.exec_network.inputs\n input_blob = next(iter(input_info))\n with_prefix = input_blob.startswith(self.default_model_suffix + '_')\n if self.input_blob is None:\n self.input_blob = input_blob\n self.output_blob = next(iter(self.exec_network.outputs))\n if with_prefix != self.with_prefix:\n self.input_blob = generate_layer_name(self.input_blob, self.default_model_suffix, with_prefix)\n self.output_blob = generate_layer_name(self.output_blob, self.default_model_suffix, with_prefix)\n self.adapter.output_blob = self.output_blob\n\n self.with_prefix = with_prefix\n\n def load_network(self, network, launcher):\n super().load_network(network, launcher)\n self.set_input_and_output()\n\n\nclass OpenVINOFeedbackModel(FeedbackModel, BaseOpenVINOModel):\n def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):\n self.feedback = None\n self._feedback_shape = None\n self.adapter = create_adapter(network_info.get('adapter', 'background_matting'))\n super().__init__(network_info, launcher, suffix, delayed_model_loading)\n\n def predict(self, identifiers, input_data):\n data = self.fit_to_input(input_data)\n if not self.is_dynamic and self.dynamic_inputs:\n self._reshape_input({key: in_data.shape for key, in_data in data.items()})\n raw_result = self.infer(data)\n result = self.adapter.process([raw_result], identifiers, [{}])\n return raw_result, result[0]\n\n def fit_to_input(self, input_data):\n if self.feedback is None:\n h, w = input_data.shape[:2]\n self.feedback = np.zeros((h, w, 1), dtype=np.float32)\n return {self.input_blob: np.expand_dims(\n np.transpose(np.concatenate([input_data, self.feedback], -1), (2, 0, 1)), 0\n )}\n\n def set_input_and_output(self):\n input_blob = next(iter(self.inputs))\n with_prefix = input_blob.startswith(self.default_model_suffix + '_')\n if self.input_blob is None:\n self.input_blob = input_blob\n self.output_blob = next(iter(self.exec_network.outputs))\n if with_prefix != self.with_prefix:\n self.input_blob = generate_layer_name(self.input_blob, self.default_model_suffix, with_prefix)\n self.output_blob = generate_layer_name(self.output_blob, self.default_model_suffix, with_prefix)\n self.adapter.output_blob = self.output_blob\n\n self.with_prefix = with_prefix\n\n def load_network(self, network, launcher):\n super().load_network(network, launcher)\n self.set_input_and_output()\n\n\nclass VideoBackgroundMatting(SuperResolutionFeedbackEvaluator):\n @classmethod\n def from_configs(cls, config, delayed_model_loading=False, orig_config=None):\n dataset_config, launcher, _ = cls.get_dataset_and_launcher_info(config)\n model = SegnetModel(\n config.get('network_info', {}), launcher, config.get('_models', []), config.get('_model_is_blob'),\n delayed_model_loading\n )\n return cls(dataset_config, launcher, model, orig_config)\n\n def _process(self, output_callback, calculate_metrics, progress_reporter, metric_config, csv_file):\n previous_video_id = ''\n for batch_id, (batch_input_ids, batch_annotation, batch_inputs, batch_identifiers) in enumerate(self.dataset):\n if previous_video_id != batch_identifiers[0].video_id:\n self.model.reset()\n batch_inputs = self.preprocessor.process(batch_inputs, batch_annotation)\n batch_inputs_extr, _ = extract_image_representations(batch_inputs)\n batch_raw_prediction, batch_prediction = self.model.predict(\n batch_identifiers, batch_inputs_extr\n )\n self.model.set_feedback(batch_prediction[0].value)\n previous_video_id = batch_prediction[0].identifier.video_id\n annotation, prediction = 
self.postprocessor.process_batch(batch_annotation, batch_prediction)\n metrics_result = self._get_metrics_result(batch_input_ids, annotation, prediction, calculate_metrics)\n if output_callback:\n output_callback(batch_raw_prediction[0], metrics_result=metrics_result,\n element_identifiers=batch_identifiers, dataset_indices=batch_input_ids)\n self._update_progress(progress_reporter, metric_config, batch_id, len(prediction), csv_file)\n\n\nclass SegnetModel(BaseCascadeModel):\n def __init__(self, network_info, launcher, models_args, is_blob, delayed_model_loading=False):\n super().__init__(network_info, launcher)\n if models_args and not delayed_model_loading:\n model = network_info.get('segnet_model', {})\n if not contains_any(model, ['model', 'onnx_model']) and models_args:\n model['model'] = models_args[0]\n model['_model_is_blob'] = is_blob\n network_info.update({'segnet_model': model})\n if not contains_all(network_info, ['segnet_model']) and not delayed_model_loading:\n raise ConfigError('network_info should contain segnet_model field')\n self._model_mapping = {\n 'dlsdk': DLSDKFeedbackModel,\n 'openvino': OpenVINOFeedbackModel,\n 'onnx_runtime': ONNXFeedbackModel,\n }\n self.model = create_model(network_info['segnet_model'], launcher, self._model_mapping, 'segnet_model',\n delayed_model_loading)\n self._part_by_name = {'segnet_model': self.model}\n\n def predict(self, identifiers, input_data):\n predictions, raw_outputs = [], []\n for data in input_data:\n output, prediction = self.model.predict(identifiers, data)\n raw_outputs.append(output)\n predictions.append(prediction)\n return raw_outputs, predictions\n\n def reset(self):\n self.model.reset_state()\n\n def set_feedback(self, feedback):\n self.model.set_feedback(feedback)\n"
] |
[
[
"numpy.expand_dims",
"numpy.ndim",
"numpy.concatenate",
"numpy.shape",
"numpy.transpose",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
james-yoo/onshape-to-robot
|
[
"c48bdfebb9f8aacb7a2d55bc063363364e74a46f"
] |
[
"onshape-to-robot.py"
] |
[
"#!/usr/bin/env python\nimport numpy as np\nfrom onshape_api.client import Client\nfrom copy import copy\nfrom robot import RobotURDF, RobotSDF\nimport sys\nimport os\nimport json\nimport csg\n\n# Loading configuration\nrobot = 'robots/demo/'\nif len(sys.argv) > 1:\n robot = sys.argv[1]\n\nconfigFile = robot+'/config.json'\nclient = Client(logging=False, creds=configFile)\nconfig = json.load(open(configFile))\n\ndef configGet(name, default=None):\n global config\n if name in config:\n return config[name]\n else:\n if default is None:\n print('! ERROR missing key \"'+name+'\" in config')\n exit()\n else:\n return default\n\ndocumentId = configGet('documentId')\ndrawFrames = configGet('drawFrames')\ndrawCollisions = configGet('drawCollisions', False)\nuseScads = configGet('useScads', True)\nassemblyName = configGet('assemblyName', False)\noutputFormat = configGet('outputFormat', 'urdf')\njointMaxEffort = configGet('jointMaxEffort', 1)\njointMaxVelocity = configGet('jointMaxVelocity', 20)\nnoDynamics =configGet('noDynamics', False)\noutputDirectory = robot\ntmp = configGet('dynamics', {})\ndynamicsOverride = {}\nfor key in tmp:\n dynamicsOverride[key.lower()] = tmp[key]\n\ntry:\n os.makedirs(outputDirectory)\nexcept OSError:\n pass\n\nprint('* Retrieving workspace ID ...')\ndocument = client.get_document(documentId).json()\nworkspaceId = document['defaultWorkspace']['id']\nprint('- Workspace id: '+workspaceId)\n\nprint('* Retrieving elements in the document, searching for the assembly...')\nelements = client.list_elements(documentId).json()\nassemblyId = None\nfor element in elements:\n if element['type'] == 'Assembly' and (assemblyName is False or element['name'].lower() == assemblyName):\n print(\"- Found assembly, id: \"+element['id']+', name: \"'+element['name']+'\"')\n assemblyId = element['id']\n\nif assemblyId == None:\n print(\"! 
Unable to find assembly in this document\")\n exit(1)\n\nprint('* Retrieving assembly')\nassembly = client.get_assembly(documentId, workspaceId, assemblyId)\n\n# Collecting parts instance from assembly and subassemblies\ninstances = {}\nfirstInstance = None\ndef collectParts(instancesToWalk):\n global firstInstance\n for instance in instancesToWalk:\n if firstInstance is None:\n firstInstance = instance['id']\n instances[instance['id']] = instance\n\nroot = assembly['rootAssembly']\ncollectParts(root['instances'])\nfor asm in assembly['subAssemblies']:\n collectParts(asm['instances'])\n\n# Collecting occurences\noccurrences = {}\nfor occurrence in root['occurrences']:\n occurrence['assignation'] = None\n occurrence['instance'] = instances[occurrence['path'][-1]]\n occurrence['transform'] = np.matrix(np.reshape(occurrence['transform'], (4, 4)))\n occurrences[tuple(occurrence['path'])] = occurrence\n\n# Gets an occurrence given its path\ndef getOccurrence(path):\n return occurrences[tuple(path)]\n\nassignations = {}\nframes = {}\ndef assignParts(root, parent):\n assignations[root] = parent\n for occurrence in occurrences.values():\n if occurrence['path'][0] == root:\n occurrence['assignation'] = parent\n\nprint('* Getting assembly features, scanning for DOFs...')\ntrunk = None\nrelations = {}\ntopLevels = set()\nfeatures = root['features']\nfor feature in features:\n data = feature['featureData']\n\n child = data['matedEntities'][0]['matedOccurrence'][0]\n parent = data['matedEntities'][1]['matedOccurrence'][0]\n\n if data['name'][0:3] == 'dof':\n parts = data['name'].split('_')\n del parts[0]\n data['inverted'] = False\n if parts[-1] == 'inv' or parts[-1] == 'inverted':\n data['inverted'] = True\n del parts[-1]\n name = '_'.join(parts)\n if name == '':\n print('! 
Error: a DOF dones\\'t have any name (\"'+data['name']+'\" should be \"dof_...\")')\n exit()\n \n relations[child] = [parent, data, name]\n assignParts(child, child)\n assignParts(parent, parent)\n if child not in frames:\n frames[child] = []\n if parent not in frames:\n frames[parent] = []\n \nprint('- Found '+str(len(relations))+' DOFs')\n\n# If we have no DOF\nif len(relations) == 0:\n trunk = firstInstance\n assignParts(firstInstance, firstInstance)\n\n# Spreading parts assignations\nchanged = True\nwhile changed:\n changed = False\n for feature in features:\n data = feature['featureData']\n occurrenceA = data['matedEntities'][0]['matedOccurrence'][0]\n occurrenceB = data['matedEntities'][1]['matedOccurrence'][0]\n\n if (occurrenceA not in assignations) or (occurrenceB not in assignations):\n if data['name'][0:5] == 'frame':\n name = '_'.join(data['name'].split('_')[1:])\n if occurrenceA in assignations:\n frames[occurrenceA].append([name, data['matedEntities'][1]['matedOccurrence']])\n assignParts(occurrenceB, {True: assignations[occurrenceA], False: 'frame'}[drawFrames])\n changed = True\n if occurrenceB in assignations:\n frames[occurrenceB].append([name, data['matedEntities'][0]['matedOccurrence']])\n assignParts(occurrenceA, {True: assignations[occurrenceB], False: 'frame'}[drawFrames])\n changed = True\n else:\n if occurrenceA in assignations:\n assignParts(occurrenceB, assignations[occurrenceA])\n changed = True\n if occurrenceB in assignations:\n assignParts(occurrenceA, assignations[occurrenceB])\n changed = True\n \nprint('* Building robot tree')\ndef collect(id):\n part = {}\n part['id'] = id\n part['children'] = []\n for childId in relations:\n entry = relations[childId]\n if entry[0] == id:\n child = collect(childId)\n child['mate'] = entry[1]\n child['relation'] = entry[2]\n part['children'].append(child)\n return part\n\n# Searching for the trunk, assuming it is an element that has\n# not children\nfor childId in relations:\n entry = relations[childId]\n if entry[0] not in relations:\n trunk = entry[0]\n break\ntrunkOccurrence = getOccurrence([trunk])\nprint('Trunk is '+trunkOccurrence['instance']['name'])\n\nfor occurrence in occurrences.values():\n if occurrence['assignation'] is None:\n print('! WARNING, part ('+occurrence['instance']['name']+') has no assignation, connecting it with trunk')\n occurrence['assignation'] = trunk\n\ntree = collect(trunk)\n\nif outputFormat == 'urdf':\n robot = RobotURDF()\nelif outputFormat == 'sdf':\n robot = RobotSDF()\nelse:\n print('! 
ERROR Unknown output format: '+outputFormat+' (supported are urdf and sdf')\n exit()\nrobot.drawCollisions = drawCollisions\nrobot.jointMaxEffort = jointMaxEffort\nrobot.jointMaxVelocity = jointMaxVelocity\nrobot.noDynamics = noDynamics\n\n# Adds a part to the current robot link\ndef addPart(occurrence, matrix):\n global noDynamics, dynamicsOverride\n part = occurrence['instance']\n\n # Importing STL file for this part\n prefix = extractPartName(part['name'])\n stlFile = prefix+'.stl'\n stl = client.part_studio_stl_m(part['documentId'], part['documentMicroversion'], part['elementId'], part['partId'])\n f = open(outputDirectory+'/'+stlFile, 'wb')\n f.write(stl)\n f.close()\n\n # Import the SCAD files pure shapes\n shapes = None\n if useScads:\n scadFile = prefix+'.scad'\n if os.path.exists(outputDirectory+'/'+scadFile):\n shapes = csg.process(outputDirectory+'/'+scadFile)\n \n # Obtain metadatas about part to retrieve color\n metadata = client.part_get_metadata(part['documentId'], part['documentMicroversion'], part['elementId'], part['partId'])\n if 'appearance' in metadata:\n colors = metadata['appearance']['color']\n color = np.array([colors['red'], colors['green'], colors['blue']])/255.0\n else:\n color = [0.5, 0.5, 0.5]\n\n # Obtain mass properties about that part\n if noDynamics:\n mass = 0\n com = [0]*3\n inertia = [0]*12\n else:\n if prefix in dynamicsOverride:\n entry = dynamicsOverride[prefix]\n mass = entry['mass']\n com = entry['com']\n inertia = entry['inertia']\n else:\n massProperties = client.part_mass_properties(part['documentId'], part['documentMicroversion'], part['elementId'], part['partId'])\n massProperties = massProperties['bodies'][part['partId']]\n mass = massProperties['mass'][0]\n com = massProperties['centroid']\n inertia = massProperties['inertia']\n\n if abs(mass) < 1e-9:\n print('! 
WARNING Part '+part['name']+' has no mass, maybe you should assign a material to it ?')\n\n pose = occurrence['transform']\n if robot.relative:\n pose = np.linalg.inv(matrix)*pose\n robot.addPart(pose, stlFile, mass, com, inertia, color, shapes)\n\npartNames = {}\ndef extractPartName(name):\n parts = name.split(' ')\n del parts[-1]\n return '_'.join(parts).lower()\n\ndef processPartName(name):\n global partNames\n name = extractPartName(name)\n\n if name in partNames:\n partNames[name] += 1\n else:\n partNames[name] = 1\n\n return name+'_'+str(partNames[name])\n\ndef buildRobot(tree, matrix, linkPart=None):\n occurrence = getOccurrence([tree['id']])\n instance = occurrence['instance']\n print('~ Adding top-level instance ['+instance['name']+']')\n\n link = processPartName(instance['name'])\n\n # Collecting all children in the tree assigned to this top-level part\n robot.startLink(link, matrix)\n for occurrence in occurrences.values():\n if occurrence['assignation'] == tree['id'] and occurrence['instance']['type'] == 'Part':\n name = '_'.join(occurrence['path'])\n print('- Adding part '+occurrence['instance']['name'])\n addPart(occurrence, matrix)\n robot.endLink()\n\n # Adding the frames (linkage is relative to parent)\n if tree['id'] in frames:\n for name, part in frames[tree['id']]:\n frame = getOccurrence(part)['transform']\n if robot.relative:\n frame = np.linalg.inv(matrix)*frame\n robot.addFrame(name, frame)\n\n # Calling the function with recursion for children\n k = 0\n for child in tree['children']:\n mate = child['mate']\n if mate['matedEntities'][0]['matedOccurrence'][0] == tree['id']:\n matedOccurrence = mate['matedEntities'][1]\n else:\n matedOccurrence = mate['matedEntities'][0]\n childLinkPart = getOccurrence(matedOccurrence['matedOccurrence'])\n\n worldAxisFrame = childLinkPart['transform']\n origin = matedOccurrence['matedCS']['origin']\n zAxis = np.array(matedOccurrence['matedCS']['zAxis'])\n\n if mate['inverted']:\n zAxis = -zAxis\n\n translation = np.matrix(np.identity(4))\n translation[0, 3] += origin[0]\n translation[1, 3] += origin[1]\n translation[2, 3] += origin[2]\n worldAxisFrame = worldAxisFrame * translation\n\n childMatrix = matrix\n axisFrame = worldAxisFrame\n if robot.relative:\n axisFrame = np.linalg.inv(matrix)*axisFrame\n childMatrix = worldAxisFrame\n\n subLink = buildRobot(child, childMatrix, '_'.join(childLinkPart['path']))\n robot.addJoint(link, subLink, axisFrame, child['relation'], zAxis)\n\n return link\n\n# Start building the robot\nbuildRobot(tree, np.matrix(np.identity(4)))\nrobot.finalize()\n# print(tree)\n\nprint(\"* Writing \"+robot.ext+\" file\")\nf = open(outputDirectory+'/robot.'+robot.ext, 'w')\nf.write(robot.xml)\nf.close()\n"
] |
[
[
"numpy.reshape",
"numpy.array",
"numpy.identity",
"numpy.linalg.inv"
]
] |
[
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
towzeur/vision
|
[
"cabf5876e2a16bf3132a63c7c19cfc35c7c241e1"
] |
[
"torchvision/models/efficientnet.py"
] |
[
"import copy\nimport math\nfrom functools import partial\nfrom typing import Any, Callable, Optional, List, Sequence\n\nimport torch\nfrom torch import nn, Tensor\nfrom torchvision.ops import StochasticDepth\n\nfrom .._internally_replaced_utils import load_state_dict_from_url\nfrom ..ops.misc import ConvNormActivation, SqueezeExcitation\nfrom ..utils import _log_api_usage_once\nfrom ._utils import _make_divisible\n\n\n__all__ = [\n \"EfficientNet\",\n \"efficientnet_b0\",\n \"efficientnet_b1\",\n \"efficientnet_b2\",\n \"efficientnet_b3\",\n \"efficientnet_b4\",\n \"efficientnet_b5\",\n \"efficientnet_b6\",\n \"efficientnet_b7\",\n]\n\n\nmodel_urls = {\n # Weights ported from https://github.com/rwightman/pytorch-image-models/\n \"efficientnet_b0\": \"https://download.pytorch.org/models/efficientnet_b0_rwightman-3dd342df.pth\",\n \"efficientnet_b1\": \"https://download.pytorch.org/models/efficientnet_b1_rwightman-533bc792.pth\",\n \"efficientnet_b2\": \"https://download.pytorch.org/models/efficientnet_b2_rwightman-bcdf34b7.pth\",\n \"efficientnet_b3\": \"https://download.pytorch.org/models/efficientnet_b3_rwightman-cf984f9c.pth\",\n \"efficientnet_b4\": \"https://download.pytorch.org/models/efficientnet_b4_rwightman-7eb33cd5.pth\",\n # Weights ported from https://github.com/lukemelas/EfficientNet-PyTorch/\n \"efficientnet_b5\": \"https://download.pytorch.org/models/efficientnet_b5_lukemelas-b6417697.pth\",\n \"efficientnet_b6\": \"https://download.pytorch.org/models/efficientnet_b6_lukemelas-c76e70fd.pth\",\n \"efficientnet_b7\": \"https://download.pytorch.org/models/efficientnet_b7_lukemelas-dcc49843.pth\",\n}\n\n\nclass MBConvConfig:\n # Stores information listed at Table 1 of the EfficientNet paper\n def __init__(\n self,\n expand_ratio: float,\n kernel: int,\n stride: int,\n input_channels: int,\n out_channels: int,\n num_layers: int,\n width_mult: float,\n depth_mult: float,\n ) -> None:\n self.expand_ratio = expand_ratio\n self.kernel = kernel\n self.stride = stride\n self.input_channels = self.adjust_channels(input_channels, width_mult)\n self.out_channels = self.adjust_channels(out_channels, width_mult)\n self.num_layers = self.adjust_depth(num_layers, depth_mult)\n\n def __repr__(self) -> str:\n s = (\n f\"{self.__class__.__name__}(\"\n f\"expand_ratio={self.expand_ratio}\"\n f\", kernel={self.kernel}\"\n f\", stride={self.stride}\"\n f\", input_channels={self.input_channels}\"\n f\", out_channels={self.out_channels}\"\n f\", num_layers={self.num_layers}\"\n f\")\"\n )\n return s\n\n @staticmethod\n def adjust_channels(channels: int, width_mult: float, min_value: Optional[int] = None) -> int:\n return _make_divisible(channels * width_mult, 8, min_value)\n\n @staticmethod\n def adjust_depth(num_layers: int, depth_mult: float):\n return int(math.ceil(num_layers * depth_mult))\n\n\nclass MBConv(nn.Module):\n def __init__(\n self,\n cnf: MBConvConfig,\n stochastic_depth_prob: float,\n norm_layer: Callable[..., nn.Module],\n se_layer: Callable[..., nn.Module] = SqueezeExcitation,\n ) -> None:\n super().__init__()\n\n if not (1 <= cnf.stride <= 2):\n raise ValueError(\"illegal stride value\")\n\n self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels\n\n layers: List[nn.Module] = []\n activation_layer = nn.SiLU\n\n # expand\n expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)\n if expanded_channels != cnf.input_channels:\n layers.append(\n ConvNormActivation(\n cnf.input_channels,\n expanded_channels,\n kernel_size=1,\n 
norm_layer=norm_layer,\n activation_layer=activation_layer,\n )\n )\n\n # depthwise\n layers.append(\n ConvNormActivation(\n expanded_channels,\n expanded_channels,\n kernel_size=cnf.kernel,\n stride=cnf.stride,\n groups=expanded_channels,\n norm_layer=norm_layer,\n activation_layer=activation_layer,\n )\n )\n\n # squeeze and excitation\n squeeze_channels = max(1, cnf.input_channels // 4)\n layers.append(se_layer(expanded_channels, squeeze_channels, activation=partial(nn.SiLU, inplace=True)))\n\n # project\n layers.append(\n ConvNormActivation(\n expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None\n )\n )\n\n self.block = nn.Sequential(*layers)\n self.stochastic_depth = StochasticDepth(stochastic_depth_prob, \"row\")\n self.out_channels = cnf.out_channels\n\n def forward(self, input: Tensor) -> Tensor:\n result = self.block(input)\n if self.use_res_connect:\n result = self.stochastic_depth(result)\n result += input\n return result\n\n\nclass EfficientNet(nn.Module):\n def __init__(\n self,\n inverted_residual_setting: List[MBConvConfig],\n dropout: float,\n stochastic_depth_prob: float = 0.2,\n num_classes: int = 1000,\n block: Optional[Callable[..., nn.Module]] = None,\n norm_layer: Optional[Callable[..., nn.Module]] = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"\n EfficientNet main class\n\n Args:\n inverted_residual_setting (List[MBConvConfig]): Network structure\n dropout (float): The droupout probability\n stochastic_depth_prob (float): The stochastic depth probability\n num_classes (int): Number of classes\n block (Optional[Callable[..., nn.Module]]): Module specifying inverted residual building block for mobilenet\n norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use\n \"\"\"\n super().__init__()\n _log_api_usage_once(self)\n\n if not inverted_residual_setting:\n raise ValueError(\"The inverted_residual_setting should not be empty\")\n elif not (\n isinstance(inverted_residual_setting, Sequence)\n and all([isinstance(s, MBConvConfig) for s in inverted_residual_setting])\n ):\n raise TypeError(\"The inverted_residual_setting should be List[MBConvConfig]\")\n\n if block is None:\n block = MBConv\n\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n\n layers: List[nn.Module] = []\n\n # building first layer\n firstconv_output_channels = inverted_residual_setting[0].input_channels\n layers.append(\n ConvNormActivation(\n 3, firstconv_output_channels, kernel_size=3, stride=2, norm_layer=norm_layer, activation_layer=nn.SiLU\n )\n )\n\n # building inverted residual blocks\n total_stage_blocks = sum(cnf.num_layers for cnf in inverted_residual_setting)\n stage_block_id = 0\n for cnf in inverted_residual_setting:\n stage: List[nn.Module] = []\n for _ in range(cnf.num_layers):\n # copy to avoid modifications. 
shallow copy is enough\n block_cnf = copy.copy(cnf)\n\n # overwrite info if not the first conv in the stage\n if stage:\n block_cnf.input_channels = block_cnf.out_channels\n block_cnf.stride = 1\n\n # adjust stochastic depth probability based on the depth of the stage block\n sd_prob = stochastic_depth_prob * float(stage_block_id) / total_stage_blocks\n\n stage.append(block(block_cnf, sd_prob, norm_layer))\n stage_block_id += 1\n\n layers.append(nn.Sequential(*stage))\n\n # building last several layers\n lastconv_input_channels = inverted_residual_setting[-1].out_channels\n lastconv_output_channels = 4 * lastconv_input_channels\n layers.append(\n ConvNormActivation(\n lastconv_input_channels,\n lastconv_output_channels,\n kernel_size=1,\n norm_layer=norm_layer,\n activation_layer=nn.SiLU,\n )\n )\n\n self.features = nn.Sequential(*layers)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.classifier = nn.Sequential(\n nn.Dropout(p=dropout, inplace=True),\n nn.Linear(lastconv_output_channels, num_classes),\n )\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\")\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Linear):\n init_range = 1.0 / math.sqrt(m.out_features)\n nn.init.uniform_(m.weight, -init_range, init_range)\n nn.init.zeros_(m.bias)\n\n def _forward_impl(self, x: Tensor) -> Tensor:\n x = self.features(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n\n x = self.classifier(x)\n\n return x\n\n def forward(self, x: Tensor) -> Tensor:\n return self._forward_impl(x)\n\n\ndef _efficientnet(\n arch: str,\n width_mult: float,\n depth_mult: float,\n dropout: float,\n pretrained: bool,\n progress: bool,\n **kwargs: Any,\n) -> EfficientNet:\n bneck_conf = partial(MBConvConfig, width_mult=width_mult, depth_mult=depth_mult)\n inverted_residual_setting = [\n bneck_conf(1, 3, 1, 32, 16, 1),\n bneck_conf(6, 3, 2, 16, 24, 2),\n bneck_conf(6, 5, 2, 24, 40, 2),\n bneck_conf(6, 3, 2, 40, 80, 3),\n bneck_conf(6, 5, 1, 80, 112, 3),\n bneck_conf(6, 5, 2, 112, 192, 4),\n bneck_conf(6, 3, 1, 192, 320, 1),\n ]\n model = EfficientNet(inverted_residual_setting, dropout, **kwargs)\n if pretrained:\n if model_urls.get(arch, None) is None:\n raise ValueError(f\"No checkpoint is available for model type {arch}\")\n state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)\n model.load_state_dict(state_dict)\n return model\n\n\ndef efficientnet_b0(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:\n \"\"\"\n Constructs a EfficientNet B0 architecture from\n `\"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks\" <https://arxiv.org/abs/1905.11946>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _efficientnet(\"efficientnet_b0\", 1.0, 1.0, 0.2, pretrained, progress, **kwargs)\n\n\ndef efficientnet_b1(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:\n \"\"\"\n Constructs a EfficientNet B1 architecture from\n `\"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks\" <https://arxiv.org/abs/1905.11946>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return 
_efficientnet(\"efficientnet_b1\", 1.0, 1.1, 0.2, pretrained, progress, **kwargs)\n\n\ndef efficientnet_b2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:\n \"\"\"\n Constructs a EfficientNet B2 architecture from\n `\"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks\" <https://arxiv.org/abs/1905.11946>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _efficientnet(\"efficientnet_b2\", 1.1, 1.2, 0.3, pretrained, progress, **kwargs)\n\n\ndef efficientnet_b3(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:\n \"\"\"\n Constructs a EfficientNet B3 architecture from\n `\"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks\" <https://arxiv.org/abs/1905.11946>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _efficientnet(\"efficientnet_b3\", 1.2, 1.4, 0.3, pretrained, progress, **kwargs)\n\n\ndef efficientnet_b4(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:\n \"\"\"\n Constructs a EfficientNet B4 architecture from\n `\"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks\" <https://arxiv.org/abs/1905.11946>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _efficientnet(\"efficientnet_b4\", 1.4, 1.8, 0.4, pretrained, progress, **kwargs)\n\n\ndef efficientnet_b5(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:\n \"\"\"\n Constructs a EfficientNet B5 architecture from\n `\"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks\" <https://arxiv.org/abs/1905.11946>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _efficientnet(\n \"efficientnet_b5\",\n 1.6,\n 2.2,\n 0.4,\n pretrained,\n progress,\n norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),\n **kwargs,\n )\n\n\ndef efficientnet_b6(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:\n \"\"\"\n Constructs a EfficientNet B6 architecture from\n `\"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks\" <https://arxiv.org/abs/1905.11946>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _efficientnet(\n \"efficientnet_b6\",\n 1.8,\n 2.6,\n 0.5,\n pretrained,\n progress,\n norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),\n **kwargs,\n )\n\n\ndef efficientnet_b7(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:\n \"\"\"\n Constructs a EfficientNet B7 architecture from\n `\"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks\" <https://arxiv.org/abs/1905.11946>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _efficientnet(\n \"efficientnet_b7\",\n 2.0,\n 3.1,\n 0.5,\n pretrained,\n progress,\n norm_layer=partial(nn.BatchNorm2d, eps=0.001, 
momentum=0.01),\n **kwargs,\n )\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.init.uniform_",
"torch.nn.Linear",
"torch.nn.init.ones_",
"torch.nn.AdaptiveAvgPool2d",
"torch.flatten",
"torch.nn.init.zeros_",
"torch.nn.init.kaiming_normal_"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
narminmammadsoy/taaraxtak
|
[
"8df538a23003d9b18120885eac9b78388f7be0d2",
"8df538a23003d9b18120885eac9b78388f7be0d2"
] |
[
"test/w3techs-test.py",
"src/shared/utils.py"
] |
[
"import src.w3techs.utils as utils\nimport src.w3techs.types as types\nimport src.shared.types as shared_types\n\nimport pandas as pd\nimport numpy as np\n\nimport psycopg2\nimport testing.postgresql\n\nimport pytest\n\n\[email protected](scope='function')\ndef postgresdb(request):\n '''Postgres testing mock'''\n postgresql = testing.postgresql.Postgresql()\n conn = psycopg2.connect(**postgresql.dsn())\n cur = conn.cursor()\n types.create_tables(cur, conn)\n\n def teardown():\n postgresql.stop()\n\n request.addfinalizer(teardown)\n\n return (cur, conn)\n\n#\n# scraper tests\n#\n\n\ndef read_html(pth):\n h = open(pth)\n h = '\\n'.join(h.readlines())\n return h\n\n\ndef test_single_table():\n '''\n Read a page with a single table.\n '''\n html = read_html('./test/w3techs-html/ex-single-table.html')\n df = utils.extract_table(html, double_table=False)\n assert(len(df) == 102)\n\n\ndef test_double_table():\n '''\n Read a page with a double table.\n '''\n html = read_html('./test/w3techs-html/ex-double-table.html')\n df = utils.extract_table(html, True)\n assert(len(df) == 12)\n\n\n#\n# types tests\n#\n\n\ndef test_create_tables(postgresdb):\n cur, conn = postgresdb\n\n\ndef test_provider_marketshare_type(postgresdb):\n cur, conn = postgresdb\n\n ex_ms = types.ProviderMarketshare(\n 'Foo', None, shared_types.Alpha2('NL'), 'ssl-certificate', 0.5, pd.Timestamp('2021-04-20')\n )\n ex_ms.write_to_db(cur, conn)\n\n cur.execute('SELECT * FROM provider_marketshare')\n item = cur.fetchone()\n assert(item[0] == 'Foo')\n\n\ndef test_pop_weighted_gini_type(postgresdb):\n cur, conn = postgresdb\n\n ex_g = types.PopWeightedGini(\n 'ssl-certificate', 0.9, pd.Timestamp('2021-04-20')\n )\n\n ex_g.write_to_db(cur, conn)\n\n cur.execute('SELECT * FROM pop_weighted_gini')\n item = cur.fetchone()\n assert(item[0] == 'ssl-certificate')\n\n#\n# utils tests\n#\n\n\ndef test_sum_proportions():\n '''\n summing up the total proportion of Internet users, we should get about 1.\n '''\n for tot in utils.prop_net_users.sum():\n assert(1 - tot < 0.02)\n\n\ndef test_gini_fn():\n # when everyone has the same amount, gini should be 1\n assert(\n utils.gini(np.array([0.25, 0.25, 0.25, 0.25])) == 0\n )\n\n # when one person has everything, gini should be very nearly one\n one_person_has_everything = [1] + np.zeros(100).tolist()\n assert(\n 1 - utils.gini(np.array(one_person_has_everything)) < 0.01\n )\n\n\ndef test_weighted_gini():\n marketshares = pd.Series([0.25, 0.25, 0.25, 0.25])\n population_shares = pd.Series([0.70, 0.10, 0.10, 0.10])\n assert(round(utils.weighted_gini(marketshares, population_shares), 2) ==\n 0.20)\n\n\ndef test_compute_pop_weighted_gini(postgresdb):\n # if there's nothing, it should reutrn one\n cur, conn = postgresdb\n res = utils.population_weighted_gini(\n cur,\n 'fake-market',\n pd.Timestamp('2021-01-20'),\n )\n assert(res is None)\n\n # add a provider marketshare\n # tiny netherlands has 50% of the world's market\n types.ProviderMarketshare(\n 'Foo', None, shared_types.Alpha2('NL'), 'ssl-certificate',\n 0.5, pd.Timestamp('2021-04-20')\n ).write_to_db(cur, conn)\n # US has the rest\n types.ProviderMarketshare(\n 'Foo', None, shared_types.Alpha2('US'), 'ssl-certificate',\n 0.5, pd.Timestamp('2021-04-20')\n ).write_to_db(cur, conn)\n\n res = utils.population_weighted_gini(\n cur, 'ssl-certificate', pd.Timestamp('2021-04-20')\n )\n # should result in a gini of 0.99\n assert(round(res.gini, 2) == 0.99)\n",
"from os import path\nimport pandas as pd\nimport logging\nfrom os.path import join\nimport pytz\nfrom datetime import datetime\n\nfrom typing import Optional\n\nfrom config import config\nimport coloredlogs\n\n#\n# Time\n#\ndef now() -> pd.Timestamp:\n return pd.Timestamp.utcnow()\n\n\ndef is_in_future(timestamp: pd.Timestamp) -> bool:\n return timestamp > now()\n\n\ndef to_utc(t: datetime) -> datetime:\n return t.astimezone(pytz.utc)\n\n\n#\n# Type validation\n#\ndef is_nonempty_str(my_str: str) -> bool:\n is_str = type(my_str) == str\n if is_str:\n return len(my_str) > 0\n return False\n\n\n#\n# Jurisdictions of providers\n#\ndirname = path.dirname(__file__)\npth = join(dirname, 'analysis', 'providers_labeled.csv')\nprovider_countries = pd.read_csv(pth).set_index('name').drop(['notes', 'url'], axis=1)\nprovider_countries = provider_countries['country (alpha2)'].to_dict()\n\n\ndef get_country(provider_name: str) -> Optional[str]:\n '''\n Returns alpha2 code (str of length 2).\n '''\n try:\n alpha2 = provider_countries[provider_name]\n if len(alpha2) == 2:\n return alpha2\n return None\n except (TypeError):\n logging.debug(f'Country code for {provider_name} is not a string: {alpha2}')\n return None\n except (KeyError):\n logging.info(f'Cannot find country for {provider_name}')\n return None\n\n\ndef configure_logging():\n logging_config = config['logging']\n log_level = logging_config['level']\n if logging_config['handler'] == 'file':\n logging.basicConfig(level=log_level, filename=logging_config['file'], format=logging_config['format'])\n else:\n logging.basicConfig(level=log_level)\n coloredlogs.install()\n coloredlogs.install(level=log_level)\n # Schedule lib logs out params (including db creds) by default so set this to WARNING and above\n logging.getLogger(\"schedule\").setLevel(logging.WARNING)\n # disable noisy logging by filelock (called by TLDExtract to deal with its cache)\n logging.getLogger(\"filelock\").setLevel(logging.ERROR)\n"
] |
[
[
"numpy.array",
"pandas.Timestamp",
"numpy.zeros",
"pandas.Series"
],
[
"pandas.read_csv",
"pandas.Timestamp.utcnow"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AlvaroCavalcante/video2tfrecord
|
[
"a77b6e999bbf0edbc254c0fa42549d9ab5f9013c"
] |
[
"src/data_augmentation.py"
] |
[
"import math\n\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\n\n\ndef get_rotation_matrix(rotation, zero, one):\n rotation = math.pi * rotation / 180.\n c1 = tf.math.cos(rotation)\n s1 = tf.math.sin(rotation)\n return tf.reshape(tf.concat([c1, s1, zero, -s1, c1, zero, zero, zero, one], axis=0), [3, 3])\n\n\ndef get_shear_matrix(shear, zero, one):\n shear = math.pi * shear / 180.\n c2 = tf.math.cos(shear)\n s2 = tf.math.sin(shear)\n return tf.reshape(tf.concat([one, s2, zero, zero, c2, zero, zero, zero, one], axis=0), [3, 3])\n\n\ndef multiply_matrix(transformation, origin_matrix, identity_matrix):\n if tf.reduce_all(transformation == identity_matrix):\n return origin_matrix\n\n return K.dot(transformation, origin_matrix)\n\n\ndef get_transform_matrix(rotation, shear, height_zoom, width_zoom, height_shift, width_shift, is_hand=False):\n one = tf.constant([1], dtype='float32')\n zero = tf.constant([0], dtype='float32')\n identity_matrix = tf.cast(tf.reshape(tf.concat(\n [one, zero, zero, zero, one, zero, zero, zero, one], axis=0), [3, 3]), dtype='float32')\n\n transform_matrix = identity_matrix\n\n if tf.random.uniform([], 0, 1.0, dtype=tf.float32) > 0.5:\n rotation_matrix = get_rotation_matrix(rotation, zero, one)\n transform_matrix = multiply_matrix(\n transform_matrix, rotation_matrix, identity_matrix)\n else:\n rotation_matrix = identity_matrix\n\n if tf.random.uniform([], 0, 1.0, dtype=tf.float32) > 0.5:\n shear_matrix = get_shear_matrix(shear, zero, one)\n transform_matrix = multiply_matrix(\n transform_matrix, shear_matrix, identity_matrix)\n else:\n shear_matrix = identity_matrix\n\n if is_hand:\n return transform_matrix\n\n if tf.random.uniform([], 0, 1.0, dtype=tf.float32) > 0.5:\n zoom_matrix = tf.reshape(tf.concat(\n [one/height_zoom, zero, zero, zero, one/width_zoom, zero, zero, zero, one], axis=0), [3, 3])\n transform_matrix = multiply_matrix(\n transform_matrix, zoom_matrix, identity_matrix)\n else:\n zoom_matrix = identity_matrix\n\n if tf.random.uniform([], 0, 1.0, dtype=tf.float32) > 0.5:\n shift_matrix = tf.reshape(tf.concat(\n [one, zero, height_shift, zero, one, width_shift, zero, zero, one], axis=0), [3, 3])\n else:\n shift_matrix = identity_matrix\n transform_matrix = multiply_matrix(\n transform_matrix, shift_matrix, identity_matrix)\n\n return transform_matrix\n\n\ndef apply_operation(image, transform_matrix, DIM, XDIM):\n # LIST DESTINATION PIXEL INDICES\n x = tf.repeat(tf.range(DIM//2, -DIM//2, -1), DIM)\n y = tf.tile(tf.range(-DIM//2, DIM//2), [DIM])\n z = tf.ones([DIM*DIM], dtype='int32')\n idx = tf.stack([x, y, z])\n\n # ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS\n idx2 = K.dot(transform_matrix, tf.cast(idx, dtype='float32'))\n idx2 = K.cast(idx2, dtype='int32')\n idx2 = K.clip(idx2, -DIM//2+XDIM+1, DIM//2)\n\n # FIND ORIGIN PIXEL VALUES\n idx3 = tf.stack([DIM//2-idx2[0, ], DIM//2-1+idx2[1, ]])\n d = tf.gather_nd(image, tf.transpose(idx3))\n return tf.reshape(d, [DIM, DIM, 3])\n\n\ndef transform(image, img_width, is_hand=False):\n DIM = img_width\n XDIM = DIM % 2\n\n rotation_range = [-30, 30]\n shear_range = [1, 10]\n h_zoom_range = [0.8, 1.2]\n w_zoom_range = [0.8, 1.2]\n h_shift_range = [0, 0.15]\n w_shift_range = [0, 0.05]\n\n rot = tf.random.uniform([1], rotation_range[0],\n rotation_range[1], dtype=tf.float32)\n shr = tf.random.uniform([1], shear_range[0],\n shear_range[1], dtype=tf.float32)\n h_zoom = tf.random.uniform(\n [1], h_zoom_range[0], h_zoom_range[1], dtype=tf.float32)\n w_zoom = tf.random.uniform(\n [1], w_zoom_range[0], 
w_zoom_range[1], dtype=tf.float32)\n h_shift = tf.random.uniform(\n [1], h_shift_range[0], h_shift_range[1], dtype=tf.float32) * DIM\n w_shift = tf.random.uniform(\n [1], w_shift_range[0], w_shift_range[1], dtype=tf.float32) * DIM\n\n transform_matrix = get_transform_matrix(\n rot, shr, h_zoom, w_zoom, h_shift, w_shift, is_hand)\n transformed_image = apply_operation(image, transform_matrix, DIM, XDIM)\n\n return transformed_image\n\n\ndef transform_batch(face, hand_1, hand_2, img_width):\n face = transform(face, img_width)\n hand_img_1 = transform(hand_1, img_width, True)\n hand_img_2 = transform(hand_2, img_width, True)\n\n return face, hand_img_1, hand_img_2\n\n"
] |
[
[
"tensorflow.constant",
"tensorflow.math.cos",
"tensorflow.concat",
"tensorflow.range",
"tensorflow.keras.backend.dot",
"tensorflow.stack",
"tensorflow.transpose",
"tensorflow.reshape",
"tensorflow.random.uniform",
"tensorflow.ones",
"tensorflow.keras.backend.cast",
"tensorflow.cast",
"tensorflow.math.sin",
"tensorflow.keras.backend.clip",
"tensorflow.reduce_all"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
wimvillano/git_telco
|
[
"2f783bdb2d72d7862594d15b97e6988b3bdb4b26"
] |
[
"dsfortelco_sklearn.py"
] |
[
"from pyspark.sql import SparkSession\nfrom pyspark.sql.types import *\nfrom pyspark.ml.feature import StringIndexer\nfrom pyspark.ml import Pipeline\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import roc_auc_score, average_precision_score\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport cdsw\n\nspark = SparkSession.builder \\\n .appName(\"Telco Customer Churn\") \\\n .getOrCreate()\n \nschemaData = StructType([StructField(\"state\", StringType(), True),StructField(\"account_length\", DoubleType(), True),StructField(\"area_code\", StringType(), True),StructField(\"phone_number\", StringType(), True),StructField(\"intl_plan\", StringType(), True),StructField(\"voice_mail_plan\", StringType(), True),StructField(\"number_vmail_messages\", DoubleType(), True), StructField(\"total_day_minutes\", DoubleType(), True), StructField(\"total_day_calls\", DoubleType(), True), StructField(\"total_day_charge\", DoubleType(), True), StructField(\"total_eve_minutes\", DoubleType(), True), StructField(\"total_eve_calls\", DoubleType(), True), StructField(\"total_eve_charge\", DoubleType(), True), StructField(\"total_night_minutes\", DoubleType(), True), StructField(\"total_night_calls\", DoubleType(), True), StructField(\"total_night_charge\", DoubleType(), True), StructField(\"total_intl_minutes\", DoubleType(), True), StructField(\"total_intl_calls\", DoubleType(), True), StructField(\"total_intl_charge\", DoubleType(), True), StructField(\"number_customer_service_calls\", DoubleType(), True), StructField(\"churned\", StringType(), True)])\nchurn_data = spark.read.schema(schemaData).csv('/tmp/churn.all.open')\n\nreduced_churn_data= churn_data.select(\"account_length\", \"number_vmail_messages\", \"total_day_calls\",\n \"total_day_charge\", \"total_eve_calls\", \"total_eve_charge\",\n \"total_night_calls\", \"total_night_charge\", \"total_intl_calls\", \n \"total_intl_charge\",\"number_customer_service_calls\")\n\nlabel_indexer = StringIndexer(inputCol = 'churned', outputCol = 'label')\nplan_indexer = StringIndexer(inputCol = 'intl_plan', outputCol = 'intl_plan_indexed')\npipeline = Pipeline(stages=[plan_indexer, label_indexer])\nindexed_data = pipeline.fit(churn_data).transform(churn_data)\n\n(train_data, test_data) = indexed_data.randomSplit([0.7, 0.3])\n\npdTrain = train_data.toPandas()\npdTest = test_data.toPandas()\nfeatures = [\"intl_plan_indexed\",\"account_length\", \"number_vmail_messages\", \"total_day_calls\",\n \"total_day_charge\", \"total_eve_calls\", \"total_eve_charge\",\n \"total_night_calls\", \"total_night_charge\", \"total_intl_calls\", \n \"total_intl_charge\",\"number_customer_service_calls\"]\n\nparam_numTrees = 20\nparam_maxDepth = 20 \nparam_impurity = 'gini' \n\nrandF=RandomForestClassifier(n_jobs=10,\n n_estimators=param_numTrees, \n max_depth=param_maxDepth, \n criterion = param_impurity,\n random_state=0)\n\ncdsw.track_metric(\"numTrees\",param_numTrees)\ncdsw.track_metric(\"maxDepth\",param_maxDepth)\ncdsw.track_metric(\"impurity\",param_impurity)\n\nrandF.fit(pdTrain[features], pdTrain['label'])\n\npredictions=randF.predict(pdTest[features])\n\n#temp = randF.predict_proba(pdTest[features])\n\npd.crosstab(pdTest['label'], predictions, rownames=['Actual'], colnames=['Prediction'])\n\nlist(zip(pdTrain[features], randF.feature_importances_))\n\n\ny_true = pdTest['label']\ny_scores = predictions\nauroc = roc_auc_score(y_true, y_scores)\nap = average_precision_score (y_true, y_scores)\nprint(auroc, ap)\n\ncdsw.track_metric(\"auroc\", 
auroc)\ncdsw.track_metric(\"ap\", ap)\n\npickle.dump(randF, open(\"models/sklearn_rf.pkl\",\"wb\"))\n\ncdsw.track_file(\"/models/sklearn_rf.pkl\")\n"
] |
[
[
"pandas.crosstab",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.average_precision_score",
"sklearn.ensemble.RandomForestClassifier"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
eddie-chiang/ccc4prc
|
[
"7659dfb4fe6e736f80249d935ed37a343dc4fefa",
"7659dfb4fe6e736f80249d935ed37a343dc4fefa"
] |
[
"classifier/CodeComprehensionClassifierFactory.py",
"classifier/DialogueActClassifierFactory.py"
] |
[
"from sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import OneHotEncoder\n\nfrom nlp import LemmaTokenizer\n\n\nclass CodeComprehensionClassifierFactory:\n \"\"\"A factory that creates a classifier to predict code comprehension related Pull Request Comments.\"\"\"\n\n @staticmethod\n def get_classifier(dac_labels: list):\n \"\"\"Defines classification algorithm, to be used in the supervised learning, to create a model.\n\n Args:\n dac_labels (list): Labels for Dialogue Act Classifications. Which should include: Accept, Bye, Clarify, Continuer, Emotion, Emphasis, Greet, Other, Reject, Statement, System, nAnswer, whQuestion, yAnswer, ynQuestion.\n\n Returns:\n classifier: A classifier implements \"fit\", \"score\", \"predict\", and \"predict_proba\" methods.\n \"\"\"\n\n is_author_categories = [\n False, # 0 should come before 1 for numerical columns.\n True\n ]\n\n column_transformer = ColumnTransformer(\n transformers=[\n (\n 'body_tfidf_vectorizer',\n TfidfVectorizer(tokenizer=LemmaTokenizer(), stop_words='english', ngram_range=(1, 2)),\n 'body'\n ),\n (\n 'dac_transformer',\n OneHotEncoder(categories=[dac_labels]),\n ['dialogue_act_classification_ml']\n ),\n (\n 'is_author_transformer',\n OneHotEncoder(categories=[is_author_categories]),\n ['comment_is_by_author']\n ),\n ],\n transformer_weights={\n 'body_tfidf_vectorizer': 4,\n 'dac_transformer': 1,\n 'is_author_transformer': 2,\n },\n verbose=False)\n\n classifier = Pipeline(\n steps=[\n ('preprocessor', column_transformer),\n ('classifier', LogisticRegression(C=500000, solver='lbfgs'))\n ],\n verbose=False)\n\n return classifier\n",
"import collections\nimport logging\nimport pickle\nfrom pathlib import Path\n\nfrom nltk import NaiveBayesClassifier, corpus, word_tokenize\nfrom nltk.classify import accuracy\nfrom nltk.metrics import ConfusionMatrix\nfrom nltk.metrics.scores import precision, recall\nfrom pandas import read_csv\nfrom tqdm import tqdm\n\n\nclass DialogueActClassifierFactory:\n \"\"\"Factory to create a classifier for dialogue act classification.\n \"\"\"\n\n def __init__(self):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.clf = None\n\n def get_classifier(self, classifier_file: Path, test_set_percentage: int) -> NaiveBayesClassifier:\n \"\"\"Train the classifier and persist the model to the specified file, or load from an existing model.\n\n Args:\n classifier_file (Path): A Path object that points to the trained classifier .pickle file.\n test_set_percentage (int): The percentage of labeled NPS Chat corpus to be used as the test set (remainder will be used as the train set).\n\n Returns:\n NaiveBayesClassifier: Trained classifier.\n \"\"\"\n if self.clf != None:\n return self.clf\n\n if classifier_file.is_file():\n with open(classifier_file, mode='rb') as f:\n self.clf = pickle.load(f)\n self.logger.info('Loaded trained dialogue act classifier.')\n _, _, self.test_set = self.__get_featuresets(test_set_percentage)\n else:\n self.logger.info('Training dialogue act classifier.')\n self.clf, self.test_set = self.__train(test_set_percentage)\n\n with open(classifier_file, mode='wb') as f:\n pickle.dump(self.clf, f)\n self.logger.info('Saved trained dialogue act classifier.')\n\n return self.clf\n\n def classify(self, dialogue: str) -> str:\n \"\"\"Classify the given featureset.\n\n Args:\n dialogue (str): A sentence, a passage.\n\n Returns: \n str: The dialogue act type.\n \"\"\"\n unlabeled_data_features = self.__dialogue_act_features(dialogue)\n return self.clf.classify(unlabeled_data_features)\n\n def classify_prc_csv_file(self, prc_csv_file: Path) -> Path:\n \"\"\"Classify the given Pull Request Comments .csv file.\n\n Args:\n prc_csv_file (Path): A Path object that points to the Pull Request Comments .csv file.\n\n Returns:\n Path: The file path of the output file.\n \"\"\"\n classified_csv_file = Path(prc_csv_file.absolute().as_posix().replace('.csv', '_dac_classified.csv'))\n\n if classified_csv_file.exists():\n self.logger.info(f'Output file already exists, stop further processing: {classified_csv_file}')\n return classified_csv_file\n\n data_frame = read_csv(prc_csv_file)\n tqdm.pandas(desc='Classifying Dialogue Act')\n data_frame['dialogue_act_classification_ml'] = data_frame.progress_apply(\n lambda row: self.classify(row['body']),\n axis='columns'\n )\n data_frame.to_csv(classified_csv_file, index=False, header=True, mode='w')\n self.logger.info(f'Dialogue Act Classification completed, output file: {classified_csv_file}')\n self.__classification_report()\n\n return classified_csv_file\n\n def __dialogue_act_features(self, dialogue: str) -> dict:\n features = {}\n for word in word_tokenize(dialogue):\n features['contains({})'.format(word.lower())] = True\n return features\n\n def __train(self, test_set_percentage: int):\n featuresets, train_set, test_set = self.__get_featuresets(test_set_percentage)\n self.logger.info(\n f'Size of feature set: {len(featuresets)}, train on {len(train_set)} instances, test on {len(test_set)} instances.')\n\n # Train the dialogue act classifier.\n return NaiveBayesClassifier.train(train_set), test_set\n\n def __classification_report(self):\n 
\"\"\"Prints classifier accuracy, precisions and recalls.\n \"\"\"\n self.logger.info(f'Accuracy: {self.get_accuracy()}')\n\n precisions, recalls = self.get_precision_and_recall()\n for label in precisions.keys():\n self.logger.info(f'{label} - precision: {precisions[label]}, recall: {recalls[label]}')\n\n def get_accuracy(self):\n \"\"\"Returns the Accuracy of the Dialogue Act Classifier.\n\n Returns:\n float: Accuracy.\n \"\"\"\n return accuracy(self.clf, self.test_set)\n\n def get_confusion_matrix(self):\n \"\"\"Returns the confusion matrix for the Dialogue Act Classifier.\n\n \"\"\"\n refsets = []\n testsets = []\n\n for _, (features, class_label) in enumerate(self.test_set):\n refsets.append(class_label)\n observed = self.clf.classify(features)\n testsets.append(observed)\n\n return ConfusionMatrix(refsets, testsets)\n\n def get_precision_and_recall(self):\n \"\"\"Returns the Precision and Recall for each class label of the Dialogue Act Classifier.\n\n Returns:\n tuple: (\n dict: A dictionary of the class labels and the corresponding precision.\n dict: A dictionary of the class labels and the corresponding recall.\n )\n \"\"\"\n refsets = collections.defaultdict(set)\n testsets = collections.defaultdict(set)\n\n precisions = dict()\n recalls = dict()\n\n for i, (features, class_label) in enumerate(self.test_set):\n refsets[class_label].add(i)\n observed = self.clf.classify(features)\n testsets[observed].add(i)\n\n for class_label in refsets:\n precisions[class_label] = precision(refsets[class_label], testsets[class_label])\n recalls[class_label] = recall(refsets[class_label], testsets[class_label])\n\n return precisions, recalls\n\n def __get_featuresets(self, test_set_percentage: int):\n # Extract the labeled basic messaging data.\n posts = corpus.nps_chat.xml_posts()\n\n # Construct the train and test data by applying the feature extractor to each post, and create a new classifier.\n featuresets = [(self.__dialogue_act_features(post.text), post.get('class'))\n for post in posts]\n test_set_size = int(len(featuresets) * test_set_percentage / 100)\n train_set, test_set = featuresets[test_set_size:], featuresets[:test_set_size]\n\n return featuresets, train_set, test_set\n"
] |
[
[
"sklearn.preprocessing.OneHotEncoder",
"sklearn.linear_model.LogisticRegression"
],
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
StSav012/pyqtgraph
|
[
"65e17c4e3707eb3bd4d91cdc13504d9b150f4360",
"65e17c4e3707eb3bd4d91cdc13504d9b150f4360",
"65e17c4e3707eb3bd4d91cdc13504d9b150f4360"
] |
[
"pyqtgraph/examples/Legend.py",
"pyqtgraph/examples/GLScatterPlotItem.py",
"pyqtgraph/opengl/items/GLTextItem.py"
] |
[
"\"\"\"\nDemonstrates basic use of LegendItem\n\"\"\"\n\nimport numpy as np\n\nimport pyqtgraph as pg\n\nwin = pg.plot()\nwin.setWindowTitle('pyqtgraph example: BarGraphItem')\n\n# # option1: only for .plot(), following c1,c2 for example-----------------------\n# win.addLegend(frame=False, colCount=2)\n\n# bar graph\nx = np.arange(10)\ny = np.sin(x+2) * 3\nbg1 = pg.BarGraphItem(x=x, height=y, width=0.3, brush='b', pen='w', name='bar')\nwin.addItem(bg1)\n\n# curve\nc1 = win.plot([np.random.randint(0,8) for i in range(10)], pen='r', symbol='t', symbolPen='r', symbolBrush='g', name='curve1')\nc2 = win.plot([2,1,4,3,1,3,2,4,3,2], pen='g', fillLevel=0, fillBrush=(255,255,255,30), name='curve2')\n\n# scatter plot\ns1 = pg.ScatterPlotItem(size=10, pen=pg.mkPen(None), brush=pg.mkBrush(255, 255, 255, 120), name='scatter')\nspots = [{'pos': [i, np.random.randint(-3, 3)], 'data': 1} for i in range(10)]\ns1.addPoints(spots)\nwin.addItem(s1)\n\n# # option2: generic method------------------------------------------------\nlegend = pg.LegendItem((80,60), offset=(70,20))\nlegend.setParentItem(win.graphicsItem())\nlegend.addItem(bg1, 'bar')\nlegend.addItem(c1, 'curve1')\nlegend.addItem(c2, 'curve2')\nlegend.addItem(s1, 'scatter')\n\nif __name__ == '__main__':\n pg.exec()\n",
"\"\"\"\nDemonstrates use of GLScatterPlotItem with rapidly-updating plots.\n\"\"\"\n\nimport numpy as np\n\nimport pyqtgraph as pg\nimport pyqtgraph.opengl as gl\nfrom pyqtgraph import functions as fn\nfrom pyqtgraph.Qt import QtCore\n\napp = pg.mkQApp(\"GLScatterPlotItem Example\")\nw = gl.GLViewWidget()\nw.show()\nw.setWindowTitle('pyqtgraph example: GLScatterPlotItem')\nw.setCameraPosition(distance=20)\n\ng = gl.GLGridItem()\nw.addItem(g)\n\n\n##\n## First example is a set of points with pxMode=False\n## These demonstrate the ability to have points with real size down to a very small scale \n## \npos = np.empty((53, 3))\nsize = np.empty((53))\ncolor = np.empty((53, 4))\npos[0] = (1,0,0); size[0] = 0.5; color[0] = (1.0, 0.0, 0.0, 0.5)\npos[1] = (0,1,0); size[1] = 0.2; color[1] = (0.0, 0.0, 1.0, 0.5)\npos[2] = (0,0,1); size[2] = 2./3.; color[2] = (0.0, 1.0, 0.0, 0.5)\n\nz = 0.5\nd = 6.0\nfor i in range(3,53):\n pos[i] = (0,0,z)\n size[i] = 2./d\n color[i] = (0.0, 1.0, 0.0, 0.5)\n z *= 0.5\n d *= 2.0\n \nsp1 = gl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=False)\nsp1.translate(5,5,0)\nw.addItem(sp1)\n\n\n##\n## Second example shows a volume of points with rapidly updating color\n## and pxMode=True\n##\n\npos = np.random.random(size=(100000,3))\npos *= [10,-10,10]\npos[0] = (0,0,0)\ncolor = np.ones((pos.shape[0], 4))\nd2 = (pos**2).sum(axis=1)**0.5\nsize = np.random.random(size=pos.shape[0])*10\nsp2 = gl.GLScatterPlotItem(pos=pos, color=(1,1,1,1), size=size)\nphase = 0.\n\nw.addItem(sp2)\n\n\n##\n## Third example shows a grid of points with rapidly updating position\n## and pxMode = False\n##\n\npos3 = np.zeros((100,100,3))\npos3[:,:,:2] = np.mgrid[:100, :100].transpose(1,2,0) * [-0.1,0.1]\npos3 = pos3.reshape(10000,3)\nd3 = (pos3**2).sum(axis=1)**0.5\n\nsp3 = gl.GLScatterPlotItem(pos=pos3, color=(1,1,1,.3), size=0.1, pxMode=False)\n\nw.addItem(sp3)\n\n\ndef update():\n ## update volume colors\n global phase, sp2, d2\n s = -np.cos(d2*2+phase)\n color = np.empty((len(d2),4), dtype=np.float32)\n color[:,3] = fn.clip_array(s * 0.1, 0., 1.)\n color[:,0] = fn.clip_array(s * 3.0, 0., 1.)\n color[:,1] = fn.clip_array(s * 1.0, 0., 1.)\n color[:,2] = fn.clip_array(s ** 3, 0., 1.)\n sp2.setData(color=color)\n phase -= 0.1\n \n ## update surface positions and colors\n global sp3, d3, pos3\n z = -np.cos(d3*2+phase)\n pos3[:,2] = z\n color = np.empty((len(d3),4), dtype=np.float32)\n color[:,3] = 0.3\n color[:,0] = np.clip(z * 3.0, 0, 1)\n color[:,1] = np.clip(z * 1.0, 0, 1)\n color[:,2] = np.clip(z ** 3, 0, 1)\n sp3.setData(pos=pos3, color=color)\n \nt = QtCore.QTimer()\nt.timeout.connect(update)\nt.start(50)\n\nif __name__ == '__main__':\n pg.exec()\n",
"from OpenGL.GL import * # noqa\nimport numpy as np\n\nfrom ... import functions as fn\nfrom ...Qt import QtCore, QtGui\nfrom ..GLGraphicsItem import GLGraphicsItem\n\n__all__ = ['GLTextItem']\n\nclass GLTextItem(GLGraphicsItem):\n \"\"\"Draws text in 3D.\"\"\"\n\n def __init__(self, **kwds):\n \"\"\"All keyword arguments are passed to setData()\"\"\"\n GLGraphicsItem.__init__(self)\n glopts = kwds.pop('glOptions', 'additive')\n self.setGLOptions(glopts)\n\n self.pos = np.array([0.0, 0.0, 0.0])\n self.color = QtCore.Qt.GlobalColor.white\n self.text = ''\n self.font = QtGui.QFont('Helvetica', 16)\n\n self.setData(**kwds)\n\n def setData(self, **kwds):\n \"\"\"\n Update the data displayed by this item. All arguments are optional;\n for example it is allowed to update text while leaving colors unchanged, etc.\n\n ==================== ==================================================\n **Arguments:**\n ------------------------------------------------------------------------\n pos (3,) array of floats specifying text location.\n color QColor or array of ints [R,G,B] or [R,G,B,A]. (Default: Qt.white)\n text String to display.\n font QFont (Default: QFont('Helvetica', 16))\n ==================== ==================================================\n \"\"\"\n args = ['pos', 'color', 'text', 'font']\n for k in kwds.keys():\n if k not in args:\n raise ValueError('Invalid keyword argument: %s (allowed arguments are %s)' % (k, str(args)))\n for arg in args:\n if arg in kwds:\n value = kwds[arg]\n if arg == 'pos':\n if isinstance(value, np.ndarray):\n if value.shape != (3,):\n raise ValueError('\"pos.shape\" must be (3,).')\n elif isinstance(value, (tuple, list)):\n if len(value) != 3:\n raise ValueError('\"len(pos)\" must be 3.')\n elif arg == 'color':\n value = fn.mkColor(value)\n elif arg == 'font':\n if isinstance(value, QtGui.QFont) is False:\n raise TypeError('\"font\" must be QFont.')\n setattr(self, arg, value)\n self.update()\n\n def paint(self):\n if len(self.text) < 1:\n return\n self.setupGLState()\n\n modelview = glGetDoublev(GL_MODELVIEW_MATRIX)\n projection = glGetDoublev(GL_PROJECTION_MATRIX)\n\n viewport = [0, 0, self.view().width(), self.view().height()]\n text_pos = self.__project(self.pos, modelview, projection, viewport)\n\n text_pos.setY(viewport[3] - text_pos.y())\n\n painter = QtGui.QPainter(self.view())\n painter.setPen(self.color)\n painter.setFont(self.font)\n painter.setRenderHints(QtGui.QPainter.RenderHint.Antialiasing | QtGui.QPainter.RenderHint.TextAntialiasing)\n painter.drawText(text_pos, self.text)\n painter.end()\n\n def __project(self, obj_pos, modelview, projection, viewport):\n obj_vec = np.append(np.array(obj_pos), [1.0])\n\n view_vec = np.matmul(modelview.T, obj_vec)\n proj_vec = np.matmul(projection.T, view_vec)\n\n if proj_vec[3] == 0.0:\n return QtCore.QPointF(0, 0)\n\n proj_vec[0:3] /= proj_vec[3]\n\n return QtCore.QPointF(\n viewport[0] + (1.0 + proj_vec[0]) * viewport[2] / 2,\n viewport[1] + (1.0 + proj_vec[1]) * viewport[3] / 2\n )\n"
] |
[
[
"numpy.arange",
"numpy.random.randint",
"numpy.sin"
],
[
"numpy.random.random",
"numpy.clip",
"numpy.cos",
"numpy.ones",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.array",
"numpy.matmul"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mlize/gs-quant
|
[
"253ed75519abbbe407e17e39ca5ed7340fa010dc"
] |
[
"gs_quant/test/timeseries/test_analysis.py"
] |
[
"\"\"\"\nCopyright 2018 Goldman Sachs.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. See the License for the\nspecific language governing permissions and limitations\nunder the License.\n\"\"\"\n\nfrom datetime import date\n\nimport pytest\nfrom pandas.util.testing import assert_series_equal\n\nfrom gs_quant.timeseries import *\n\n\ndef test_first():\n\n dates = [\n date(2019, 1, 1),\n date(2019, 1, 2),\n date(2019, 1, 3),\n date(2019, 1, 4),\n ]\n\n x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)\n\n result = first(x)\n expected = pd.Series([1.0, 1.0, 1.0, 1.0], index=dates)\n assert_series_equal(result, expected, obj=\"First\")\n\n\ndef test_last():\n\n dates = [\n date(2019, 1, 1),\n date(2019, 1, 2),\n date(2019, 1, 3),\n date(2019, 1, 4),\n ]\n\n x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)\n\n result = last(x)\n expected = pd.Series([4.0, 4.0, 4.0, 4.0], index=dates)\n assert_series_equal(result, expected, obj=\"First\")\n\n\ndef test_last_value():\n with pytest.raises(MqValueError):\n last_value(pd.Series())\n\n x = pd.Series([1.0, 2.0, 3.0, 4.0], index=(pd.date_range(\"2020-01-01\", periods=4, freq=\"D\")))\n assert last_value(x) == 4.0\n\n y = pd.Series([5])\n assert last_value(y) == 5\n\n\ndef test_count():\n\n dates = [\n date(2019, 1, 1),\n date(2019, 1, 2),\n date(2019, 1, 3),\n date(2019, 1, 4),\n ]\n\n x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)\n\n result = count(x)\n expected = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)\n assert_series_equal(result, expected, obj=\"Count\")\n\n\ndef test_diff():\n\n dates = [\n date(2019, 1, 1),\n date(2019, 1, 2),\n date(2019, 1, 3),\n date(2019, 1, 4),\n ]\n\n x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)\n\n result = diff(x)\n expected = pd.Series([np.nan, 1.0, 1.0, 1.0], index=dates)\n assert_series_equal(result, expected, obj=\"Diff\")\n\n result = diff(x, 2)\n expected = pd.Series([np.nan, np.nan, 2.0, 2.0], index=dates)\n assert_series_equal(result, expected, obj=\"Diff\")\n\n empty = pd.Series([], index=[])\n result = diff(empty)\n assert(len(result) == 0)\n\n\ndef test_lag():\n dates = pd.date_range(\"2019-01-01\", periods=4, freq=\"D\")\n x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)\n\n result = lag(x, '1m')\n expected = pd.Series([1.0, 2.0, 3.0, 4.0], index=pd.date_range(\"2019-01-31\", periods=4, freq=\"D\"))\n assert_series_equal(result, expected, obj=\"Lag 1m\")\n\n result = lag(x, '2d', LagMode.TRUNCATE)\n expected = pd.Series([1.0, 2.0], index=pd.date_range(\"2019-01-03\", periods=2, freq=\"D\"))\n assert_series_equal(result, expected, obj=\"Lag 2d truncate\")\n\n result = lag(x, mode=LagMode.TRUNCATE)\n expected = pd.Series([np.nan, 1.0, 2.0, 3.0], index=dates)\n assert_series_equal(result, expected, obj=\"Lag\")\n\n result = lag(x, 2, LagMode.TRUNCATE)\n expected = pd.Series([np.nan, np.nan, 1.0, 2.0], index=dates)\n assert_series_equal(result, expected, obj=\"Lag 2\")\n\n result = lag(x, 2, LagMode.EXTEND)\n expected = pd.Series([np.nan, np.nan, 1.0, 2.0, 3.0, 4.0], index=pd.date_range(\"2019-01-01\", periods=6, freq=\"D\"))\n assert_series_equal(result, expected, obj=\"Lag 2 Extend\")\n\n result = lag(x, -2, 
LagMode.EXTEND)\n expected = pd.Series([1.0, 2.0, 3.0, 4.0, np.nan, np.nan], index=pd.date_range(\"2018-12-30\", periods=6, freq=\"D\"))\n assert_series_equal(result, expected, obj=\"Lag Negative 2 Extend\")\n\n result = lag(x, 2)\n expected = pd.Series([np.nan, np.nan, 1.0, 2.0, 3.0, 4.0], index=pd.date_range(\"2019-01-01\", periods=6, freq=\"D\"))\n assert_series_equal(result, expected, obj=\"Lag 2 Default\")\n\n y = pd.Series([0] * 4, index=pd.date_range('2020-01-01T00:00:00Z', periods=4, freq='S'))\n with pytest.raises(Exception):\n lag(y, 5, LagMode.EXTEND)\n\n z = pd.Series([10, 11, 12], index=pd.date_range('2020-02-28', periods=3, freq='D'))\n result = lag(z, '2y')\n expected = pd.Series([10, 12], index=pd.date_range('2022-02-28', periods=2, freq='D'))\n assert_series_equal(result, expected, obj=\"Lag RDate 2y\")\n"
] |
[
[
"pandas.util.testing.assert_series_equal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
abenassi/xlseries
|
[
"ac507c732a84f9f692a89894399bf12a933d3687"
] |
[
"xlseries/tests/strategies/test_strategies.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_strategies\n\nTests for `strategies` module.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport unittest\nimport nose\nimport pandas as pd\nfrom functools import wraps\n\nfrom xlseries.strategies.discover.parameters import Parameters\nfrom xlseries.utils.case_loaders import load_original_case\nfrom xlseries.utils.case_loaders import load_parameters_case\nfrom xlseries.utils.case_loaders import load_expected_case\nfrom xlseries.utils.data_frame import compare_period_ranges\nfrom xlseries.utils.data_frame import compare_data_frames\nfrom xlseries.strategies.strategies import ParameterDiscovery\n\n\n# @unittest.skip(\"skip\")\nclass ParameterDiscoveryTestCase(unittest.TestCase):\n\n # @unittest.skip(\"skip\")\n\n def test_get_period_ranges(self):\n\n test_wb = load_original_case(2)\n params = load_parameters_case(2)\n strategy_obj = ParameterDiscovery(test_wb, params)\n ws = strategy_obj.wb.active\n\n pr_d = pd.date_range(\"20020304\", \"20140410\", freq=\"D\")\n pr_m = pd.date_range(\"20020301\", \"20140301\", freq=\"MS\")\n\n period_ranges = list(strategy_obj._get_period_ranges(ws, params))\n\n self.assertTrue(compare_period_ranges(pr_d, period_ranges[0]))\n self.assertTrue(compare_period_ranges(pr_m, period_ranges[1]))\n\n def test_generate_attempts(self):\n params = Parameters({\n \"alignment\": \"vertical\",\n \"headers_coord\": [\"B1\", \"C1\"],\n \"data_starts\": 2,\n \"data_ends\": 256,\n \"frequency\": \"M\",\n \"time_header_coord\": \"A1\",\n \"time_multicolumn\": True,\n \"time_composed\": True,\n \"time_alignment\": 0,\n \"continuity\": True,\n \"blank_rows\": True,\n \"missings\": None,\n \"missing_value\": None,\n \"series_names\": None\n })\n\n non_discovered = [\"missings\"]\n attempts = ParameterDiscovery._generate_attempts(non_discovered,\n params)\n p1 = Parameters({\n \"alignment\": \"vertical\",\n \"headers_coord\": [\"B1\", \"C1\"],\n \"data_starts\": 2,\n \"data_ends\": 256,\n \"frequency\": \"M\",\n \"time_header_coord\": \"A1\",\n \"time_multicolumn\": True,\n \"time_composed\": True,\n \"time_alignment\": 0,\n \"continuity\": True,\n \"blank_rows\": True,\n \"missings\": True,\n \"missing_value\": None,\n \"series_names\": None\n })\n p2 = Parameters({\n \"alignment\": \"vertical\",\n \"headers_coord\": [\"B1\", \"C1\"],\n \"data_starts\": 2,\n \"data_ends\": 256,\n \"frequency\": \"M\",\n \"time_header_coord\": \"A1\",\n \"time_multicolumn\": True,\n \"time_composed\": True,\n \"time_alignment\": 0,\n \"continuity\": True,\n \"blank_rows\": True,\n \"missings\": False,\n \"missing_value\": None,\n \"series_names\": None\n })\n\n self.assertEqual(len(attempts), 2)\n\n for param_name in attempts[0]:\n self.assertEqual(p2[param_name], attempts[0][param_name])\n for param_name in attempts[1]:\n self.assertEqual(p1[param_name], attempts[1][param_name])\n\n def test_param_combinations_generator(self):\n\n missings_dict = {\n \"missings\": [True, False],\n \"blank_rows\": [True, False]\n }\n exp_combinations = [\n {\"missings\": True, \"blank_rows\": True},\n {\"missings\": True, \"blank_rows\": False},\n {\"missings\": False, \"blank_rows\": True},\n {\"missings\": False, \"blank_rows\": False}\n ]\n\n combs = list(ParameterDiscovery._param_combinations_generator(\n missings_dict))\n\n for exp_comb in exp_combinations:\n self.assertIn(exp_comb, combs)\n for comb in combs:\n self.assertIn(comb, exp_combinations)\n\n def test_param_combinations_generator_with_default_values(self):\n\n missings_dict = 
{\n \"missings\": [True, False],\n \"blank_rows\": [True, False]\n }\n exp_combinations = [\n {\"missings\": True, \"blank_rows\": True},\n {\"missings\": False, \"blank_rows\": True},\n {\"missings\": True, \"blank_rows\": False},\n {\"missings\": False, \"blank_rows\": False}\n ]\n default_values = {\"missings\": True, \"blank_rows\": True}\n likeliness_order = [\"blank_rows\", \"missings\"]\n\n combs = list(ParameterDiscovery._param_combinations_generator(\n missings_dict, default_values, likeliness_order))\n\n for comb, exp_comb in zip(combs, exp_combinations):\n self.assertEqual(comb, exp_comb)\n\n def test_param_combinations_generator_total_results(self):\n\n missings_dict = {\"alignment\": [\"vertical\", \"horizontal\"],\n \"continuity\": [True, False],\n \"blank_rows\": [True, False],\n \"missings\": [True, False],\n \"time_multicolumn\": [True, False],\n \"time_composed\": [True, False]}\n default_values = Parameters.DEFAULT_VALUES\n likeliness_order = Parameters.LIKELINESS_ORDER\n\n with_def = list(ParameterDiscovery._param_combinations_generator(\n missings_dict, default_values, likeliness_order))\n no_def = list(ParameterDiscovery._param_combinations_generator(\n missings_dict))\n\n self.assertEqual(len(with_def), len(no_def))\n\n for comb_with_no_def in no_def:\n self.assertIn(comb_with_no_def, with_def)\n\n for comb_with_def in with_def:\n self.assertIn(comb_with_def, no_def)\n\n\nif __name__ == '__main__':\n # unittest.main()\n nose.run(defaultTest=__name__)\n"
] |
[
[
"pandas.date_range"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ihish52/d-HAT
|
[
"35d12828fb9e8e7a184adaa4caa187bef3a94be2",
"35d12828fb9e8e7a184adaa4caa187bef3a94be2"
] |
[
"train.py",
"fairseq/modules/linear_super.py"
] |
[
"# HAT: Hardware-Aware Transformers for Efficient Natural Language Processing\n# Hanrui Wang, Zhanghao Wu, Zhijian Liu, Han Cai, Ligeng Zhu, Chuang Gan and Song Han\n# The 58th Annual Meeting of the Association for Computational Linguistics (ACL), 2020.\n# Paper: https://arxiv.org/abs/2005.14187\n# Project page: https://hanruiwang.me/project_pages/hat/\n\nimport collections\nimport math\nimport random\nimport torch\nimport pdb\n\nfrom fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\nfrom fairseq.data import iterators\nfrom fairseq.trainer import Trainer\nfrom fairseq.meters import AverageMeter, StopwatchMeter\nfrom copy import deepcopy\n\n\ndef main(args, init_distributed=False):\n utils.import_user_module(args)\n utils.handle_save_path(args)\n\n assert args.max_tokens is not None or args.max_sentences is not None, \\\n 'Must specify batch size either with --max-tokens or --max-sentences'\n\n # Initialize CUDA and distributed training\n if torch.cuda.is_available() and not args.cpu:\n torch.cuda.set_device(args.device_id)\n torch.manual_seed(args.seed)\n if init_distributed:\n args.distributed_rank = distributed_utils.distributed_init(args)\n\n if distributed_utils.is_master(args):\n checkpoint_utils.verify_checkpoint_directory(args.save_dir)\n\n # Print args\n print(f\"| Configs: {args}\")\n\n # Setup task, e.g., translation, language modeling, etc.\n task = tasks.setup_task(args)\n\n # Load valid dataset (we load training data below, based on the latest checkpoint)\n for valid_sub_split in args.valid_subset.split(','):\n task.load_dataset(valid_sub_split, combine=False, epoch=0)\n\n # Build model and criterion\n model = task.build_model(args)\n criterion = task.build_criterion(args)\n print(f\"| Model: {args.arch} \\n| Criterion: {criterion.__class__.__name__}\")\n\n # Log architecture\n if args.train_subtransformer:\n print(\" \\n\\n\\t\\tWARNING!!! Training one single SubTransformer\\n\\n\")\n print(f\"| SubTransformer Arch: {utils.get_subtransformer_config(args)} \\n\")\n else:\n print(\" \\n\\n\\t\\tWARNING!!! Training SuperTransformer\\n\\n\")\n print(f\"| SuperTransformer Arch: {model} \\n\")\n\n # Log model size\n if args.train_subtransformer:\n print(f\"| SubTransformer size (without embedding weights): {model.get_sampled_params_numel(utils.get_subtransformer_config(args))}\")\n embed_size = args.decoder_embed_dim_subtransformer * len(task.tgt_dict)\n print(f\"| Embedding layer size: {embed_size} \\n\")\n\n else:\n model_s = 0\n # if use model.state_dict, then will add 2 more parameters, they are encoder.version and decoder.version. 
Should not count them\n for name, param in model.named_parameters():\n if 'embed' not in name:\n model_s += param.numel()\n print(f\"| SuperTransofmer model size (without embedding weights): {model_s}\")\n\n print(f\"| Embedding layer size: {sum(p.numel() for p in model.parameters() if p.requires_grad) - model_s} \\n\")\n\n # specify the length of the dummy input for profile\n # for iwslt, the average length is 23, for wmt, that is 30\n dummy_sentence_length_dict = {'iwslt': 23, 'wmt': 30}\n if 'iwslt' in args.arch:\n dummy_sentence_length = dummy_sentence_length_dict['iwslt']\n elif 'wmt' in args.arch:\n dummy_sentence_length = dummy_sentence_length_dict['wmt']\n else:\n raise NotImplementedError\n\n dummy_src_tokens = [2] + [7] * (dummy_sentence_length - 1)\n dummy_prev = [7] * (dummy_sentence_length - 1) + [2]\n\n # profile the overall FLOPs number\n if args.profile_flops:\n import torchprofile\n config_subtransformer = utils.get_subtransformer_config(args)\n model.set_sample_config(config_subtransformer)\n model.profile(mode=True)\n macs = torchprofile.profile_macs(model, args=(torch.tensor([dummy_src_tokens], dtype=torch.long), torch.tensor([30]), torch.tensor([dummy_prev], dtype=torch.long)))\n model.profile(mode=False)\n\n last_layer_macs = config_subtransformer['decoder']['decoder_embed_dim'] * dummy_sentence_length * len(task.tgt_dict)\n\n print(f\"| Total FLOPs: {macs * 2}\")\n print(f\"| Last layer FLOPs: {last_layer_macs * 2}\")\n print(f\"| Total FLOPs without last layer: {(macs - last_layer_macs) * 2} \\n\")\n exit(0)\n\n # Build trainer\n trainer = Trainer(args, task, model, criterion)\n print(f\"| Training on {args.distributed_world_size} GPUs\")\n print(f\"| Max tokens per GPU = {args.max_tokens} and max sentences per GPU = {args.max_sentences} \\n\")\n\n # Measure model latency, the program will exit after profiling latency\n if args.latcpu or args.latgpu:\n utils.measure_latency(args, model, dummy_src_tokens, dummy_prev)\n exit(0)\n\n # Load the latest checkpoint if one is available and restore the corresponding train iterator\n extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)\n\n # Evaluate the SubTransformer\n if args.validate_subtransformer:\n config = utils.get_subtransformer_config(args)\n trainer.set_sample_config(config)\n valid_loss = validate(args, trainer, task, epoch_itr, ['valid'], 'SubTransformer')\n print(f\"| SubTransformer validation loss:{valid_loss}\")\n\n # Loop boundaries\n max_epoch = args.max_epoch or math.inf\n max_update = args.max_update or math.inf\n lr = trainer.get_lr()\n\n train_meter = StopwatchMeter()\n train_meter.start()\n valid_subsets = args.valid_subset.split(',')\n\n represent_configs = utils.get_represent_configs(args)\n\n # Main training loop\n while lr > args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:\n # train for one epoch\n train(args, trainer, task, epoch_itr)\n\n if not args.disable_validation and epoch_itr.epoch % args.validate_interval == 0:\n for k, v in represent_configs.items():\n trainer.set_sample_config(config=v)\n valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets, sampled_arch_name=k)\n else:\n valid_losses = [None]\n\n # update the best loss and get current lr; the real lr scheduling is done in trainer.train_step()\n lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])\n\n # save checkpoint epoch level\n if epoch_itr.epoch % args.save_interval == 0:\n checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])\n\n 
train_meter.stop()\n print('| Done training in {:.1f} seconds'.format(train_meter.sum))\n\n\ndef train(args, trainer, task, epoch_itr):\n \"\"\"Train the model for one epoch.\"\"\"\n # Update parameters every N batches\n update_freq = args.update_freq[epoch_itr.epoch - 1] \\\n if epoch_itr.epoch <= len(args.update_freq) else args.update_freq[-1]\n\n # Initialize data iterator\n itr = epoch_itr.next_epoch_itr(\n fix_batches_to_gpus=args.fix_batches_to_gpus,\n shuffle=(epoch_itr.epoch >= args.curriculum),\n )\n itr = iterators.GroupedIterator(itr, update_freq)\n progress = progress_bar.build_progress_bar(\n args, itr, epoch_itr.epoch,\n )\n\n extra_meters = collections.defaultdict(lambda: AverageMeter())\n valid_subsets = args.valid_subset.split(',')\n max_update = args.max_update or math.inf\n\n represent_configs = utils.get_represent_configs(args)\n\n for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):\n if args.train_subtransformer:\n # training one SubTransformer only\n configs = [utils.get_subtransformer_config(args)]\n else:\n # training SuperTransformer by randomly sampling SubTransformers\n configs = [utils.sample_configs(utils.get_all_choices(args), reset_rand_seed=True, rand_seed=trainer.get_num_updates(),\n super_decoder_num_layer=args.decoder_layers)]\n\n log_output = trainer.train_step(samples, configs=configs)\n if log_output is None:\n continue\n\n # log mid-epoch stats\n stats = utils.get_training_stats(trainer)\n for k, v in log_output.items():\n if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:\n continue # these are already logged above\n if 'loss' in k or k == 'accuracy':\n extra_meters[k].update(v, log_output['sample_size'])\n else:\n extra_meters[k].update(v)\n stats[k] = extra_meters[k].avg\n\n utils.log_arch_info(stats, configs[0])\n\n progress.log(stats, tag='train', step=stats['num_updates'])\n\n # ignore the first mini-batch in words-per-second calculation\n if i == 0:\n trainer.get_meter('wps').reset()\n\n num_updates = trainer.get_num_updates()\n if (\n not args.disable_validation\n and args.save_interval_updates > 0\n and num_updates % args.save_interval_updates == 0\n and num_updates > 0\n ):\n for k, v in represent_configs.items():\n trainer.set_sample_config(config=v)\n valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets, sampled_arch_name=k)\n\n checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])\n\n if num_updates >= max_update:\n break\n\n # log end-of-epoch stats\n stats = utils.get_training_stats(trainer)\n for k, meter in extra_meters.items():\n stats[k] = meter.avg\n progress.print(stats, tag='train', step=stats['num_updates'])\n\n # reset training meters\n for k in [\n 'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',\n ]:\n meter = trainer.get_meter(k)\n if meter is not None:\n meter.reset()\n\n\ndef validate(args, trainer, task, epoch_itr, subsets, sampled_arch_name):\n \"\"\"Evaluate the model on the validation set(s) and return the losses.\"\"\"\n valid_losses = []\n for subset in subsets:\n # Initialize data iterator\n def get_itr():\n itr = task.get_batch_iterator(\n dataset=task.dataset(subset),\n max_tokens=args.max_tokens_valid,\n max_sentences=args.max_sentences_valid,\n max_positions=utils.resolve_max_positions(\n task.max_positions(),\n trainer.get_model().max_positions(),\n ),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=args.required_batch_size_multiple,\n seed=args.seed,\n 
num_shards=args.distributed_world_size,\n shard_id=args.distributed_rank,\n num_workers=args.num_workers,\n ).next_epoch_itr(shuffle=False)\n progress = progress_bar.build_progress_bar(\n args, itr, epoch_itr.epoch,\n prefix='validate on \\'{}\\' subset'.format(subset),\n )\n return progress\n progress = get_itr()\n\n # reset validation loss meters\n for k in ['valid_loss', 'valid_nll_loss']:\n meter = trainer.get_meter(k)\n if meter is not None:\n meter.reset()\n extra_meters = collections.defaultdict(lambda: AverageMeter())\n\n for sample in progress:\n log_output = trainer.valid_step(sample)\n\n for k, v in log_output.items():\n if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:\n continue\n extra_meters[k].update(v)\n\n # log validation stats\n stats = utils.get_valid_stats(trainer, args)\n for k, meter in extra_meters.items():\n stats[k] = meter.avg\n\n # log validation stats\n stats = utils.get_valid_stats(trainer, args, extra_meters)\n\n stats[sampled_arch_name+'_loss'] = deepcopy(stats['loss'])\n stats[sampled_arch_name+'_nll_loss'] = deepcopy(stats['nll_loss'])\n\n for k, meter in extra_meters.items():\n stats[k] = meter.avg\n\n progress.print(stats, tag=subset, step=trainer.get_num_updates())\n\n valid_losses.append(\n stats[args.best_checkpoint_metric].avg\n if args.best_checkpoint_metric == 'loss'\n else stats[args.best_checkpoint_metric]\n )\n return valid_losses\n\n\ndef distributed_main(i, args, start_rank=0):\n args.device_id = i\n if args.distributed_rank is None: # torch.multiprocessing.spawn\n args.distributed_rank = start_rank + i\n main(args, init_distributed=True)\n\n\ndef cli_main():\n parser = options.get_training_parser()\n parser.add_argument('--train-subtransformer', action='store_true', default=False, help='whether train SuperTransformer or SubTransformer')\n parser.add_argument('--sub-configs', required=False, is_config_file=True, help='when training SubTransformer, use --configs to specify architecture and --sub-configs to specify other settings')\n\n # for profiling\n parser.add_argument('--profile-flops', action='store_true', help='measure the FLOPs of a SubTransformer')\n\n parser.add_argument('--latgpu', action='store_true', help='measure SubTransformer latency on GPU')\n parser.add_argument('--latcpu', action='store_true', help='measure SubTransformer latency on CPU')\n parser.add_argument('--latiter', type=int, default=300, help='how many iterations to run when measure the latency')\n parser.add_argument('--latsilent', action='store_true', help='keep silent when measure latency')\n\n parser.add_argument('--validate-subtransformer', action='store_true', help='evaluate the SubTransformer on the validation set')\n\n options.add_generation_args(parser)\n\n args = options.parse_args_and_arch(parser)\n\n if args.latcpu:\n args.cpu = True\n args.fp16 = False\n\n if args.latgpu or args.latcpu or args.profile_flops:\n args.distributed_world_size = 1\n\n if args.pdb:\n pdb.set_trace()\n\n if args.distributed_init_method is None:\n distributed_utils.infer_init_method(args)\n\n if args.distributed_init_method is not None:\n # distributed training\n if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:\n start_rank = args.distributed_rank\n args.distributed_rank = None # assign automatically\n torch.multiprocessing.spawn(\n fn=distributed_main,\n args=(args, start_rank),\n nprocs=torch.cuda.device_count(),\n )\n else:\n distributed_main(args.device_id, args)\n elif args.distributed_world_size > 1:\n # fallback for single node with 
multiple GPUs\n assert args.distributed_world_size <= torch.cuda.device_count()\n port = random.randint(10000, 20000)\n args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)\n args.distributed_rank = None # set based on device id\n if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':\n print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')\n torch.multiprocessing.spawn(\n fn=distributed_main,\n args=(args, ),\n nprocs=args.distributed_world_size,\n )\n else:\n # single GPU training\n main(args)\n\n\nif __name__ == '__main__':\n cli_main()\n",
"# HAT: Hardware-Aware Transformers for Efficient Natural Language Processing\n# Hanrui Wang, Zhanghao Wu, Zhijian Liu, Han Cai, Ligeng Zhu, Chuang Gan and Song Han\n# The 58th Annual Meeting of the Association for Computational Linguistics (ACL), 2020.\n# Paper: https://arxiv.org/abs/2005.14187\n# Project page: https://hanruiwang.me/project_pages/hat/\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass LinearSuper(nn.Linear):\n def __init__(self, super_in_dim, super_out_dim, bias=True, uniform_=None, non_linear='linear'):\n super().__init__(super_in_dim, super_out_dim, bias=bias)\n\n # super_in_dim and super_out_dim indicate the largest network!\n self.super_in_dim = super_in_dim\n self.super_out_dim = super_out_dim\n\n # input_dim and output_dim indicate the current sampled size\n self.sample_in_dim = None\n self.sample_out_dim = None\n\n self.samples = {}\n\n self._reset_parameters(bias, uniform_, non_linear)\n self.profiling = False\n\n def profile(self, mode=True):\n self.profiling = mode\n\n def sample_parameters(self, resample=False):\n if self.profiling or resample:\n return self._sample_parameters()\n return self.samples\n\n def _reset_parameters(self, bias, uniform_, non_linear):\n nn.init.xavier_uniform_(self.weight) if uniform_ is None else uniform_(\n self.weight, non_linear=non_linear)\n if bias:\n nn.init.constant_(self.bias, 0.)\n\n def set_sample_config(self, sample_in_dim, sample_out_dim):\n self.sample_in_dim = sample_in_dim\n self.sample_out_dim = sample_out_dim\n\n self._sample_parameters()\n\n def _sample_parameters(self):\n self.samples['weight'] = sample_weight(self.weight, self.sample_in_dim, self.sample_out_dim)\n self.samples['bias'] = self.bias\n if self.bias is not None:\n self.samples['bias'] = sample_bias(self.bias, self.sample_out_dim)\n return self.samples\n\n def forward(self, x):\n self.sample_parameters()\n return F.linear(x, self.samples['weight'], self.samples['bias'])\n\n def calc_sampled_param_num(self):\n assert 'weight' in self.samples.keys()\n weight_numel = self.samples['weight'].numel()\n\n if self.samples['bias'] is not None:\n bias_numel = self.samples['bias'].numel()\n else:\n bias_numel = 0\n\n return weight_numel + bias_numel\n\n\ndef sample_weight(weight, sample_in_dim, sample_out_dim):\n sample_weight = weight[:, :sample_in_dim]\n sample_weight = sample_weight[:sample_out_dim, :]\n\n return sample_weight\n\n\ndef sample_bias(bias, sample_out_dim):\n sample_bias = bias[:sample_out_dim]\n\n return sample_bias\n"
] |
[
[
"torch.cuda.set_device",
"torch.multiprocessing.spawn",
"torch.manual_seed",
"torch.tensor",
"torch.cuda.is_available",
"torch.cuda.device_count"
],
[
"torch.nn.init.constant_",
"torch.nn.functional.linear",
"torch.nn.init.xavier_uniform_"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bin-lin/PyRef
|
[
"30502be3185d343c91d76ba51b011bc191440a33"
] |
[
"preprocessing/refactoring_heuristics.py"
] |
[
"import ast\nimport pandas as pd\nimport editdistance\nfrom preprocessing.refactorings_info import RefInfo\nfrom preprocessing.conditions_match import body_mapper\nfrom preprocessing.refactorings import ExtractInlineRef, RenameRef, MoveRef, ClassRef, ExtractVarRef\nfrom preprocessing.utils import intersection, get_stmts_recursive, is_extracted\n\n\ndef extract_method_ref(common_methods, added_methods):\n refs = []\n\n for tuple_m in common_methods:\n for added_m in added_methods[:]:\n method1 = tuple_m[0]\n method2 = tuple_m[1]\n if not method1.calls(added_m) and method1.class_node == added_m.class_node and method2.calls(added_m):\n mapped_stmts = body_mapper(tuple_m, added_m, RefInfo.EXTRACT)\n if len(mapped_stmts.index) == 0:\n continue\n\n method1_stmts = [{\"stmt1\": str(stmt)} for stmt in method1.get_all_stmts()]\n method2_stmts = [{\"stmt1\": str(stmt)} for stmt in method2.get_all_stmts()]\n\n method1_stmts_df = pd.DataFrame(method1_stmts).groupby(\"stmt1\")[\"stmt1\"].agg('count').reset_index(\n name=\"count\")\n method2_stmts_df = pd.DataFrame(method2_stmts).groupby(\"stmt1\")[\"stmt1\"].agg('count').reset_index(\n name=\"count\")\n\n stmts_merge = method1_stmts_df.merge(method2_stmts_df, how=\"outer\", on=\"stmt1\").fillna(0)\n\n mapped_stmts_agg = mapped_stmts.groupby(\"stmt1\")[\"stmt1\"].agg('count').reset_index(name=\"count\")\n\n mapped_stmts_merge = mapped_stmts_agg.merge(stmts_merge, how=\"outer\", on=\"stmt1\").dropna()\n\n to_remove = mapped_stmts_merge[~(\n abs(mapped_stmts_merge[\"count\"] - mapped_stmts_merge[\"count_x\"]) >= mapped_stmts_merge[\n \"count_y\"])][\"stmt1\"].tolist()\n\n mapped_stmts = mapped_stmts[~(mapped_stmts[\"stmt1\"].isin(to_remove))]\n\n mapped_stmts_len = len(mapped_stmts[~mapped_stmts.type.str.contains(\"inner\")].index)\n\n method2_unmapped = added_m.get_total_stmts_count() - mapped_stmts_len\n if mapped_stmts_len >= method2_unmapped:\n added_methods.remove(added_m)\n refs.append(\n ExtractInlineRef(method1.name, added_m.name, \"Extract Method\", added_m.get_path(), mapped_stmts,\n tuple_m, added_m))\n return refs\n\n\ndef change_class_signature(removed_classes: list, added_classes: list, common_classes: list):\n refs = []\n sub_refs = []\n for removed_class in removed_classes[:]:\n for added_class in added_classes[:]:\n\n removed_class_methods = set([method.name for method in removed_class.methods])\n added_class_methods = set([method.name for method in added_class.methods])\n removed_class_fields = set(removed_class.fields)\n added_class_fields = set(added_class.fields)\n\n common_methods_len = len(removed_class_methods.intersection(added_class_methods))\n total_methods_len = len(removed_class_methods) if len(removed_class_methods) > len(\n added_class_methods) else len(added_class_methods)\n\n if ((removed_class_methods.issubset(added_class_methods) or added_class_methods.issubset(\n removed_class_methods)) and (\n removed_class_fields.issubset(added_class_fields) or added_class_fields.issubset(\n removed_class_fields))) or (common_methods_len > total_methods_len / 2 and (\n removed_class_fields.issubset(added_class_fields) or added_class_fields.issubset(\n removed_class_fields))):\n if not (removed_class.name == added_class.name):\n if not total_methods_len == 0:\n matched_method_ratio = common_methods_len / total_methods_len\n else:\n matched_method_ratio = 0\n sub_refs.append({\"Removed Class\": removed_class.name, \"Added Class\": added_class.name,\n \"Matched Method\": matched_method_ratio,\n \"Name Similarity\": 
editdistance.eval(added_class.name, removed_class.name),\n \"Ref\": ClassRef(removed_class.name, added_class.name, \"Rename Class\",\n added_class.module.name)})\n if not (removed_class.module.name == added_class.module.name):\n sub_refs.append({\"Removed Class\": removed_class.name, \"Added Class\": added_class.name,\n \"Matched Method\": matched_method_ratio,\n \"Name Similarity\": editdistance.eval(added_class.name, removed_class.name),\n \"Ref\": ClassRef(removed_class.name, added_class.name, \"Move Class\",\n added_class.module.name)})\n\n sub_refs = pd.DataFrame(sub_refs)\n if len(sub_refs.index) > 0:\n sub_refs = sub_refs.sort_values([\"Matched Method\", \"Name Similarity\"], ascending=[False, True]).groupby(\n 'Added Class').head(1)\n sub_refs = sub_refs.sort_values([\"Matched Method\", \"Name Similarity\"], ascending=[False, True]).groupby(\n 'Removed Class').head(1)\n\n refs = sub_refs[\"Ref\"].tolist()\n # remove class and add to common\n\n return refs\n\n\ndef inline_method_ref(common_methods, removed_methods):\n refs = []\n for tuple_m in common_methods:\n for removed_m in removed_methods[:]:\n method1 = tuple_m[0]\n adjacent_method = tuple_m[1]\n if method1.calls(removed_m) and adjacent_method.class_node == removed_m.class_node and \\\n not adjacent_method.calls(removed_m):\n mapped_stmts = body_mapper(tuple_m, removed_m, RefInfo.INLINE)\n if len(mapped_stmts.index) == 0:\n continue\n\n method1_stmts = [{\"stmt1\": str(stmt)} for stmt in method1.get_all_stmts()]\n method2_stmts = [{\"stmt1\": str(stmt)} for stmt in adjacent_method.get_all_stmts()]\n\n method1_stmts_df = pd.DataFrame(method1_stmts).groupby(\"stmt1\")[\"stmt1\"].agg('count').reset_index(\n name=\"count\")\n method2_stmts_df = pd.DataFrame(method2_stmts).groupby(\"stmt1\")[\"stmt1\"].agg('count').reset_index(\n name=\"count\")\n\n stmts_merge = method1_stmts_df.merge(method2_stmts_df, how=\"outer\", on=\"stmt1\").fillna(0)\n\n mapped_stmts_agg = mapped_stmts.groupby(\"stmt1\")[\"stmt1\"].agg('count').reset_index(name=\"count\")\n\n mapped_stmts_merge = mapped_stmts_agg.merge(stmts_merge, how=\"outer\", on=\"stmt1\").dropna()\n\n to_remove = mapped_stmts_merge[~(\n abs(mapped_stmts_merge[\"count\"] + mapped_stmts_merge[\"count_x\"]) <= mapped_stmts_merge[\n \"count_y\"])][\"stmt1\"].tolist()\n\n mapped_stmts = mapped_stmts[~(mapped_stmts[\"stmt1\"].isin(to_remove))]\n mapped_stmts_len = len(mapped_stmts[~mapped_stmts.type.str.contains(\"inner\")].index)\n method1_unmapped = adjacent_method.get_total_stmts_count() - mapped_stmts_len\n method2_unmapped = removed_m.get_total_stmts_count() - mapped_stmts_len\n if mapped_stmts_len > method2_unmapped:\n removed_methods.remove(removed_m)\n refs.append(\n ExtractInlineRef(method1.name, removed_m.name, \"Inline Method\", removed_m.get_path(),\n mapped_stmts,\n tuple_m, removed_m))\n break\n return refs\n\n\ndef move_method_ref(diff_common_element):\n diff_common_element = diff_common_element[:]\n refs = []\n\n removed_methods = []\n added_methods = []\n for diff_element in diff_common_element:\n removed_methods.extend(diff_element.removed_methods)\n added_methods.extend(diff_element.added_methods)\n for sub_diff_element in diff_element.diff_common_classes:\n removed_methods.extend(sub_diff_element.removed_methods)\n added_methods.extend(sub_diff_element.added_methods)\n for removed_method in removed_methods:\n metrics = []\n sub_refs = []\n for added_method in added_methods:\n mapped_stmts = body_mapper(removed_method, added_method, RefInfo.RENAME)\n if 
len(mapped_stmts.index) == 0:\n continue\n mapped_stmts_len = len(mapped_stmts[~mapped_stmts.type.str.contains(\"inner\")].index)\n method1_unmapped = abs(removed_method.get_total_stmts_count() - mapped_stmts_len)\n method2_unmapped = abs(added_method.get_total_stmts_count() - mapped_stmts_len)\n if mapped_stmts_len >= method1_unmapped and mapped_stmts_len >= method2_unmapped and added_method.name == removed_method.name:\n priority = len(mapped_stmts[mapped_stmts['replacements'].isnull()])\n total_distance = mapped_stmts[\"distance\"].sum()\n if len(metrics) == 0:\n metrics = [priority / mapped_stmts_len, total_distance, mapped_stmts_len]\n else:\n if not (priority / mapped_stmts_len >= metrics[0] and total_distance <= metrics[\n 1] and mapped_stmts_len >= metrics[2]):\n continue\n else:\n sub_refs = []\n metrics = [priority / mapped_stmts_len, total_distance, mapped_stmts_len]\n _move = \"None\"\n if not (removed_method.module.name == added_method.module.name):\n if removed_method.class_node is not None and added_method.class_node is not None:\n if added_method.class_node.name in removed_method.class_node.bases:\n _move = \"Pull Up\"\n if removed_method.class_node.name in added_method.class_node.bases:\n _move = \"Push Down\"\n sub_refs.append(\n MoveRef(removed_method.name, added_method.name, \"Move Method\", added_method.get_path(),\n removed_method.get_path(), _move, mapped_stmts, removed_method,\n added_method))\n # break\n\n elif removed_method.class_node is not None and added_method.class_node is not None:\n if not (removed_method.class_node == added_method.class_node):\n if added_method.class_node.name in removed_method.class_node.bases:\n _move = \"Pull Up\"\n if removed_method.class_node.name in added_method.class_node.bases:\n _move = \"Push Down\"\n sub_refs.append(\n MoveRef(removed_method.name, added_method.name, \"Move Method\", added_method.get_path(),\n removed_method.get_path(), _move, mapped_stmts, removed_method,\n added_method))\n # break\n elif removed_method.class_node is not None or added_method.class_node is not None:\n sub_refs.append(\n MoveRef(removed_method.name, added_method.name, \"Move Method\", added_method.get_path(),\n removed_method.get_path(), _move, mapped_stmts, removed_method,\n added_method))\n # break\n refs = refs + sub_refs\n\n return refs\n\n\ndef method_signature_change_ref(added_methods, removed_methods, common_methods):\n refs = []\n matched_methods = pd.DataFrame(\n columns=['from', 'to', 'ref_type', 'priority', 'total_distance', 'path', 'm1', 'm2', 'mapped_stmts', 'param_change'])\n mapped_stmts = []\n for removed_method in removed_methods:\n for added_m in added_methods:\n mapped_stmts = body_mapper(removed_method, added_m, RefInfo.RENAME)\n if len(mapped_stmts.index) == 0:\n continue\n # mapped_stmts_index = len(mapped_stmts.apply(lambda row: not (\"inner\" in row[\"type\"]), axis=1))\n mapped_stmts_len = len(mapped_stmts[~mapped_stmts.type.str.contains(\"inner\")].index)\n method1_unmapped = abs(removed_method.get_total_stmts_count() - mapped_stmts_len)\n method2_unmapped = abs(added_m.get_total_stmts_count() - mapped_stmts_len)\n other_added_methods = [added_m for added_m in added_methods if not added_m.name == added_m.name]\n extracted_refs = extract_method_ref([(removed_method, added_m)], other_added_methods)\n refs.extend(extracted_refs)\n if added_m.class_node == removed_method.class_node and \\\n ((method1_unmapped == 0 and method2_unmapped == 0) or (\n (mapped_stmts_len >= method1_unmapped and mapped_stmts_len >= method2_unmapped) 
and\n (set(added_m.params).issuperset(set(removed_method.params)) or\n set(removed_method.params).issuperset(set(added_m.params)) or\n len(intersection(set(added_m.params), set(removed_method.params))) >\n len(set(added_m.params + removed_method.params).difference(\n intersection(set(added_m.params), set(removed_method.params))))))\n or (len(extracted_refs) >= 1 and mapped_stmts_len > method2_unmapped)\n ):\n\n _changes = []\n _param_change = []\n if not added_m.name == removed_method.name:\n _changes.append(\"Rename Method\")\n if len(set(added_m.params).difference(set(removed_method.params))) > 0 or len(set(removed_method.params).difference(set(added_m.params))) > 0:\n pre_params = [i for i in removed_method.params if i not in added_m.params]\n post_params = [i for i in added_m.params if i not in removed_method.params]\n if len(set(added_m.params)) > len(set(removed_method.params)):\n _changes.append(\"Add Parameter\")\n _param_change = [post_params[len(pre_params):]]\n # _param_change = [set(added_m.params).difference(removed_method.params)]\n elif len(set(added_m.params)) < len(set(removed_method.params)):\n _changes.append(\"Remove Parameter\")\n _param_change = [pre_params[len(post_params):]]\n # _param_change = [set(removed_method.params).difference(added_m.params)]\n else:\n _changes.append(\"Change/Rename Parameter\")\n _param_change = [pre_params, post_params]\n # _param_change = [set(removed_method.params).difference(added_m.params),\n # set(added_m.params).difference(removed_method.params)]\n if not added_m.return_type() == removed_method.return_type():\n _changes.append(\"Change Return Type\")\n\n priority = len(mapped_stmts[mapped_stmts['replacements'].isnull()])\n total_distance = mapped_stmts[\"distance\"].sum()\n common_methods.append((removed_method, added_m))\n if len(_changes) > 0:\n matched_methods = matched_methods.append(\n {\"from\": removed_method.name, \"to\": added_m.name,\n \"ref_type\": _changes,\n \"priority\": priority, \"total_distance\": total_distance, \"path\": added_m.get_path(),\n \"m1\": removed_method,\n \"m2\": added_m, 'mapped_stmts': mapped_stmts, 'param_change': _param_change},\n ignore_index=True\n )\n\n if len(matched_methods.index) > 0:\n gp = matched_methods.groupby(['from'])\n\n toKeep = []\n\n for name, group in gp:\n toKeep.append(\n group.sort_values(['priority', 'total_distance'], ascending=[False, True]).iloc[0])\n\n matched_methods = pd.DataFrame(toKeep) # FURTHER CHECK\n toKeep = []\n\n gp = matched_methods.groupby(['to'])\n\n for name, group in gp:\n toKeep.append(\n group.sort_values(['priority', 'total_distance'], ascending=[False, True]).iloc[0])\n\n matched_methods = pd.DataFrame(toKeep)\n\n matched_methods = matched_methods.values.tolist()\n\n for matched_method in matched_methods:\n refs.append(\n RenameRef(matched_method[0], matched_method[1], matched_method[2], matched_method[5], matched_method[-1],\n matched_method[6], matched_method[7], matched_method[9]))\n added_m_rm = [added_m for added_m in added_methods if added_m.name == matched_method[1]]\n remove_m_rm = [removed_m for removed_m in removed_methods if removed_m.name == matched_method[0]]\n\n if len(added_m_rm) > 0 and len(remove_m_rm) > 0:\n added_methods.remove(added_m_rm[0])\n removed_methods.remove(remove_m_rm[0])\n return refs\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gorsol/pyroms
|
[
"d293c9949daf95ec8a0a4e2ff1f67af8969c2b3f"
] |
[
"pyroms_toolbox/pyroms_toolbox/CGrid_GLORYS/flood.py"
] |
[
"# encoding: utf-8\n\nimport numpy as np\nfrom pyroms import _remapping\n\nimport pyroms\n\ndef flood(varz, Cgrd, Cpos='t', irange=None, jrange=None, \\\n spval=-9.99e+33, dmax=0, cdepth=0, kk=0):\n \"\"\"\n var = flood(var, Cgrd)\n\n optional switch:\n - Cpos='t', 'u', 'v' specify the grid position where\n the variable resides\n - irange specify grid sub-sample for i direction\n - jrange specify grid sub-sample for j direction\n - spval=1e35 define spval value\n - dmax=0 if dmax>0, maximum horizontal\n flooding distance\n - cdepth=0 critical depth for flooding\n if depth<cdepth => no flooding\n - kk\n\n Flood varz on Cgrd\n \"\"\"\n\n varz = varz.copy()\n varz = np.array(varz)\n\n assert len(varz.shape) == 3, 'var must be 3D'\n\n # replace spval by nan\n idx = np.where(abs((varz-spval)/spval)<=1e-5)\n varz[idx] = np.nan\n\n x = Cgrd.lon_t\n y = Cgrd.lat_t\n h = Cgrd.h\n if Cpos is 't':\n mask = Cgrd.mask_t[0,:,:]\n elif Cpos is 'u':\n mask = Cgrd.mask_u[0,:,:]\n elif Cpos is 'v':\n mask = Cgrd.mask_v[0,:,:]\n\n nlev, Mm, Lm = varz.shape\n\n if irange is None:\n irange = (0,Lm)\n else:\n assert varz.shape[2] == irange[1]-irange[0], \\\n 'var shape and irange must agreed'\n\n if jrange is None:\n jrange = (0,Mm)\n else:\n assert varz.shape[1] == jrange[1]-jrange[0], \\\n 'var shape and jrange must agreed'\n\n x = x[jrange[0]:jrange[1], irange[0]:irange[1]]\n y = y[jrange[0]:jrange[1], irange[0]:irange[1]]\n h = h[jrange[0]:jrange[1], irange[0]:irange[1]]\n mask = mask[jrange[0]:jrange[1], irange[0]:irange[1]]\n\n # Finding nearest values in horizontal\n # critical deph => no change if depth is less than specified value\n cdepth = abs(cdepth)\n if cdepth != 0:\n idx = np.where(h >= cdepth)\n msk = np.zeros(mask.shape)\n msk[idx] = 1\n else:\n msk = mask.copy()\n for k in range(nlev-1,0,-1):\n c1 = np.array(msk, dtype=bool)\n c2 = np.isnan(varz[k,:,:]) == 1\n if kk == 0:\n c3 = np.ones(mask.shape).astype(bool)\n else:\n c3 = np.isnan(varz[min(k-kk,0),:,:]) == 0\n c = c1 & c2 & c3\n idxnan = np.where(c == True)\n idx = np.where(c2 == False)\n if list(idx[0]):\n wet = np.zeros((len(idx[0]),2))\n dry = np.zeros((len(idxnan[0]),2))\n wet[:,0] = idx[0]+1\n wet[:,1] = idx[1]+1\n dry[:,0] = idxnan[0]+1\n dry[:,1] = idxnan[1]+1\n\n varz[k,:] = _remapping.flood(varz[k,:], wet, dry, x, y, dmax)\n\n # drop the deepest values down\n idx = np.where(np.isnan(varz) == 1)\n varz[idx] = spval\n bottom = pyroms.utility.get_bottom(varz[::-1,:,:], mask, spval=spval)\n bottom = (nlev-1) - bottom\n for i in range(Lm):\n for j in range(Mm):\n if mask[j,i] == 1:\n varz[int(bottom[j,i]):,j,i] = varz[int(bottom[j,i]),j,i]\n\n return varz\n"
] |
[
[
"numpy.isnan",
"numpy.ones",
"numpy.array",
"numpy.where",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aanurraj/PySyft
|
[
"1d2c6928b95a2f8164167a8c53f350b188e4533c"
] |
[
"packages/syft/src/syft/core/node/domain/client.py"
] |
[
"# stdlib\nimport logging\nimport sys\nimport time\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Type\nfrom typing import Union\n\n# third party\nfrom nacl.signing import SigningKey\nfrom nacl.signing import VerifyKey\nimport names\nimport pandas as pd\n\n# relative\nfrom .... import deserialize\nfrom ....logger import traceback_and_raise\nfrom ....util import validate_field\nfrom ...common.message import SyftMessage\nfrom ...common.serde.serialize import _serialize as serialize # noqa: F401\nfrom ...common.uid import UID\nfrom ...io.address import Address\nfrom ...io.location import Location\nfrom ...io.location.specific import SpecificLocation\nfrom ...io.route import Route\nfrom ...node.common.node_service.network_search.network_search_messages import (\n NetworkSearchMessage,\n)\nfrom ...pointer.pointer import Pointer\nfrom ...tensor.autodp.adp_tensor import ADPTensor\nfrom ...tensor.tensor import Tensor\nfrom ..abstract.node import AbstractNodeClient\nfrom ..common.action.exception_action import ExceptionMessage\nfrom ..common.client import Client\nfrom ..common.client_manager.association_api import AssociationRequestAPI\nfrom ..common.client_manager.dataset_api import DatasetRequestAPI\nfrom ..common.client_manager.role_api import RoleRequestAPI\nfrom ..common.client_manager.user_api import UserRequestAPI\nfrom ..common.client_manager.vpn_api import VPNAPI\nfrom ..common.node_service.get_remaining_budget.get_remaining_budget_messages import (\n GetRemainingBudgetMessage,\n)\nfrom ..common.node_service.node_setup.node_setup_messages import GetSetUpMessage\nfrom ..common.node_service.node_setup.node_setup_messages import UpdateSetupMessage\nfrom ..common.node_service.object_request.object_request_messages import (\n CreateBudgetRequestMessage,\n)\nfrom ..common.node_service.object_transfer.object_transfer_messages import (\n LoadObjectMessage,\n)\nfrom ..common.node_service.request_receiver.request_receiver_messages import (\n RequestMessage,\n)\nfrom .enums import PyGridClientEnums\nfrom .enums import RequestAPIFields\n\n\nclass RequestQueueClient(AbstractNodeClient):\n def __init__(self, client: Client) -> None:\n self.client = client\n self.handlers = RequestHandlerQueueClient(client=client)\n\n self.users = UserRequestAPI(client=self)\n self.roles = RoleRequestAPI(client=self)\n self.association = AssociationRequestAPI(client=self)\n self.datasets = DatasetRequestAPI(client=self)\n\n @property\n def requests(self) -> List[RequestMessage]:\n\n # relative\n from ..common.node_service.get_all_requests.get_all_requests_messages import (\n GetAllRequestsMessage,\n )\n\n msg = GetAllRequestsMessage(\n address=self.client.address, reply_to=self.client.address\n )\n\n blob = serialize(msg, to_bytes=True)\n msg = deserialize(blob, from_bytes=True)\n\n requests = self.client.send_immediate_msg_with_reply(msg=msg).requests # type: ignore\n\n for request in requests:\n request.gc_enabled = False\n request.owner_client_if_available = self.client\n\n return requests\n\n def get_request_id_from_object_id(self, object_id: UID) -> Optional[UID]:\n for req in self.requests:\n if req.object_id == object_id:\n return req.request_id\n\n return object_id\n\n def __getitem__(self, key: Union[str, int]) -> RequestMessage:\n if isinstance(key, str):\n for request in self.requests:\n if key == str(request.id.value):\n return request\n traceback_and_raise(\n KeyError(\"No such request found for string id:\" + str(key))\n )\n if 
isinstance(key, int):\n return self.requests[key]\n else:\n traceback_and_raise(KeyError(\"Please pass in a string or int key\"))\n\n raise Exception(\"should not get here\")\n\n def __repr__(self) -> str:\n return repr(self.requests)\n\n def _repr_html_(self) -> str:\n return self.pandas._repr_html_()\n\n @property\n def pandas(self) -> pd.DataFrame:\n request_lines = [\n {\n \"Requested Object's tags\": request.object_tags,\n \"Reason\": request.request_description,\n \"Request ID\": request.id,\n \"Requested Object's ID\": request.object_id,\n \"Requested Object's type\": request.object_type,\n }\n for request in self.requests\n ]\n return pd.DataFrame(request_lines)\n\n def add_handler(\n self,\n action: str,\n print_local: bool = False,\n log_local: bool = False,\n tags: Optional[List[str]] = None,\n timeout_secs: int = -1,\n element_quota: Optional[int] = None,\n ) -> None:\n handler_opts = self._validate_options(\n id=UID(),\n action=action,\n print_local=print_local,\n log_local=log_local,\n tags=tags,\n timeout_secs=timeout_secs,\n element_quota=element_quota,\n )\n\n self._update_handler(handler_opts, keep=True)\n\n def remove_handler(self, key: Union[str, int]) -> None:\n handler_opts = self.handlers[key]\n\n self._update_handler(handler_opts, keep=False)\n\n def clear_handlers(self) -> None:\n for handler in self.handlers.handlers:\n id_str = str(handler[\"id\"].value).replace(\"-\", \"\")\n self.remove_handler(id_str)\n\n def _validate_options(\n self,\n action: str,\n print_local: bool = False,\n log_local: bool = False,\n tags: Optional[List[str]] = None,\n timeout_secs: int = -1,\n element_quota: Optional[int] = None,\n id: Optional[UID] = None,\n ) -> Dict[str, Any]:\n handler_opts: Dict[str, Any] = {}\n if action not in [\"accept\", \"deny\"]:\n traceback_and_raise(Exception(\"Action must be 'accept' or 'deny'\"))\n handler_opts[\"action\"] = action\n handler_opts[\"print_local\"] = bool(print_local)\n handler_opts[\"log_local\"] = bool(log_local)\n\n handler_opts[\"tags\"] = []\n if tags is not None:\n for tag in tags:\n handler_opts[\"tags\"].append(tag)\n handler_opts[\"timeout_secs\"] = max(-1, int(timeout_secs))\n if element_quota is not None:\n handler_opts[\"element_quota\"] = max(0, int(element_quota))\n\n if id is None:\n id = UID()\n handler_opts[\"id\"] = id\n\n return handler_opts\n\n def _update_handler(self, request_handler: Dict[str, Any], keep: bool) -> None:\n # relative\n from ..common.node_service.request_handler.request_handler_messages import (\n UpdateRequestHandlerMessage,\n )\n\n msg = UpdateRequestHandlerMessage(\n address=self.client.address, handler=request_handler, keep=keep\n )\n self.client.send_immediate_msg_without_reply(msg=msg)\n\n\nclass RequestHandlerQueueClient:\n def __init__(self, client: Client) -> None:\n self.client = client\n\n @property\n def handlers(self) -> List[Dict]:\n # relative\n from ..common.node_service.request_handler.request_handler_messages import (\n GetAllRequestHandlersMessage,\n )\n\n msg = GetAllRequestHandlersMessage(\n address=self.client.address, reply_to=self.client.address\n )\n return validate_field(\n self.client.send_immediate_msg_with_reply(msg=msg), \"handlers\"\n )\n\n def __getitem__(self, key: Union[str, int]) -> Dict:\n \"\"\"\n allow three ways to get an request handler:\n 1. use id: str\n 2. use tag: str\n 3. 
use index: int\n \"\"\"\n if isinstance(key, str):\n matches = 0\n match_handler: Optional[Dict] = None\n for handler in self.handlers:\n if key in str(handler[\"id\"].value).replace(\"-\", \"\"):\n return handler\n if key in handler[\"tags\"]:\n matches += 1\n match_handler = handler\n if matches == 1 and match_handler is not None:\n return match_handler\n elif matches > 1:\n raise KeyError(\"More than one item with tag:\" + str(key))\n\n raise KeyError(\"No such request found for string id:\" + str(key))\n if isinstance(key, int):\n return self.handlers[key]\n else:\n raise KeyError(\"Please pass in a string or int key\")\n\n def __repr__(self) -> str:\n return repr(self.handlers)\n\n @property\n def pandas(self) -> pd.DataFrame:\n def _get_time_remaining(handler: dict) -> int:\n timeout_secs = handler.get(\"timeout_secs\", -1)\n if timeout_secs == -1:\n return -1\n else:\n created_time = handler.get(\"created_time\", 0)\n rem = timeout_secs - (time.time() - created_time)\n return round(rem)\n\n handler_lines = [\n {\n \"tags\": handler[\"tags\"],\n \"ID\": handler[\"id\"],\n \"action\": handler[\"action\"],\n \"remaining time (s):\": _get_time_remaining(handler),\n }\n for handler in self.handlers\n ]\n return pd.DataFrame(handler_lines)\n\n\nclass DomainClient(Client):\n\n domain: SpecificLocation\n requests: RequestQueueClient\n\n def __init__(\n self,\n name: Optional[str],\n routes: List[Route],\n domain: SpecificLocation,\n network: Optional[Location] = None,\n device: Optional[Location] = None,\n vm: Optional[Location] = None,\n signing_key: Optional[SigningKey] = None,\n verify_key: Optional[VerifyKey] = None,\n ):\n super().__init__(\n name=name,\n routes=routes,\n network=network,\n domain=domain,\n device=device,\n vm=vm,\n signing_key=signing_key,\n verify_key=verify_key,\n )\n\n self.requests = RequestQueueClient(client=self)\n\n self.post_init()\n\n self.users = UserRequestAPI(client=self)\n self.roles = RoleRequestAPI(client=self)\n self.association = AssociationRequestAPI(client=self)\n self.datasets = DatasetRequestAPI(client=self)\n self.vpn = VPNAPI(client=self)\n\n @property\n def privacy_budget(self) -> float:\n msg = GetRemainingBudgetMessage(address=self.address, reply_to=self.address)\n return self.send_immediate_msg_with_reply(msg).budget # type: ignore\n\n def request_budget(\n self,\n eps: float = 0.0,\n reason: str = \"\",\n skip_checks: bool = False,\n ) -> Any:\n\n if not skip_checks:\n if eps == 0.0:\n eps = float(input(\"Please specify how much more epsilon you want:\"))\n\n if reason == \"\":\n reason = str(\n input(\"Why should the domain owner give you more epsilon:\")\n )\n\n msg = CreateBudgetRequestMessage(\n reason=reason,\n budget=eps,\n address=self.address,\n )\n\n self.send_immediate_msg_without_reply(msg=msg)\n\n print(\n \"Requested \"\n + str(eps)\n + \" epsilon of budget. Call .privacy_budget to see if your budget has arrived!\"\n )\n\n def load(\n self, obj_ptr: Type[Pointer], address: Address, pointable: bool = False\n ) -> None:\n content = {\n RequestAPIFields.ADDRESS: serialize(address)\n .SerializeToString() # type: ignore\n .decode(PyGridClientEnums.ENCODING),\n RequestAPIFields.UID: str(obj_ptr.id_at_location.value),\n RequestAPIFields.POINTABLE: pointable,\n }\n self._perform_grid_request(grid_msg=LoadObjectMessage, content=content)\n\n def setup(self, *, domain_name: Optional[str], **kwargs: Any) -> Any:\n if domain_name is None:\n domain_name = names.get_full_name() + \"'s Domain\"\n logging.info(\n \"No Domain Name provided... 
picking randomly as: \" + domain_name\n )\n\n kwargs[\"domain_name\"] = domain_name\n\n response = self.conn.setup(**kwargs) # type: ignore\n logging.info(response[RequestAPIFields.MESSAGE])\n\n def reset(self) -> None:\n logging.warning(\n \"Node reset will delete the data, as well as the requests. Do you want to continue (y/N)?\"\n )\n response = input().lower()\n if response == \"y\":\n response = self.routes[0].connection.reset() # type: ignore\n\n def configure(self, **kwargs: Any) -> Any:\n if \"daa_document\" in kwargs.keys():\n kwargs[\"daa_document\"] = open(kwargs[\"daa_document\"], \"rb\").read()\n else:\n kwargs[\"daa_document\"] = b\"\"\n response = self._perform_grid_request( # type: ignore\n grid_msg=UpdateSetupMessage, content=kwargs\n ).content\n logging.info(response)\n\n @property\n def settings(self, **kwargs: Any) -> Dict[Any, Any]: # type: ignore\n return self._perform_grid_request( # type: ignore\n grid_msg=GetSetUpMessage, content=kwargs\n ).content # type : ignore\n\n def search(self, query: List, pandas: bool = False) -> Any:\n response = self._perform_grid_request(\n grid_msg=NetworkSearchMessage, content={RequestAPIFields.QUERY: query}\n )\n if pandas:\n response = pd.DataFrame(response)\n\n return response\n\n def _perform_grid_request(\n self, grid_msg: Any, content: Optional[Dict[Any, Any]] = None\n ) -> SyftMessage:\n if content is None:\n content = {}\n # Build Syft Message\n content[RequestAPIFields.ADDRESS] = self.address\n content[RequestAPIFields.REPLY_TO] = self.address\n signed_msg = grid_msg(**content).sign(signing_key=self.signing_key)\n # Send to the dest\n response = self.send_immediate_msg_with_reply(msg=signed_msg)\n if isinstance(response, ExceptionMessage):\n raise response.exception_type\n else:\n return response\n\n def get_setup(self, **kwargs: Any) -> Any:\n return self._perform_grid_request(grid_msg=GetSetUpMessage, content=kwargs)\n\n def apply_to_network(\n self, domain_vpn_ip: str, network_vpn_ip: str, **metadata: str\n ) -> None:\n # TODO: refactor\n # Step 1: send a message to the Network from the Domain\n # this means the first message contains the VPN IP for the network\n\n # Step 2: the contents of message is the VPN IPs, because many domains\n # will be behind firewalls, we want to use the VPN IP as both directions should\n # be able to connect to each other\n\n # domain_vpn_ip = Domain VPN host_or_ip because thats the only IP the Network\n # can reach the Domain on, and will go into the association request table\n # after the association is approved these IPs will be added to the node and\n # node_route tables to route all traffic from the Network node to that Domain\n\n # network_vpn_ip = Network VPN host_or_ip because that will be what goes into the\n # association request table and then gets used to route all traffic from\n # the Domain to the Network node\n\n self.association.create(\n source=domain_vpn_ip, target=network_vpn_ip, metadata=metadata\n )\n\n @property\n def id(self) -> UID:\n return self.domain.id\n\n # # TODO: @Madhava make work\n # @property\n # def accountant(self):\n # \"\"\"Queries some service that returns a pointer to the ONLY real accountant for this\n # user that actually affects object permissions when used in a .publish() method. 
Other accountant\n # objects might exist in the object store but .publish() is just for simulation and won't change\n # the permissions on the object it's called on.\"\"\"\n\n # # TODO: @Madhava make work\n # def create_simulated_accountant(self, init_with_budget_remaining=True):\n # \"\"\"Creates an accountant in the remote store. If init_with_budget_remaining=True then the accountant\n # is a copy of an existing accountant. If init_with_budget_remaining=False then it is a fresh accountant\n # with the sam max budget.\"\"\"\n\n @property\n def device(self) -> Optional[Location]:\n \"\"\"This client points to a node, if that node lives within a device\n or is a device itself, this property will return the Location of that device\n if it is known by the client.\"\"\"\n\n return super().device\n\n @device.setter\n def device(self, new_device: Location) -> Optional[Location]:\n \"\"\"This client points to a node, if that node lives within a device\n or is a device itself and we learn the Location of that device, this setter\n allows us to save the Location of that device for use later. We use a getter\n (@property) and setter (@set) explicitly because we want all clients\n to efficiently save an address object for use when sending messages to their\n target. That address object will include this information if it is available\"\"\"\n\n traceback_and_raise(\n Exception(\"This client points to a domain, you don't need a Device ID.\")\n )\n\n @property\n def vm(self) -> Optional[Location]:\n \"\"\"This client points to a node, if that node lives within a vm\n or is a vm itself, this property will return the Location of that vm\n if it is known by the client.\"\"\"\n\n return super().vm\n\n @vm.setter\n def vm(self, new_vm: Location) -> Optional[Location]:\n \"\"\"This client points to a node, if that node lives within a vm\n or is a vm itself and we learn the Location of that vm, this setter\n allows us to save the Location of that vm for use later. We use a getter\n (@property) and setter (@set) explicitly because we want all clients\n to efficiently save an address object for use when sending messages to their\n target. That address object will include this information if it is available\"\"\"\n\n traceback_and_raise(\n Exception(\"This client points to a device, you don't need a VM Location.\")\n )\n\n def __repr__(self) -> str:\n no_dash = str(self.id).replace(\"-\", \"\")\n return f\"<{type(self).__name__} - {self.name}: {no_dash}>\"\n\n def update_vars(self, state: dict) -> pd.DataFrame:\n for ptr in self.store.store:\n tags = getattr(ptr, \"tags\", None)\n if tags is not None:\n for tag in tags:\n state[tag] = ptr\n return self.store.pandas\n\n def join_network(self, host_or_ip: str) -> None:\n return self.vpn.join_network(host_or_ip=host_or_ip)\n\n def vpn_status(self) -> Dict[str, Any]:\n return self.vpn.get_status()\n\n def load_dataset(\n self,\n assets: Optional[dict] = None,\n name: Optional[str] = None,\n description: Optional[str] = None,\n skip_checks: bool = False,\n **metadata: Dict,\n ) -> None:\n sys.stdout.write(\"Loading dataset...\")\n if assets is None or not isinstance(assets, dict):\n raise Exception(\n \"Missing Assets: Oops!... You forgot to include your data! (or you passed it in the wrong way) \\n\\n\"\n \"You must call load_dataset() with a dictionary of assets which are the \"\n \"private dataset objects (tensors) you wish to allow someone else to study \"\n \"while PySyft protects it using various privacy enhancing technologies. 
\\n\\n\"\n \"For example, the MNIST dataset is comprised of 6 tensors, so we would create an assets \"\n \"dictionary with 6 keys (strings) mapping to the 6 tensors of MNIST.\\n\\n\"\n \"Please pass in a dictionary where the key is the name of the asset and the value is \"\n \"the private dataset object (tensor) itself. We recommend uploading assets which \"\n \"are differential-privacy trackable objects, such as a syft.Tensor() wrapped \"\n \"numpy.int32 or numpy.float32 object which you then call .private() on. \\n\\nOnce \"\n \"you have an assets dictionary call load_dataset(assets=<your dict of objects>).\"\n )\n sys.stdout.write(\"\\rLoading dataset... checking assets...\")\n\n if name is None:\n raise Exception(\n \"Missing Name: Oops!... You forgot to name your dataset!\\n\\n\"\n \"It's important to give your dataset a clear and descriptive name because\"\n \" the name is the primary way in which potential users of the dataset will\"\n \" identify it.\\n\\n\"\n 'Retry with a string name. I.e., .load_dataset(name=\"<your name here>)\"'\n )\n sys.stdout.write(\"\\rLoading dataset... checking dataset name for uniqueness...\")\n datasets = self.datasets\n\n if not skip_checks:\n for i in range(len(datasets)):\n d = datasets[i]\n sys.stdout.write(\".\")\n if name == d.name:\n print(\n \"\\n\\nWARNING - Dataset Name Conflict: A dataset named '\"\n + name\n + \"' already exists.\\n\"\n )\n pref = input(\"Do you want to upload this dataset anyway? (y/n)\")\n while pref != \"y\" and pref != \"n\":\n pref = input(\n \"Invalid input '\" + pref + \"', please specify 'y' or 'n'.\"\n )\n if pref == \"n\":\n raise Exception(\"Dataset loading cancelled.\")\n else:\n print() # just for the newline\n break\n\n sys.stdout.write(\n \"\\rLoading dataset... checking dataset name for uniqueness...\"\n \" \"\n \" \"\n )\n\n if description is None:\n raise Exception(\n \"Missing Description: Oops!... You forgot to describe your dataset!\\n\\n\"\n \"It's *very* important to give your dataset a very clear and complete description\"\n \" because your users will need to be able to find this dataset (the description is used for search)\"\n \" AND they will need enough information to be able to know that the dataset is what they're\"\n \" looking for AND how to use it.\\n\\n\"\n \"Start by describing where the dataset came from, how it was collected, and how its formatted.\"\n \"Refer to each object in 'assets' individually so that your users will know which is which. Don't\"\n \" be afraid to be longwinded. :) Your users will thank you.\"\n )\n\n sys.stdout.write(\n \"\\rLoading dataset... checking asset types... \"\n )\n\n # relative\n from ....lib.python.util import downcast\n\n if not skip_checks:\n for asset_name, asset in assets.items():\n\n if not isinstance(asset, Tensor) or not isinstance(\n getattr(asset, \"child\", None), ADPTensor\n ):\n\n print(\n \"\\n\\nWARNING - Non-DP Asset: You just passed in a asset '\"\n + asset_name\n + \"' which cannot be tracked with differential privacy because it is a \"\n + str(type(asset))\n + \" object.\\n\\n\"\n + \"This means you'll need to manually approve any requests which \"\n + \"leverage this data. If this is ok with you, proceed. If you'd like to use \"\n + \"automatic differential privacy budgeting, please pass in a DP-compatible tensor type \"\n + \"such as by calling .private() on a sy.Tensor with a np.int32 or np.float32 inside.\"\n )\n\n pref = input(\"Are you sure you want to proceed? 
(y/n)\")\n\n while pref != \"y\" and pref != \"n\":\n pref = input(\n \"Invalid input '\" + pref + \"', please specify 'y' or 'n'.\"\n )\n if pref == \"n\":\n raise Exception(\"Dataset loading cancelled.\")\n\n metadata[\"name\"] = bytes(name, \"utf-8\") # type: ignore\n metadata[\"description\"] = bytes(description, \"utf-8\") # type: ignore\n\n for k, v in metadata.items():\n if isinstance(v, str): # type: ignore\n metadata[k] = bytes(v, \"utf-8\") # type: ignore\n\n assets = downcast(assets)\n metadata = downcast(metadata)\n\n binary_dataset = serialize(assets, to_bytes=True)\n\n sys.stdout.write(\"\\rLoading dataset... uploading... \")\n self.datasets.create_syft(\n dataset=binary_dataset, metadata=metadata, platform=\"syft\"\n )\n sys.stdout.write(\n \"\\rLoading dataset... uploading... SUCCESS! \"\n )\n\n print(\n \"\\n\\nRun <your client variable>.datasets to see your new dataset loaded into your machine!\"\n )\n\n def create_dataset(\n self,\n name: Optional[str] = None,\n description: Optional[str] = None,\n skip_checks: bool = False,\n **metadata: Dict,\n ) -> None:\n # relative\n from ....lib.python.util import downcast\n\n if name is None:\n raise Exception(\n \"Missing Name: Oops!... You forgot to name your dataset!\\n\\n\"\n \"It's important to give your dataset a clear and descriptive name because\"\n \" the name is the primary way in which potential users of the dataset will\"\n \" identify it.\\n\\n\"\n 'Retry with a string name. I.e., .load_dataset(name=\"<your name here>)\"'\n )\n\n datasets = self.datasets\n\n if not skip_checks:\n for i in range(len(datasets)):\n d = datasets[i]\n sys.stdout.write(\".\")\n if name == d.name:\n print(\n \"\\n\\nWARNING - Dataset Name Conflict: A dataset named '\"\n + name\n + \"' already exists.\\n\"\n )\n pref = input(\"Do you want to upload this dataset anyway? (y/n)\")\n while pref != \"y\" and pref != \"n\":\n pref = input(\n \"Invalid input '\" + pref + \"', please specify 'y' or 'n'.\"\n )\n if pref == \"n\":\n raise Exception(\"Dataset loading cancelled.\")\n else:\n print() # just for the newline\n break\n\n if description is None:\n raise Exception(\n \"Missing Description: Oops!... You forgot to describe your dataset!\\n\\n\"\n \"It's *very* important to give your dataset a very clear and complete description\"\n \" because your users will need to be able to find this dataset (the description is used for search)\"\n \" AND they will need enough information to be able to know that the dataset is what they're\"\n \" looking for AND how to use it.\\n\\n\"\n \"Start by describing where the dataset came from, how it was collected, and how its formatted.\"\n \"Refer to each object in 'assets' individually so that your users will know which is which. Don't\"\n \" be afraid to be longwinded. :) Your users will thank you.\"\n )\n\n metadata[\"name\"] = bytes(name, \"utf-8\") # type: ignore\n metadata[\"description\"] = bytes(description, \"utf-8\") # type: ignore\n\n for k, v in metadata.items():\n if isinstance(v, str): # type: ignore\n metadata[k] = bytes(v, \"utf-8\") # type: ignore\n\n assets = downcast({})\n binary_dataset = serialize(assets, to_bytes=True)\n\n metadata = downcast(metadata)\n\n self.datasets.create_syft(\n dataset=binary_dataset, metadata=metadata, platform=\"syft\"\n )\n sys.stdout.write(\"Creating an empty dataset... Creating... SUCCESS!\")\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
pferido/Energy-Disaggregation
|
[
"5558e65bd2c3fd4c602bf76504a8acd2c51ae23e"
] |
[
"Tuning_Notebooks/model_structure.py"
] |
[
"import tensorflow as tf \nimport os\n\ndef create_model(input_window_length):\n\n \"\"\"Specifies the structure of a seq2point model using Keras' functional API.\n\n Returns:\n model (tensorflow.keras.Model): The uncompiled seq2point model.\n\n \"\"\"\n\n input_layer = tf.keras.layers.Input(shape=(input_window_length,))\n reshape_layer = tf.keras.layers.Reshape((1, input_window_length, 1))(input_layer)\n conv_layer_1 = tf.keras.layers.Convolution2D(filters=30, kernel_size=(10, 1), strides=(1, 1), padding=\"same\", activation=\"relu\")(reshape_layer)\n conv_layer_2 = tf.keras.layers.Convolution2D(filters=30, kernel_size=(8, 1), strides=(1, 1), padding=\"same\", activation=\"relu\")(conv_layer_1)\n conv_layer_3 = tf.keras.layers.Convolution2D(filters=40, kernel_size=(6, 1), strides=(1, 1), padding=\"same\", activation=\"relu\")(conv_layer_2)\n conv_layer_4 = tf.keras.layers.Convolution2D(filters=50, kernel_size=(5, 1), strides=(1, 1), padding=\"same\", activation=\"relu\")(conv_layer_3)\n conv_layer_5 = tf.keras.layers.Convolution2D(filters=50, kernel_size=(5, 1), strides=(1, 1), padding=\"same\", activation=\"relu\")(conv_layer_4)\n flatten_layer = tf.keras.layers.Flatten()(conv_layer_5)\n label_layer = tf.keras.layers.Dense(1024, activation=\"relu\")(flatten_layer)\n output_layer = tf.keras.layers.Dense(1, activation=\"linear\")(label_layer)\n\n model = tf.keras.Model(inputs=input_layer, outputs=output_layer)\n return model\n\ndef save_model(model, network_type, algorithm, appliance, save_model_dir):\n\n \"\"\" Saves a model to a specified location. Models are named using a combination of their \n target appliance, architecture, and pruning algorithm.\n\n Parameters:\n model (tensorflow.keras.Model): The Keras model to save.\n network_type (string): The architecture of the model ('', 'reduced', 'dropout', or 'reduced_dropout').\n algorithm (string): The pruning algorithm applied to the model.\n appliance (string): The appliance the model was trained with.\n\n \"\"\"\n \n #model_path = \"saved_models/\" + appliance + \"_\" + algorithm + \"_\" + network_type + \"_model.h5\"\n model_path = save_model_dir\n\n if not os.path.exists (model_path):\n open((model_path), 'a').close()\n\n model.save(model_path)\n print(\"SAVED\")\n\ndef load_model(model, network_type, algorithm, appliance, saved_model_dir):\n\n \"\"\" Loads a model from a specified location.\n\n Parameters:\n model (tensorflow.keras.Model): The Keas model to which the loaded weights will be applied to.\n network_type (string): The architecture of the model ('', 'reduced', 'dropout', or 'reduced_dropout').\n algorithm (string): The pruning algorithm applied to the model.\n appliance (string): The appliance the model was trained with.\n\n \"\"\"\n\n #model_name = \"saved_models/\" + appliance + \"_\" + algorithm + \"_\" + network_type + \"_model.h5\"\n model_name = saved_model_dir\n print(\"PATH NAME: \", model_name)\n\n model = tf.keras.models.load_model(model_name)\n num_of_weights = model.count_params()\n print(\"Loaded model with \", str(num_of_weights), \" weights\")\n return model"
] |
[
[
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Convolution2D",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Input"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |