Columns: repo_name (string, 6–130 chars) · hexsha (list) · file_path (list) · code (list) · apis (list)
jordiclive/ControlPrefixes
[ "b647f68bf0c7e771f847c4a51e5f22af2ac95699" ]
[ "transformers/examples/seq2seq/finetune_trainer.py" ]
[ "import json\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import Callable, Dict, List, Optional, Tuple\n\nimport numpy as np\nimport torch\n\nfrom seq2seq_trainer import Seq2SeqTrainer\nfrom transformers import (\n AutoConfig,\n AutoModelForSeq2SeqLM,\n AutoTokenizer,\n BartTokenizer,\n EvalPrediction,\n HfArgumentParser,\n MBartTokenizer,\n T5Tokenizer,\n TrainingArguments,\n set_seed,\n)\nfrom transformers.modeling_bart import shift_tokens_right\nfrom utils import (\n LegacySeq2SeqDataset,\n Seq2SeqDataset,\n assert_all_frozen,\n calculate_bleu,\n calculate_rouge,\n freeze_params,\n lmap,\n trim_batch,\n use_task_specific_params,\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Seq2SeqDataCollator:\n def __init__(self, tokenizer, data_args, tpu_num_cores=None):\n self.tokenizer = tokenizer\n self.pad_token_id = tokenizer.pad_token_id\n self.data_args = data_args\n self.tpu_num_cores = tpu_num_cores\n self.add_prefix_space = isinstance(tokenizer, BartTokenizer)\n\n def __call__(self, batch) -> Dict[str, torch.Tensor]:\n if hasattr(self.tokenizer, \"prepare_seq2seq_batch\"):\n batch = self._encode(batch)\n input_ids, attention_mask, labels = (\n batch[\"input_ids\"],\n batch[\"attention_mask\"],\n batch[\"labels\"],\n )\n else:\n input_ids = torch.stack([x[\"input_ids\"] for x in batch])\n attention_mask = torch.stack([x[\"attention_mask\"] for x in batch])\n labels = torch.stack([x[\"labels\"] for x in batch])\n\n labels = trim_batch(labels, self.pad_token_id)\n input_ids, attention_mask = trim_batch(input_ids, self.pad_token_id, attention_mask=attention_mask)\n\n if isinstance(self.tokenizer, T5Tokenizer):\n decoder_input_ids = self._shift_right_t5(labels)\n labels = labels\n else:\n decoder_input_ids = shift_tokens_right(labels, self.pad_token_id)\n labels = labels\n\n batch = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"decoder_input_ids\": decoder_input_ids,\n \"labels\": labels,\n }\n return batch\n\n def _shift_right_t5(self, input_ids):\n decoder_start_token_id = self.pad_token_id\n\n assert (\n decoder_start_token_id is not None\n ), \"self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information\"\n\n # shift inputs to the right\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()\n shifted_input_ids[..., 0] = decoder_start_token_id\n\n return shifted_input_ids\n\n def _encode(self, batch) -> Dict[str, torch.Tensor]:\n batch_encoding = self.tokenizer.prepare_seq2seq_batch(\n [x[\"src_texts\"] for x in batch],\n src_lang=self.data_args.src_lang,\n tgt_texts=[x[\"tgt_texts\"] for x in batch],\n tgt_lang=self.data_args.tgt_lang,\n max_length=self.data_args.max_source_length,\n max_target_length=self.data_args.max_target_length,\n padding=\"max_length\" if self.tpu_num_cores is not None else \"longest\", # TPU hack\n return_tensors=\"pt\",\n add_prefix_space=self.add_prefix_space,\n )\n return batch_encoding.data\n\n\n@dataclass\nclass Seq2SeqTrainingArguments(TrainingArguments):\n \"\"\"\n Parameters:\n label_smoothing (:obj:`float`, `optional`, defaults to 0):\n The label smoothing epsilon to apply (if not zero).\n sortish_sampler (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to SortishSamler or not. 
It sorts the inputs according to lengths in order to minimize the padding size.\n        predict_with_generate (:obj:`bool`, `optional`, defaults to :obj:`False`):\n            Whether to use generate to calculate generative metrics (ROUGE, BLEU).\n    \"\"\"\n\n    label_smoothing: Optional[float] = field(\n        default=0.0, metadata={\"help\": \"The label smoothing epsilon to apply (if not zero).\"}\n    )\n    sortish_sampler: bool = field(default=False, metadata={\"help\": \"Whether to use SortishSampler or not.\"})\n    predict_with_generate: bool = field(\n        default=False, metadata={\"help\": \"Whether to use generate to calculate generative metrics (ROUGE, BLEU).\"}\n    )\n\n\n@dataclass\nclass ModelArguments:\n    \"\"\"\n    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n    \"\"\"\n\n    model_name_or_path: str = field(\n        metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n    )\n    config_name: Optional[str] = field(\n        default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n    )\n    tokenizer_name: Optional[str] = field(\n        default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n    )\n    cache_dir: Optional[str] = field(\n        default=None, metadata={\"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n    )\n    freeze_encoder: bool = field(default=False, metadata={\"help\": \"Whether to freeze the encoder.\"})\n    freeze_embeds: bool = field(default=False, metadata={\"help\": \"Whether to freeze the embeddings.\"})\n\n\n@dataclass\nclass DataTrainingArguments:\n    \"\"\"\n    Arguments pertaining to what data we are going to input our model for training and eval.\n    \"\"\"\n\n    data_dir: str = field(\n        metadata={\"help\": \"The input data dir. Should contain the .tsv files (or other data files) for the task.\"}\n    )\n    task: Optional[str] = field(\n        default=\"GEC\",\n        metadata={\"help\": \"Task name, GEC (or summarization_{dataset} for pegasus) or translation\"},\n    )\n    max_source_length: Optional[int] = field(\n        default=1024,\n        metadata={\n            \"help\": \"The maximum total input sequence length after tokenization. Sequences longer \"\n            \"than this will be truncated, sequences shorter will be padded.\"\n        },\n    )\n    max_target_length: Optional[int] = field(\n        default=128,\n        metadata={\n            \"help\": \"The maximum total sequence length for target text after tokenization. Sequences longer \"\n            \"than this will be truncated, sequences shorter will be padded.\"\n        },\n    )\n    val_max_target_length: Optional[int] = field(\n        default=142,\n        metadata={\n            \"help\": \"The maximum total sequence length for validation target text after tokenization. Sequences longer \"\n            \"than this will be truncated, sequences shorter will be padded.\"\n        },\n    )\n    test_max_target_length: Optional[int] = field(\n        default=142,\n        metadata={\n            \"help\": \"The maximum total sequence length for test target text after tokenization. Sequences longer \"\n            \"than this will be truncated, sequences shorter will be padded.\"\n        },\n    )\n    n_train: Optional[int] = field(default=-1, metadata={\"help\": \"# training examples. -1 means use all.\"})\n    n_val: Optional[int] = field(default=-1, metadata={\"help\": \"# validation examples. -1 means use all.\"})\n    n_test: Optional[int] = field(default=-1, metadata={\"help\": \"# test examples. 
-1 means use all.\"})\n src_lang: Optional[str] = field(default=None, metadata={\"help\": \"Source language id for translation.\"})\n tgt_lang: Optional[str] = field(default=None, metadata={\"help\": \"Target language id for translation.\"})\n eval_beams: Optional[int] = field(default=None, metadata={\"help\": \"# num_beams to use for evaluation.\"})\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))\n\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n if (\n os.path.exists(training_args.output_dir)\n and os.listdir(training_args.output_dir)\n and training_args.do_train\n and not training_args.overwrite_output_dir\n ):\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.\"\n )\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n training_args.local_rank,\n training_args.device,\n training_args.n_gpu,\n bool(training_args.local_rank != -1),\n training_args.fp16,\n )\n logger.info(\"Training/evaluation parameters %s\", training_args)\n\n # Set seed\n set_seed(training_args.seed)\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n )\n model = AutoModelForSeq2SeqLM.from_pretrained(\n model_args.model_name_or_path,\n from_tf=\".ckpt\" in model_args.model_name_or_path,\n config=config,\n cache_dir=model_args.cache_dir,\n )\n\n # use task specific params\n use_task_specific_params(model, data_args.task)\n\n # set num_beams for evaluation\n if data_args.eval_beams is not None:\n model.config.num_beams = data_args.eval_beams\n assert model.config.num_beams >= 1, f\"got eval_beams={model.config.num_beams}. 
Need an integer >= 1\"\n\n # set max length for generation\n model.config.max_generate_length = data_args.val_max_target_length\n\n # set decoder_start_token_id for MBart\n if model.config.decoder_start_token_id is None and isinstance(tokenizer, MBartTokenizer):\n decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]\n model.config.decoder_start_token_id = decoder_start_token_id\n\n def build_compute_metrics_fn(task_name: str) -> Callable[[EvalPrediction], Dict]:\n def non_pad_len(tokens: np.ndarray) -> int:\n return np.count_nonzero(tokens != tokenizer.pad_token_id)\n\n def decode_pred(pred: EvalPrediction) -> Tuple[List[str], List[str]]:\n pred_str = tokenizer.batch_decode(pred.predictions, skip_special_tokens=True)\n label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)\n pred_str = lmap(str.strip, pred_str)\n label_str = lmap(str.strip, label_str)\n return pred_str, label_str\n\n def summarization_metrics(pred: EvalPrediction) -> Dict:\n pred_str, label_str = decode_pred(pred)\n rouge: Dict = calculate_rouge(pred_str, label_str)\n summ_len = np.mean(lmap(non_pad_len, pred.predictions))\n rouge.update({\"gen_len\": summ_len})\n return rouge\n\n def translation_metrics(pred: EvalPrediction) -> Dict:\n pred_str, label_str = decode_pred(pred)\n bleu: Dict = calculate_bleu(pred_str, label_str)\n gen_len = np.mean(lmap(non_pad_len, pred.predictions))\n bleu.update({\"gen_len\": gen_len})\n return bleu\n\n compute_metrics_fn = summarization_metrics if \"GEC\" in task_name else translation_metrics\n return compute_metrics_fn\n\n def freeze_embeds(model: torch.nn.Module):\n \"\"\"Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.\"\"\"\n try:\n freeze_params(model.model.shared)\n for d in [model.model.encoder, model.model.decoder]:\n freeze_params(d.embed_positions)\n freeze_params(d.embed_tokens)\n except AttributeError:\n freeze_params(model.shared)\n for d in [model.encoder, model.decoder]:\n freeze_params(d.embed_tokens)\n\n if model_args.freeze_embeds:\n freeze_embeds(model)\n if model_args.freeze_encoder:\n freeze_params(model.get_encoder())\n assert_all_frozen(model.get_encoder())\n\n dataset_class = Seq2SeqDataset if hasattr(tokenizer, \"prepare_seq2seq_batch\") else LegacySeq2SeqDataset\n\n # Get datasets\n train_dataset = (\n dataset_class(\n tokenizer,\n type_path=\"train\",\n data_dir=data_args.data_dir,\n n_obs=data_args.n_train,\n max_target_length=data_args.max_target_length,\n max_source_length=data_args.max_source_length,\n prefix=model.config.prefix or \"\",\n )\n if training_args.do_train\n else None\n )\n eval_dataset = (\n dataset_class(\n tokenizer,\n type_path=\"val\",\n data_dir=data_args.data_dir,\n n_obs=data_args.n_val,\n max_target_length=data_args.val_max_target_length,\n max_source_length=data_args.max_source_length,\n prefix=model.config.prefix or \"\",\n )\n if training_args.do_eval\n else None\n )\n test_dataset = (\n dataset_class(\n tokenizer,\n type_path=\"test\",\n data_dir=data_args.data_dir,\n n_obs=data_args.n_test,\n max_target_length=data_args.test_max_target_length,\n max_source_length=data_args.max_source_length,\n prefix=model.config.prefix or \"\",\n )\n if training_args.do_predict\n else None\n )\n\n # Initialize our Trainer\n trainer = Seq2SeqTrainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n data_collator=Seq2SeqDataCollator(tokenizer, data_args, training_args.tpu_num_cores),\n 
compute_metrics=build_compute_metrics_fn(data_args.task) if training_args.predict_with_generate else None,\n )\n\n # Training\n if training_args.do_train:\n trainer.train(\n model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None\n )\n trainer.save_model()\n # For convenience, we also re-save the tokenizer to the same directory,\n # so that you can share your model easily on huggingface.co/models =)\n if trainer.is_world_process_zero():\n tokenizer.save_pretrained(training_args.output_dir)\n\n # Evaluation\n eval_results = {}\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n result = trainer.evaluate()\n\n output_eval_file = os.path.join(training_args.output_dir, \"eval_results.json\")\n if trainer.is_world_process_zero():\n logger.info(\"***** Eval results *****\")\n for key, value in result.items():\n logger.info(\" %s = %s\", key, value)\n\n with open(output_eval_file, \"w\") as f:\n json.dump(result, f)\n\n eval_results.update(result)\n\n if training_args.do_predict:\n logging.info(\"*** Test ***\")\n\n test_output = trainer.predict(test_dataset=test_dataset)\n test_metrics = test_output.metrics\n test_metrics = {k.replace(\"eval\", \"test\"): v for k, v in test_metrics.items()}\n\n output_test_file = os.path.join(training_args.output_dir, \"test_results.json\")\n\n if trainer.is_world_process_zero():\n logger.info(\"***** Test results *****\")\n for key, value in test_metrics.items():\n logger.info(\" %s = %s\", key, value)\n\n with open(output_test_file, \"w\") as f:\n json.dump(test_metrics, f)\n\n if training_args.predict_with_generate:\n test_preds = tokenizer.batch_decode(test_output.predictions, skip_special_tokens=True)\n test_preds = lmap(str.strip, test_preds)\n output_test_pred_file = os.path.join(training_args.output_dir, \"test_generations.txt\")\n with open(output_test_pred_file, \"w\") as f:\n f.write(\"\\n\".join(test_preds))\n\n return eval_results\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.count_nonzero", "torch.stack" ] ]
mcreng/COMP4901K-ML4NLP
[ "14664b4545f2c2ed9437a1869bb675eed0081fca" ]
[ "Proj3/data_helper.py" ]
[ "import math\nimport json\nimport os\nimport numpy as np\nimport pandas as pd\nimport itertools\n\ndef build_input_data(sentences, vocabulary, training=False):\n \"\"\"\n Maps sentences and labels to vectors based on a vocabulary.\n \n Args:\n sentences (pd.Dataframe): date frame of raw sentences\n vocabulary (dict): Dictionary of vocab key pairs\n\n Returns:\n (list[int]): index list\n (iterator): input word ids\n (iterator): target word ids\n int: number of sentences\n \"\"\"\n unknown_token_id = vocabulary[\"<unk>\"]\n vocab = vocabulary.keys()\n\n def sent2idx(sentence):\n \"\"\"\n Converts words into ids from vocab.json\n\n Args:\n sentence (str): Raw string of sentence\n\n Returns:\n sentence (list[int]): List of word ids\n \"\"\"\n sentence = str(sentence).split(' ')\n sentence = [vocabulary[word] if word in vocab else unknown_token_id for word in sentence]\n return sentence\n\n sentences = sentences.applymap(sent2idx)\n \n sentences['target'] = (sentences['sentence'].map(lambda a: a[1:]) + sentences['label']).map(lambda a: np.array([np.array([a]).T]))\n sentences['sentence'] = sentences['sentence'].map(lambda a: np.array([a]))\n if training:\n return sentences.index.tolist(), zip(sentences['sentence'].tolist(), sentences['target'].tolist()), len(sentences)\n else:\n return sentences.index.tolist(), itertools.cycle(zip(sentences['sentence'].tolist(), sentences['target'].tolist())), len(sentences)\n\n\ndef load_data(data_path, file, **kwargs):\n \"\"\"\n Load data for training.\n\n Args:\n data_path (str): Data path\n file (str): filename\n\n Returns:\n (list[int]): index list\n (iterator): training data sets of (x, y)\n (iterator): validation data sets of (x, y)\n num_training_data (int): number of training data\n num_valid_data (int): number of validation data\n vocab_size (int): size of vocabulary\n \"\"\"\n # get the data paths\n path = os.path.join(data_path, \"{}.csv\".format(file))\n vocab_path = os.path.join(data_path, \"vocab.json\")\n\n # build vocabulary from training data\n vocabulary = json.load(open(vocab_path))\n vocab_size = len(vocabulary)\n\n # get input data\n idx, data, num_data = build_input_data(pd.read_csv(path, index_col=0), vocabulary, kwargs)\n\n return idx, data, num_data, vocab_size\n" ]
[ [ "numpy.array", "pandas.read_csv" ] ]
junho-m/rain
[ "88946ab2c727ae23054c77b6eb1381379b186b74" ]
[ "keras/gpu.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# ##### Copyright 2018 The TensorFlow Authors.\n# \n\n# In[ ]:\n\n\n#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# # GPU ์‚ฌ์šฉํ•˜๊ธฐ\n# \n# <table class=\"tfo-notebook-buttons\" align=\"left\">\n# <td>\n# <a target=\"_blank\" href=\"https://www.tensorflow.org/guide/gpu\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />TensorFlow.org์—์„œ ๋ณด๊ธฐ</a>\n# </td>\n# <td>\n# <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/guide/gpu.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />๊ตฌ๊ธ€ ์ฝ”๋žฉ(Colab)์—์„œ ์‹คํ–‰ํ•˜๊ธฐ</a>\n# </td>\n# <td>\n# <a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ko/guide/gpu.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />๊นƒํ—ˆ๋ธŒ(GitHub) ์†Œ์Šค ๋ณด๊ธฐ</a>\n# </td>\n# </table>\n\n# Note: ์ด ๋ฌธ์„œ๋Š” ํ…์„œํ”Œ๋กœ ์ปค๋ฎค๋‹ˆํ‹ฐ์—์„œ ๋ฒˆ์—ญํ–ˆ์Šต๋‹ˆ๋‹ค. ์ปค๋ฎค๋‹ˆํ‹ฐ ๋ฒˆ์—ญ ํ™œ๋™์˜ ํŠน์„ฑ์ƒ ์ •ํ™•ํ•œ ๋ฒˆ์—ญ๊ณผ ์ตœ์‹  ๋‚ด์šฉ์„ ๋ฐ˜์˜ํ•˜๊ธฐ ์œ„ํ•ด ๋…ธ๋ ฅํ•จ์—๋„\n# ๋ถˆ๊ตฌํ•˜๊ณ  [๊ณต์‹ ์˜๋ฌธ ๋ฌธ์„œ](https://www.tensorflow.org/?hl=en)์˜ ๋‚ด์šฉ๊ณผ ์ผ์น˜ํ•˜์ง€ ์•Š์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.\n# ์ด ๋ฒˆ์—ญ์— ๊ฐœ์„ ํ•  ๋ถ€๋ถ„์ด ์žˆ๋‹ค๋ฉด\n# [tensorflow/docs-l10n](https://github.com/tensorflow/docs-l10n/) ๊นƒํ—™ ์ €์žฅ์†Œ๋กœ ํ’€ ๋ฆฌํ€˜์ŠคํŠธ๋ฅผ ๋ณด๋‚ด์ฃผ์‹œ๊ธฐ ๋ฐ”๋ž๋‹ˆ๋‹ค.\n# ๋ฌธ์„œ ๋ฒˆ์—ญ์ด๋‚˜ ๋ฆฌ๋ทฐ์— ์ฐธ์—ฌํ•˜๋ ค๋ฉด\n# [[email protected]](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)๋กœ\n# ๋ฉ”์ผ์„ ๋ณด๋‚ด์ฃผ์‹œ๊ธฐ ๋ฐ”๋ž๋‹ˆ๋‹ค.\n\n# ## ์„ค์ •\n# \n# ์ตœ์‹  ๋ฒ„์ „์˜ ํ…์„œํ”Œ๋กœ๊ฐ€ ์„ค์น˜๋˜์–ด์žˆ๋Š”์ง€ ํ™•์ธํ•˜์„ธ์š”.\n\n# In[ ]:\n\n\nimport tensorflow as tf\n\n\n# ## ์žฅ์น˜ ํ• ๋‹น ๋กœ๊น…\n# \n# ์—ฐ์‚ฐ๊ณผ ํ…์„œ๊ฐ€ ์–ด๋–ค ์žฅ์น˜์— ํ• ๋‹น๋˜์—ˆ๋Š”์ง€ ํ™•์ธํ•˜๋ ค๋ฉด `tf.debugging.set_log_device_placement(True)`๋ฅผ ํ”„๋กœ๊ทธ๋žจ์˜ ๊ฐ€์žฅ ์ฒ˜์Œ์— ์„ ์–ธํ•˜์„ธ์š”. ์žฅ์น˜ ํ• ๋‹น ๋กœ๊น…์„ ํ™œ์„ฑํ™”ํ•˜๋ฉด ๋ชจ๋“  ํ…์„œ๋‚˜ ์—ฐ์‚ฐ ํ• ๋‹น์ด ์ถœ๋ ฅ๋ฉ๋‹ˆ๋‹ค.\n\n# In[ ]:\n\n\ntf.debugging.set_log_device_placement(True)\n\n# ํ…์„œ ์ƒ์„ฑ\na = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nb = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])\nc = tf.matmul(a, b)\n\nprint(c)\n\n\n# ์œ„ ์ฝ”๋“œ๋Š” `MatMul` ์—ฐ์‚ฐ์ด `GPU:0`์—์„œ ์ˆ˜ํ–‰๋˜์—ˆ๋‹ค๊ณ  ๋ณด์—ฌ์ค„ ๊ฒƒ์ž…๋‹ˆ๋‹ค.\n\n# ## ์žฅ์น˜ ์ˆ˜๋™ ํ• ๋‹น\n# \n# ํŠน์ • ์—ฐ์‚ฐ์„ ์ˆ˜ํ–‰ํ•  ์žฅ์น˜๋ฅผ ์ง์ ‘ ์„ ํƒํ•˜๊ณ  ์‹ถ๋‹ค๋ฉด, `with tf.device`๋กœ ์žฅ์น˜ ์ปจํ…์ŠคํŠธ๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ๊ณ  ํ•ด๋‹น ์ปจํ…์ŠคํŠธ์—์„œ์˜ ๋ชจ๋“  ์—ฐ์‚ฐ์€ ์ง€์ •๋œ ์žฅ์น˜์—์„œ ์ˆ˜ํ–‰๋ฉ๋‹ˆ๋‹ค.\n\n# In[ ]:\n\n\ntf.debugging.set_log_device_placement(True)\n\n# ํ…์„œ๋ฅผ CPU์— ํ• ๋‹น\nwith tf.device('/CPU:0'):\n a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])\n\nc = tf.matmul(a, b)\nprint(c)\n\n\n# `a`์™€ `b`๊ฐ€ `CPU:0`์— ํ• ๋‹น๋˜์—ˆ์Šต๋‹ˆ๋‹ค. 
`MatMul` ์—ฐ์‚ฐ์€ ์ˆ˜ํ–‰ํ•  ์žฅ์น˜๊ฐ€ ๋ช…์‹œ์ ์œผ๋กœ ํ• ๋‹น๋˜์–ด ์žˆ์ง€ ์•Š๊ธฐ ๋•Œ๋ฌธ์— ํ…์„œํ”Œ๋กœ ๋Ÿฐํƒ€์ž„(runtime)์€ ์—ฐ์‚ฐ๊ณผ ๊ฐ€์šฉํ•œ ์žฅ์น˜๋“ค(์ด ์˜ˆ์ œ์—์„œ๋Š” `GPU:0`)์„ ๊ธฐ๋ฐ˜์œผ๋กœ ํ•˜๋‚˜๋ฅผ ๊ณ ๋ฅผ ๊ฒƒ์ด๊ณ  ํ•„์š”ํ•˜๋‹ค๋ฉด ์žฅ์น˜๋“ค๊ฐ„์— ํ…์„œ๋ฅผ ์ž๋™์œผ๋กœ ๋ณต์‚ฌํ•  ๊ฒƒ์ž…๋‹ˆ๋‹ค.\n\n# ## GPU ๋ฉ”๋ชจ๋ฆฌ ์ œํ•œํ•˜๊ธฐ\n# \n# ๊ธฐ๋ณธ์ ์œผ๋กœ ํ…์„œํ”Œ๋กœ๋Š” ๋ชจ๋“  GPU์˜ ๊ฑฐ์˜ ๋ชจ๋“  ๋ฉ”๋ชจ๋ฆฌ๋ฅผ ํ”„๋กœ์„ธ์Šค๊ฐ€ ๋ณผ ์ˆ˜ ์žˆ๋„๋ก ๋งคํ•‘ํ•ฉ๋‹ˆ๋‹ค([`CUDA_VISIBLE_DEVICES`](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars)์— ํฌํ•จ๋˜์—ˆ๋‹ค๊ณ  ๊ฐ€์ •ํ•ฉ๋‹ˆ๋‹ค). ์ด๋Š” ๋ฉ”๋ชจ๋ฆฌ ๋‹จํŽธํ™”๋ฅผ ์ค„์—ฌ์„œ ์ƒ๋Œ€์ ์œผ๋กœ ๊ท€ํ•œ GPU ๋ฉ”๋ชจ๋ฆฌ ๋ฆฌ์†Œ์Šค๋ฅผ ์žฅ์น˜์—์„œ ๋ณด๋‹ค ํšจ์œจ์ ์œผ๋กœ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๊ฒŒ ํ•ฉ๋‹ˆ๋‹ค. `tf.config.experimental.set_visible_devices` ๋ฉ”์„œ๋“œ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ํ…์„œํ”Œ๋กœ์—์„œ ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ๋Š” GPU๋ฅผ ์กฐ์ •ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.\n\n# In[ ]:\n\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n # ํ…์„œํ”Œ๋กœ๊ฐ€ ์ฒซ ๋ฒˆ์งธ GPU๋งŒ ์‚ฌ์šฉํ•˜๋„๋ก ์ œํ•œ\n try:\n tf.config.experimental.set_visible_devices(gpus[0], 'GPU')\n except RuntimeError as e:\n # ํ”„๋กœ๊ทธ๋žจ ์‹œ์ž‘์‹œ์— ์ ‘๊ทผ ๊ฐ€๋Šฅํ•œ ์žฅ์น˜๊ฐ€ ์„ค์ •๋˜์–ด์•ผ๋งŒ ํ•ฉ๋‹ˆ๋‹ค\n print(e)\n\n\n# ์–ด๋–ค ๊ฒฝ์šฐ์—๋Š” ํ”„๋กœ์„ธ์Šค๊ฐ€ ๊ฐ€์šฉํ•œ ๋ฉ”๋ชจ๋ฆฌ์˜ ์ผ๋ถ€์—๋งŒ ํ• ๋‹น๋˜๋„๋ก ํ•˜๊ฑฐ๋‚˜ ํ”„๋กœ์„ธ์Šค์˜ ์š”๊ตฌ๋Ÿ‰๋งŒํผ ๋ฉ”๋ชจ๋ฆฌ ์‚ฌ์šฉ์ด ๊ฐ€๋Šฅํ•  ํ•„์š”๊ฐ€ ์žˆ์Šต๋‹ˆ๋‹ค. ํ…์„œํ”Œ๋กœ์—์„œ๋Š” ์ด๋ฅผ ์œ„ํ•ด ๋‘ ๊ฐ€์ง€ ๋ฐฉ๋ฒ•์„ ์ œ๊ณตํ•ฉ๋‹ˆ๋‹ค.\n# \n# ์ฒซ ๋ฒˆ์งธ ๋ฐฉ๋ฒ•์€ `tf.config.experimental.set_memory_growth`๋ฅผ ํ˜ธ์ถœํ•˜์—ฌ ๋ฉ”๋ชจ๋ฆฌ ์ฆ๊ฐ€๋ฅผ ํ—ˆ์šฉํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์ด๋Š” ๋Ÿฐํƒ€์ž„์—์„œ ํ• ๋‹นํ•˜๋Š”๋ฐ ํ•„์š”ํ•œ ์–‘๋งŒํผ์˜ GPU ๋ฉ”๋ชจ๋ฆฌ๋ฅผ ํ• ๋‹นํ•ฉ๋‹ˆ๋‹ค: ์ฒ˜์Œ์—๋Š” ๋ฉ”๋ชจ๋ฆฌ๋ฅผ ์กฐ๊ธˆ๋งŒ ํ• ๋‹นํ•˜๊ณ , ํ”„๋กœ๊ทธ๋žจ์ด ์‹คํ–‰๋˜์–ด ๋” ๋งŽ์€ GPU ๋ฉ”๋ชจ๋ฆฌ๊ฐ€ ํ•„์š”ํ•˜๋ฉด, ํ…์„œํ”Œ๋กœ ํ”„๋กœ์„ธ์Šค์— ํ• ๋‹น๋œ GPU ๋ฉ”๋ชจ๋ฆฌ ์˜์—ญ์„ ํ™•์žฅํ•ฉ๋‹ˆ๋‹ค. ๋ฉ”๋ชจ๋ฆฌ ํ•ด์ œ๋Š” ๋ฉ”๋ชจ๋ฆฌ ๋‹จํŽธํ™”๋ฅผ ์•…ํ™”์‹œํ‚ค๋ฏ€๋กœ ๋ฉ”๋ชจ๋ฆฌ ํ•ด์ œ๋Š” ํ•˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. ํŠน์ • GPU์˜ ๋ฉ”๋ชจ๋ฆฌ ์ฆ๊ฐ€๋ฅผ ํ—ˆ์šฉํ•˜๋ ค๋ฉด ๋‹ค์Œ ์ฝ”๋“œ๋ฅผ ํ…์„œ๋‚˜ ์—ฐ์‚ฐ ์•ž์— ์ž…๋ ฅํ•˜์„ธ์š”.\n\n# In[ ]:\n\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n try:\n tf.config.experimental.set_memory_growth(gpus[0], True)\n except RuntimeError as e:\n # ํ”„๋กœ๊ทธ๋žจ ์‹œ์ž‘์‹œ์— ๋ฉ”๋ชจ๋ฆฌ ์ฆ๊ฐ€๊ฐ€ ์„ค์ •๋˜์–ด์•ผ๋งŒ ํ•ฉ๋‹ˆ๋‹ค\n print(e)\n\n\n# ๋˜ ๋‹ค๋ฅธ ๋ฐฉ๋ฒ•์€ `TF_FORCE_GPU_ALLOW_GROWTH` ํ™˜๊ฒฝ๋ณ€์ˆ˜๋ฅผ `true`๋กœ ์„ค์ •ํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์ด ์„ค์ •์€ ํ”Œ๋žซํผ ์ข…์†์ ์ž…๋‹ˆ๋‹ค.\n# \n# ๋‘ ๋ฒˆ์งธ ๋ฐฉ๋ฒ•์€ `tf.config.experimental.set_virtual_device_configuration`์œผ๋กœ ๊ฐ€์ƒ GPU ์žฅ์น˜๋ฅผ ์„ค์ •ํ•˜๊ณ  GPU์— ํ• ๋‹น๋  ์ „์ฒด ๋ฉ”๋ชจ๋ฆฌ๋ฅผ ์ œํ•œํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค.\n\n# In[ ]:\n\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n # ํ…์„œํ”Œ๋กœ๊ฐ€ ์ฒซ ๋ฒˆ์งธ GPU์— 1GB ๋ฉ”๋ชจ๋ฆฌ๋งŒ ํ• ๋‹นํ•˜๋„๋ก ์ œํ•œ\n try:\n tf.config.experimental.set_virtual_device_configuration(\n gpus[0],\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])\n except RuntimeError as e:\n # ํ”„๋กœ๊ทธ๋žจ ์‹œ์ž‘์‹œ์— ๊ฐ€์ƒ ์žฅ์น˜๊ฐ€ ์„ค์ •๋˜์–ด์•ผ๋งŒ ํ•ฉ๋‹ˆ๋‹ค\n print(e)\n\n\n# ์ด๋Š” ํ…์„œํ”Œ๋กœ ํ”„๋กœ์„ธ์Šค์—์„œ ์‚ฌ์šฉ๊ฐ€๋Šฅํ•œ GPU ๋ฉ”๋ชจ๋ฆฌ๋Ÿ‰์„ ์ œํ•œํ•˜๋Š”๋ฐ ์œ ์šฉํ•ฉ๋‹ˆ๋‹ค. 
์›Œํฌ์Šคํ…Œ์ด์…˜ GUI๊ฐ™์ด GPU๊ฐ€ ๋‹ค๋ฅธ ์–ดํ”Œ๋ฆฌ์ผ€์ด์…˜๋“ค์— ๊ณต์œ ๋˜๋Š” ๋กœ์ปฌ ๊ฐœ๋ฐœํ™˜๊ฒฝ์—์„œ ๋ณดํ†ต ์‚ฌ์šฉ๋˜๋Š” ๋ฐฉ๋ฒ•์ž…๋‹ˆ๋‹ค.\n\n# ## ๋ฉ€ํ‹ฐ GPU ์‹œ์Šคํ…œ์—์„œ ํ•˜๋‚˜์˜ GPU๋งŒ ์‚ฌ์šฉํ•˜๊ธฐ\n# \n# ์‹œ์Šคํ…œ์— ๋‘ ๊ฐœ ์ด์ƒ์˜ GPU๊ฐ€ ์žˆ๋‹ค๋ฉด ๋‚ฎ์€ ID์˜ GPU๊ฐ€ ๊ธฐ๋ณธ์œผ๋กœ ์„ ํƒ๋ฉ๋‹ˆ๋‹ค. ๋‹ค๋ฅธ GPU์—์„œ ์‹คํ–‰ํ•˜๊ณ  ์‹ถ์œผ๋ฉด ๋ช…์‹œ์ ์œผ๋กœ ํ‘œ์‹œํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค:\n\n# In[ ]:\n\n\ntf.debugging.set_log_device_placement(True)\n\ntry:\n # ์œ ํšจํ•˜์ง€ ์•Š์€ GPU ์žฅ์น˜๋ฅผ ๋ช…์‹œ\n with tf.device('/device:GPU:2'):\n a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])\n c = tf.matmul(a, b)\nexcept RuntimeError as e:\n print(e)\n\n\n# ๋ช…์‹œํ•œ ์žฅ์น˜๊ฐ€ ์กด์žฌํ•˜์ง€ ์•Š์œผ๋ฉด `RuntimeError`๊ฐ€ ๋‚˜์˜ต๋‹ˆ๋‹ค:\n# \n# ๋ช…์‹œํ•œ ์žฅ์น˜๊ฐ€ ์กด์žฌํ•˜์ง€ ์•Š์„ ๋•Œ ํ…์„œํ”Œ๋กœ๊ฐ€ ์ž๋™์œผ๋กœ ํ˜„์žฌ ์ง€์›ํ•˜๋Š” ์žฅ์น˜๋ฅผ ์„ ํƒํ•˜๊ฒŒ ํ•˜๋ ค๋ฉด `tf.config.set_soft_device_placement(True)`๋ฅผ ํ˜ธ์ถœํ•˜์„ธ์š”.\n\n# In[ ]:\n\n\ntf.config.set_soft_device_placement(True)\ntf.debugging.set_log_device_placement(True)\n\n# ํ…์„œ ์ƒ์„ฑ\na = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nb = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])\nc = tf.matmul(a, b)\n\nprint(c)\n\n\n# ## ๋ฉ€ํ‹ฐ GPU ์‚ฌ์šฉํ•˜๊ธฐ\n\n# #### `tf.distribute.Strategy` ์‚ฌ์šฉ\n# \n# ๋ฉ€ํ‹ฐ GPU๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ๊ฐ€์žฅ ์ข‹์€ ๋ฐฉ๋ฒ•์€ `tf.distriute.Strategy`๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ๊ฐ„๋‹จํ•œ ์˜ˆ์ œ๋ฅผ ์‚ดํŽด๋ด…์‹œ๋‹ค:\n\n# In[ ]:\n\n\nstrategy = tf.distribute.MirroredStrategy()\n\nwith strategy.scope():\n inputs = tf.keras.layers.Input(shape=(1,))\n predictions = tf.keras.layers.Dense(1)(inputs)\n model = tf.keras.models.Model(inputs=inputs, outputs=predictions)\n model.compile(loss='mse',\n optimizer=tf.keras.optimizers.SGD(learning_rate=0.2))\n\n\n# ์ด ํ”„๋กœ๊ทธ๋žจ์€ ์ž…๋ ฅ ๋ฐ์ดํ„ฐ๋ฅผ ๋‚˜๋ˆ„๊ณ  ๋ชจ๋ธ์˜ ๋ณต์‚ฌ๋ณธ์„ ๊ฐ GPU์—์„œ ์‹คํ–‰ํ•  ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์ด๋Š” \"[๋ฐ์ดํ„ฐ ๋ณ‘๋ ฌ์ฒ˜๋ฆฌ](https://en.wikipedia.org/wiki/Data_parallelism)\"๋ผ๊ณ ๋„ ํ•ฉ๋‹ˆ๋‹ค.\n# \n# ๋ณ‘๋ ฌํ™” ์ „๋žต์— ๋Œ€ํ•ด ๋” ์•Œ๊ณ  ์‹ถ์œผ์‹œ๋ฉด [๊ฐ€์ด๋“œ](./distributed_training.ipynb)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”.\n\n# #### `tf.distribute.Strategy` ๋ฏธ์‚ฌ์šฉ\n# \n# `tf.distribute.Strategy`๋Š” ์—ฌ๋Ÿฌ ์žฅ์น˜์— ๊ฑธ์ณ ๊ณ„์‚ฐ์„ ๋ณต์ œํ•ด์„œ ๋™์ž‘ํ•ฉ๋‹ˆ๋‹ค. ๋ชจ๋ธ์„ ๊ฐ GPU์— ๊ตฌ์„ฑํ•˜์—ฌ ์ˆ˜๋™์œผ๋กœ ์ด๋ฅผ ๊ตฌํ˜„ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค๋ฉด:\n\n# In[ ]:\n\n\ntf.debugging.set_log_device_placement(True)\n\ngpus = tf.config.experimental.list_logical_devices('GPU')\nif gpus:\n # ์—ฌ๋Ÿฌ GPU์— ๊ณ„์‚ฐ์„ ๋ณต์ œ\n c = []\n for gpu in gpus:\n with tf.device(gpu.name):\n a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])\n c.append(tf.matmul(a, b))\n\n with tf.device('/CPU:0'):\n matmul_sum = tf.add_n(c)\n\n print(matmul_sum)\n\n" ]
[ [ "tensorflow.config.experimental.set_visible_devices", "tensorflow.config.set_soft_device_placement", "tensorflow.distribute.MirroredStrategy", "tensorflow.keras.optimizers.SGD", "tensorflow.keras.layers.Input", "tensorflow.matmul", "tensorflow.config.experimental.set_memory_growth", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.add_n", "tensorflow.debugging.set_log_device_placement", "tensorflow.constant", "tensorflow.config.experimental.list_logical_devices", "tensorflow.device", "tensorflow.config.experimental.list_physical_devices", "tensorflow.config.experimental.VirtualDeviceConfiguration" ] ]
TsingZ0/PFL-Non-IID
[ "2ec1b3410a9a82a241087c6da51f6be95db09dae" ]
[ "dataset/generate_agnews.py" ]
[ "import numpy as np\nimport os\nimport sys\nimport random\nimport torchtext\nfrom utils.dataset_utils import check, separate_data, split_data, save_file\nfrom torchtext.data.utils import get_tokenizer\nfrom torchtext.vocab import build_vocab_from_iterator\n\n\nrandom.seed(1)\nnp.random.seed(1)\nnum_clients = 20\nnum_classes = 4\nmax_len = 200\ndir_path = \"agnews/\"\n\n\n# Allocate data to users\ndef generate_agnews(dir_path, num_clients, num_classes, niid=False, real=True, partition=None):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n \n # Setup directory for train/test data\n config_path = dir_path + \"config.json\"\n train_path = dir_path + \"train/train.json\"\n test_path = dir_path + \"test/test.json\"\n\n if check(config_path, train_path, test_path, num_clients, num_classes, niid, real, partition):\n return\n\n # Get AG_News data\n trainset, testset = torchtext.datasets.AG_NEWS(root=dir_path+\"rawdata\")\n\n trainlabel, traintext = list(zip(*trainset))\n testlabel, testtext = list(zip(*testset))\n\n dataset_text = []\n dataset_label = []\n\n dataset_text.extend(traintext)\n dataset_text.extend(testtext)\n dataset_label.extend(trainlabel)\n dataset_label.extend(testlabel)\n\n tokenizer = get_tokenizer('basic_english')\n vocab = build_vocab_from_iterator(map(tokenizer, iter(dataset_text)), specials=[\"<unk>\"])\n vocab.set_default_index(vocab[\"<unk>\"])\n\n text_pipeline = lambda x: vocab(tokenizer(x))\n label_pipeline = lambda x: int(x) - 1\n\n def text_transform(text, label, max_len=0):\n label_list, text_list = [], []\n for _text, _label in zip(text, label):\n label_list.append(label_pipeline(_label))\n text_ = text_pipeline(_text)\n padding = [0 for i in range(max_len-len(text_))]\n text_.extend(padding)\n text_list.append(text_[:max_len])\n return label_list, text_list\n\n label_list, text_list = text_transform(dataset_text, dataset_label, max_len)\n\n text_lens = [len(text) for text in text_list]\n # max_len = max(text_lens)\n # label_list, text_list = text_transform(dataset_text, dataset_label, max_len)\n\n text_list = [(text, l) for text, l in zip(text_list, text_lens)]\n\n text_list = np.array(text_list, dtype=object)\n label_list = np.array(label_list)\n\n # dataset = []\n # for i in range(num_classes):\n # idx = label_list == i\n # dataset.append(text_list[idx])\n\n X, y, statistic = separate_data((text_list, label_list), num_clients, num_classes, niid, real, partition)\n train_data, test_data = split_data(X, y)\n save_file(config_path, train_path, test_path, train_data, test_data, num_clients, num_classes, \n statistic, niid, real, partition)\n\n print(\"The size of vocabulary:\", len(vocab))\n\n\nif __name__ == \"__main__\":\n niid = True if sys.argv[1] == \"noniid\" else False\n real = True if sys.argv[2] == \"realworld\" else False\n partition = sys.argv[3] if sys.argv[3] != \"-\" else None\n\n generate_agnews(dir_path, num_clients, num_classes, niid, real, partition)" ]
[ [ "numpy.random.seed", "numpy.array" ] ]
a3sha2/aslprep-2
[ "eaa5f7cfd91494c10a8fbaaa43326e65d42c8d77" ]
[ "aslprep/niworkflows/interfaces/utils.py" ]
[ "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Utilities.\"\"\"\nimport os\nimport re\nimport json\nimport shutil\nimport numpy as np\nimport nibabel as nb\nimport nilearn.image as nli\nfrom textwrap import indent\nfrom collections import OrderedDict\n\nimport scipy.ndimage as nd\nfrom nipype import logging\nfrom nipype.utils.filemanip import fname_presuffix\nfrom nipype.utils.misc import normalize_mc_params\nfrom nipype.interfaces.io import add_traits\nfrom nipype.interfaces.base import (\n traits, isdefined, File, InputMultiPath,\n TraitedSpec, BaseInterfaceInputSpec, SimpleInterface,\n DynamicTraitedSpec\n)\nfrom .. import __version__\n\n\nLOG = logging.getLogger('nipype.interface')\n\n\nclass _CopyXFormInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):\n hdr_file = File(exists=True, mandatory=True, desc='the file we get the header from')\n\n\nclass CopyXForm(SimpleInterface):\n \"\"\"\n Copy the x-form matrices from `hdr_file` to `out_file`.\n \"\"\"\n input_spec = _CopyXFormInputSpec\n output_spec = DynamicTraitedSpec\n\n def __init__(self, fields=None, **inputs):\n self._fields = fields or ['in_file']\n if isinstance(self._fields, str):\n self._fields = [self._fields]\n\n super(CopyXForm, self).__init__(**inputs)\n\n add_traits(self.inputs, self._fields)\n for f in set(self._fields).intersection(list(inputs.keys())):\n setattr(self.inputs, f, inputs[f])\n\n def _outputs(self):\n base = super(CopyXForm, self)._outputs()\n if self._fields:\n fields = self._fields.copy()\n if 'in_file' in fields:\n idx = fields.index('in_file')\n fields.pop(idx)\n fields.insert(idx, 'out_file')\n\n base = add_traits(base, fields)\n return base\n\n def _run_interface(self, runtime):\n for f in self._fields:\n in_files = getattr(self.inputs, f)\n self._results[f] = []\n if isinstance(in_files, str):\n in_files = [in_files]\n for in_file in in_files:\n out_name = fname_presuffix(\n in_file, suffix='_xform', newpath=runtime.cwd)\n # Copy and replace header\n shutil.copy(in_file, out_name)\n _copyxform(self.inputs.hdr_file, out_name,\n message='CopyXForm (niworkflows v%s)' % __version__)\n self._results[f].append(out_name)\n\n # Flatten out one-element lists\n if len(self._results[f]) == 1:\n self._results[f] = self._results[f][0]\n\n default = self._results.pop('in_file', None)\n if default:\n self._results['out_file'] = default\n return runtime\n\n\nclass _CopyHeaderInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc='the file we get the data from')\n hdr_file = File(exists=True, mandatory=True, desc='the file we get the header from')\n\n\nclass _CopyHeaderOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='written file path')\n\n\nclass CopyHeader(SimpleInterface):\n \"\"\"\n Copy a header from the `hdr_file` to `out_file` with data drawn from\n `in_file`.\n \"\"\"\n input_spec = _CopyHeaderInputSpec\n output_spec = _CopyHeaderOutputSpec\n\n def _run_interface(self, runtime):\n in_img = nb.load(self.inputs.hdr_file)\n out_img = nb.load(self.inputs.in_file)\n new_img = out_img.__class__(out_img.dataobj, in_img.affine, in_img.header)\n new_img.set_data_dtype(out_img.get_data_dtype())\n\n out_name = fname_presuffix(self.inputs.in_file,\n suffix='_fixhdr', newpath='.')\n new_img.to_filename(out_name)\n self._results['out_file'] = out_name\n return runtime\n\n\nclass _NormalizeMotionParamsInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc='the input 
parameters file')\n format = traits.Enum('FSL', 'AFNI', 'FSFAST', 'NIPY', usedefault=True,\n desc='output format')\n\n\nclass _NormalizeMotionParamsOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='written file path')\n\n\nclass NormalizeMotionParams(SimpleInterface):\n \"\"\"\n Convert input motion parameters into the designated convention.\n\n \"\"\"\n input_spec = _NormalizeMotionParamsInputSpec\n output_spec = _NormalizeMotionParamsOutputSpec\n\n def _run_interface(self, runtime):\n mpars = np.loadtxt(self.inputs.in_file) # mpars is N_t x 6\n mpars = np.apply_along_axis(\n func1d=normalize_mc_params,\n axis=1, arr=mpars,\n source=self.inputs.format)\n self._results['out_file'] = os.path.join(runtime.cwd, \"motion_params.txt\")\n np.savetxt(self._results['out_file'], mpars)\n return runtime\n\n\nclass _GenerateSamplingReferenceInputSpec(BaseInterfaceInputSpec):\n fixed_image = File(exists=True, mandatory=True,\n desc='the reference file, defines the FoV')\n moving_image = File(exists=True, mandatory=True, desc='the pixel size reference')\n xform_code = traits.Enum(None, 2, 4, usedefault=True,\n desc='force xform code')\n fov_mask = traits.Either(None, File(exists=True), usedefault=True,\n desc='mask to clip field of view (in fixed_image space)')\n keep_native = traits.Bool(True, usedefault=True,\n desc='calculate a grid with native resolution covering '\n 'the volume extent given by fixed_image, fast forward '\n 'fixed_image otherwise.')\n\n\nclass _GenerateSamplingReferenceOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='one file with all inputs flattened')\n\n\nclass GenerateSamplingReference(SimpleInterface):\n \"\"\"\n Generates a reference grid for resampling one image keeping original resolution,\n but moving data to a different space (e.g. MNI).\n\n If the `fov_mask` optional input is provided, then the abbr:`FoV (field-of-view)`\n is cropped to a bounding box containing the brain mask plus an offest of two\n voxels along all dimensions. The `fov_mask` should be to the brain mask calculated\n from the T1w, and should not contain the brain stem. The mask is resampled into\n target space, and then the bounding box is calculated. Finally, the FoV is adjusted\n to that bounding box.\n\n\n \"\"\"\n\n input_spec = _GenerateSamplingReferenceInputSpec\n output_spec = _GenerateSamplingReferenceOutputSpec\n\n def _run_interface(self, runtime):\n if not self.inputs.keep_native:\n self._results['out_file'] = self.inputs.fixed_image\n return runtime\n self._results['out_file'] = _gen_reference(\n self.inputs.fixed_image,\n self.inputs.moving_image,\n fov_mask=self.inputs.fov_mask,\n force_xform_code=self.inputs.xform_code,\n message='%s (niworkflows v%s)' % (self.__class__.__name__, __version__))\n return runtime\n\n\ndef _copyxform(ref_image, out_image, message=None):\n # Read in reference and output\n # Use mmap=False because we will be overwriting the output image\n resampled = nb.load(out_image, mmap=False)\n orig = nb.load(ref_image)\n\n if not np.allclose(orig.affine, resampled.affine):\n LOG.debug(\n 'Affines of input and reference images do not match, '\n 'FMRIPREP will set the reference image headers. 
'\n 'Please, check that the x-form matrices of the input dataset'\n 'are correct and manually verify the alignment of results.')\n\n # Copy xform infos\n qform, qform_code = orig.header.get_qform(coded=True)\n sform, sform_code = orig.header.get_sform(coded=True)\n header = resampled.header.copy()\n header.set_qform(qform, int(qform_code))\n header.set_sform(sform, int(sform_code))\n header['descrip'] = 'xform matrices modified by %s.' % (message or '(unknown)')\n\n newimg = resampled.__class__(resampled.dataobj, orig.affine, header)\n newimg.to_filename(out_image)\n\n\ndef _gen_reference(fixed_image, moving_image, fov_mask=None, out_file=None,\n message=None, force_xform_code=None):\n \"\"\"\n Generates a sampling reference, and makes sure xform matrices/codes are\n correct\n \"\"\"\n\n if out_file is None:\n out_file = fname_presuffix(fixed_image,\n suffix='_reference',\n newpath=os.getcwd())\n\n # Moving images may not be RAS/LPS (more generally, transverse-longitudinal-axial)\n reoriented_moving_img = nb.as_closest_canonical(nb.load(moving_image))\n new_zooms = reoriented_moving_img.header.get_zooms()[:3]\n\n # Avoid small differences in reported resolution to cause changes to\n # FOV. See https://github.com/poldracklab/fmriprep/issues/512\n # A positive diagonal affine is RAS, hence the need to reorient above.\n new_affine = np.diag(np.round(new_zooms, 3))\n\n resampled = nli.resample_img(fixed_image,\n target_affine=new_affine,\n interpolation='nearest')\n\n if fov_mask is not None:\n # If we have a mask, resample again dropping (empty) samples\n # out of the FoV.\n fixednii = nb.load(fixed_image)\n masknii = nb.load(fov_mask)\n\n if np.all(masknii.shape[:3] != fixednii.shape[:3]):\n raise RuntimeError(\n 'Fixed image and mask do not have the same dimensions.')\n\n if not np.allclose(masknii.affine, fixednii.affine, atol=1e-5):\n raise RuntimeError(\n 'Fixed image and mask have different affines')\n\n # Get mask into reference space\n masknii = nli.resample_img(fixed_image,\n target_affine=new_affine,\n interpolation='nearest')\n res_shape = np.array(masknii.shape[:3])\n\n # Calculate a bounding box for the input mask\n # with an offset of 2 voxels per face\n bbox = np.argwhere(np.asanyarray(masknii.dataobj) > 0)\n new_origin = np.clip(bbox.min(0) - 2, a_min=0, a_max=None)\n new_end = np.clip(bbox.max(0) + 2, a_min=0,\n a_max=res_shape - 1)\n\n # Find new origin, and set into new affine\n new_affine_4 = resampled.affine.copy()\n new_affine_4[:3, 3] = new_affine_4[:3, :3].dot(\n new_origin) + new_affine_4[:3, 3]\n\n # Calculate new shapes\n new_shape = new_end - new_origin + 1\n resampled = nli.resample_img(fixed_image,\n target_affine=new_affine_4,\n target_shape=new_shape.tolist(),\n interpolation='nearest')\n\n xform = resampled.affine # nibabel will pick the best affine\n _, qform_code = resampled.header.get_qform(coded=True)\n _, sform_code = resampled.header.get_sform(coded=True)\n\n xform_code = sform_code if sform_code > 0 else qform_code\n if xform_code == 1:\n xform_code = 2\n\n if force_xform_code is not None:\n xform_code = force_xform_code\n\n # Keep 0, 2, 3, 4 unchanged\n resampled.header.set_qform(xform, int(xform_code))\n resampled.header.set_sform(xform, int(xform_code))\n resampled.header['descrip'] = 'reference image generated by %s.' 
% (\n message or '(unknown software)')\n resampled.to_filename(out_file)\n return out_file\n\n\nclass _SanitizeImageInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc='input image')\n n_volumes_to_discard = traits.Int(0, usedefault=True, desc='discard n first volumes')\n max_32bit = traits.Bool(False, usedefault=True, desc='cast data to float32 if higher '\n 'precision is encountered')\n\n\nclass _SanitizeImageOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='validated image')\n out_report = File(exists=True, desc='HTML segment containing warning')\n\n\nclass SanitizeImage(SimpleInterface):\n \"\"\"\n Check the correctness of x-form headers (matrix and code) and fixes\n problematic combinations of values. Removes any extension form the header\n if present.\n This interface implements the `following logic\n <https://github.com/poldracklab/fmriprep/issues/873#issuecomment-349394544>`_:\n +-------------------+------------------+------------------+------------------\\\n+------------------------------------------------+\n | valid quaternions | `qform_code > 0` | `sform_code > 0` | `qform == sform` \\\n| actions |\n +===================+==================+==================+==================\\\n+================================================+\n | True | True | True | True \\\n| None |\n +-------------------+------------------+------------------+------------------\\\n+------------------------------------------------+\n | True | True | False | * \\\n| sform, scode <- qform, qcode |\n +-------------------+------------------+------------------+------------------\\\n+------------------------------------------------+\n | * | True | * | False \\\n| sform, scode <- qform, qcode |\n +-------------------+------------------+------------------+------------------\\\n+------------------------------------------------+\n | * | False | True | * \\\n| qform, qcode <- sform, scode |\n +-------------------+------------------+------------------+------------------\\\n+------------------------------------------------+\n | * | False | False | * \\\n| sform, qform <- best affine; scode, qcode <- 1 |\n +-------------------+------------------+------------------+------------------\\\n+------------------------------------------------+\n | False | * | False | * \\\n| sform, qform <- best affine; scode, qcode <- 1 |\n +-------------------+------------------+------------------+------------------\\\n+------------------------------------------------+\n \"\"\"\n input_spec = _SanitizeImageInputSpec\n output_spec = _SanitizeImageOutputSpec\n\n def _run_interface(self, runtime):\n img = nb.load(self.inputs.in_file)\n out_report = os.path.join(runtime.cwd, 'report.html')\n\n # Retrieve xform codes\n sform_code = int(img.header._structarr['sform_code'])\n qform_code = int(img.header._structarr['qform_code'])\n\n # Check qform is valid\n valid_qform = False\n try:\n img.get_qform()\n valid_qform = True\n except ValueError:\n pass\n\n # Matching affines\n matching_affines = valid_qform and np.allclose(img.get_qform(), img.get_sform())\n\n save_file = False\n warning_txt = ''\n\n # Both match, qform valid (implicit with match), codes okay -> do nothing, empty report\n if matching_affines and qform_code > 0 and sform_code > 0:\n self._results['out_file'] = self.inputs.in_file\n open(out_report, 'w').close()\n\n # Row 2:\n elif valid_qform and qform_code > 0:\n img.set_sform(img.get_qform(), qform_code)\n save_file = True\n warning_txt = 'Note on orientation: sform matrix set'\n 
description = \"\"\"\\\n<p class=\"elem-desc\">The sform has been copied from qform.</p>\n\"\"\"\n # Rows 3-4:\n # Note: if qform is not valid, matching_affines is False\n elif sform_code > 0 and (not matching_affines or qform_code == 0):\n img.set_qform(img.get_sform(), sform_code)\n save_file = True\n warning_txt = 'Note on orientation: qform matrix overwritten'\n description = \"\"\"\\\n<p class=\"elem-desc\">The qform has been copied from sform.</p>\n\"\"\"\n if not valid_qform and qform_code > 0:\n warning_txt = 'WARNING - Invalid qform information'\n description = \"\"\"\\\n<p class=\"elem-desc\">\n The qform matrix found in the file header is invalid.\n The qform has been copied from sform.\n Checking the original qform information from the data produced\n by the scanner is advised.\n</p>\n\"\"\"\n # Rows 5-6:\n else:\n affine = img.affine\n img.set_sform(affine, nb.nifti1.xform_codes['scanner'])\n img.set_qform(affine, nb.nifti1.xform_codes['scanner'])\n save_file = True\n warning_txt = 'WARNING - Missing orientation information'\n description = \"\"\"\\\n<p class=\"elem-desc\">\n Orientation information could not be retrieved from the image header.\n The qform and sform matrices have been set to a default, LAS-oriented affine.\n Analyses of this dataset MAY BE INVALID.\n</p>\n\"\"\"\n\n if (\n (self.inputs.max_32bit\n and np.dtype(img.get_data_dtype()).itemsize > 4)\n or self.inputs.n_volumes_to_discard\n ):\n # force float32 only if 64 bit dtype is detected\n if (self.inputs.max_32bit and np.dtype(img.get_data_dtype()).itemsize > 4):\n in_data = img.get_fdata(dtype=np.float32)\n else:\n in_data = img.dataobj\n\n img = nb.Nifti1Image(in_data[:, :, :, self.inputs.n_volumes_to_discard:],\n img.affine,\n img.header)\n save_file = True\n\n if len(img.header.extensions) != 0:\n img.header.extensions.clear()\n save_file = True\n\n # Store new file\n if save_file:\n out_fname = fname_presuffix(self.inputs.in_file, suffix='_valid',\n newpath=runtime.cwd)\n self._results['out_file'] = out_fname\n img.to_filename(out_fname)\n\n if warning_txt:\n snippet = '<h3 class=\"elem-title\">%s</h3>\\n%s\\n' % (\n warning_txt, description)\n with open(out_report, 'w') as fobj:\n fobj.write(indent(snippet, '\\t' * 3))\n\n self._results['out_report'] = out_report\n return runtime\n\n\nclass _TPM2ROIInputSpec(BaseInterfaceInputSpec):\n in_tpm = File(exists=True, mandatory=True, desc='Tissue probability map file in T1 space')\n in_mask = File(exists=True, mandatory=True, desc='Binary mask of skull-stripped T1w image')\n mask_erode_mm = traits.Float(xor=['mask_erode_prop'],\n desc='erode input mask (kernel width in mm)')\n erode_mm = traits.Float(xor=['erode_prop'],\n desc='erode output mask (kernel width in mm)')\n mask_erode_prop = traits.Float(xor=['mask_erode_mm'],\n desc='erode input mask (target volume ratio)')\n erode_prop = traits.Float(xor=['erode_mm'],\n desc='erode output mask (target volume ratio)')\n prob_thresh = traits.Float(0.95, usedefault=True,\n desc='threshold for the tissue probability maps')\n\n\nclass _TPM2ROIOutputSpec(TraitedSpec):\n roi_file = File(exists=True, desc='output ROI file')\n eroded_mask = File(exists=True, desc='resulting eroded mask')\n\n\nclass TPM2ROI(SimpleInterface):\n \"\"\"Convert tissue probability maps (TPMs) into ROIs\n\n This interface follows the following logic:\n\n #. Erode ``in_mask`` by ``mask_erode_mm`` and apply to ``in_tpm``\n #. Threshold masked TPM at ``prob_thresh``\n #. 
Erode resulting mask by ``erode_mm``\n\n \"\"\"\n\n input_spec = _TPM2ROIInputSpec\n output_spec = _TPM2ROIOutputSpec\n\n def _run_interface(self, runtime):\n mask_erode_mm = self.inputs.mask_erode_mm\n if not isdefined(mask_erode_mm):\n mask_erode_mm = None\n erode_mm = self.inputs.erode_mm\n if not isdefined(erode_mm):\n erode_mm = None\n mask_erode_prop = self.inputs.mask_erode_prop\n if not isdefined(mask_erode_prop):\n mask_erode_prop = None\n erode_prop = self.inputs.erode_prop\n if not isdefined(erode_prop):\n erode_prop = None\n roi_file, eroded_mask = _tpm2roi(\n self.inputs.in_tpm,\n self.inputs.in_mask,\n mask_erode_mm,\n erode_mm,\n mask_erode_prop,\n erode_prop,\n self.inputs.prob_thresh,\n newpath=runtime.cwd,\n )\n self._results['roi_file'] = roi_file\n self._results['eroded_mask'] = eroded_mask\n return runtime\n\n\nclass _AddTPMsInputSpec(BaseInterfaceInputSpec):\n in_files = InputMultiPath(File(exists=True), mandatory=True, desc='input list of ROIs')\n indices = traits.List(traits.Int, desc='select specific maps')\n\n\nclass _AddTPMsOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='union of binarized input files')\n\n\nclass AddTPMs(SimpleInterface):\n \"\"\"Calculate the union of several :abbr:`TPMs (tissue-probability map)`\"\"\"\n input_spec = _AddTPMsInputSpec\n output_spec = _AddTPMsOutputSpec\n\n def _run_interface(self, runtime):\n in_files = self.inputs.in_files\n\n indices = list(range(len(in_files)))\n if isdefined(self.inputs.indices):\n indices = self.inputs.indices\n\n if len(self.inputs.in_files) < 2:\n self._results['out_file'] = in_files[0]\n return runtime\n\n first_fname = in_files[indices[0]]\n if len(indices) == 1:\n self._results['out_file'] = first_fname\n return runtime\n\n im = nb.concat_images([in_files[i] for i in indices])\n data = im.get_fdata().sum(axis=3)\n data = np.clip(data, a_min=0.0, a_max=1.0)\n\n out_file = fname_presuffix(first_fname, suffix='_tpmsum',\n newpath=runtime.cwd)\n newnii = im.__class__(data, im.affine, im.header)\n newnii.set_data_dtype(np.float32)\n\n # Set visualization thresholds\n newnii.header['cal_max'] = 1.0\n newnii.header['cal_min'] = 0.0\n newnii.to_filename(out_file)\n self._results['out_file'] = out_file\n\n return runtime\n\n\nclass _AddTSVHeaderInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc='input file')\n columns = traits.List(traits.Str, mandatory=True, desc='header for columns')\n\n\nclass _AddTSVHeaderOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='output average file')\n\n\nclass AddTSVHeader(SimpleInterface):\n r\"\"\"Add a header row to a TSV file\n\n .. testsetup::\n\n >>> cwd = os.getcwd()\n >>> os.chdir(tmpdir)\n\n .. doctest::\n\n An example TSV:\n\n >>> np.savetxt('data.tsv', np.arange(30).reshape((6, 5)), delimiter='\\t')\n\n Add headers:\n\n >>> addheader = AddTSVHeader()\n >>> addheader.inputs.in_file = 'data.tsv'\n >>> addheader.inputs.columns = ['a', 'b', 'c', 'd', 'e']\n >>> res = addheader.run()\n >>> df = pd.read_csv(res.outputs.out_file, delim_whitespace=True,\n ... index_col=None)\n >>> df.columns.ravel().tolist()\n ['a', 'b', 'c', 'd', 'e']\n\n >>> np.all(df.values == np.arange(30).reshape((6, 5)))\n True\n\n .. 
testcleanup::\n\n >>> os.chdir(cwd)\n\n \"\"\"\n input_spec = _AddTSVHeaderInputSpec\n output_spec = _AddTSVHeaderOutputSpec\n\n def _run_interface(self, runtime):\n out_file = fname_presuffix(self.inputs.in_file, suffix='_motion.tsv', newpath=runtime.cwd,\n use_ext=False)\n data = np.loadtxt(self.inputs.in_file)\n np.savetxt(out_file, data, delimiter='\\t', header='\\t'.join(self.inputs.columns),\n comments='')\n\n self._results['out_file'] = out_file\n return runtime\n\n\nclass _JoinTSVColumnsInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc='input file')\n join_file = File(exists=True, mandatory=True, desc='file to be adjoined')\n side = traits.Enum('right', 'left', usedefault=True, desc='where to join')\n columns = traits.List(traits.Str, desc='header for columns')\n\n\nclass _JoinTSVColumnsOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='output TSV file')\n\n\nclass JoinTSVColumns(SimpleInterface):\n r\"\"\"Add a header row to a TSV file\n\n .. testsetup::\n\n >>> cwd = os.getcwd()\n >>> os.chdir(tmpdir)\n\n .. doctest::\n\n An example TSV:\n\n >>> data = np.arange(30).reshape((6, 5))\n >>> np.savetxt('data.tsv', data[:, :3], delimiter='\\t')\n >>> np.savetxt('add.tsv', data[:, 3:], delimiter='\\t')\n\n Join without naming headers:\n\n >>> join = JoinTSVColumns()\n >>> join.inputs.in_file = 'data.tsv'\n >>> join.inputs.join_file = 'add.tsv'\n >>> res = join.run()\n >>> df = pd.read_csv(res.outputs.out_file, delim_whitespace=True,\n ... index_col=None, dtype=float, header=None)\n >>> df.columns.ravel().tolist() == list(range(5))\n True\n\n >>> np.all(df.values.astype(int) == data)\n True\n\n\n Adding column names:\n\n >>> join = JoinTSVColumns()\n >>> join.inputs.in_file = 'data.tsv'\n >>> join.inputs.join_file = 'add.tsv'\n >>> join.inputs.columns = ['a', 'b', 'c', 'd', 'e']\n >>> res = join.run()\n >>> res.outputs.out_file # doctest: +ELLIPSIS\n '...data_joined.tsv'\n >>> df = pd.read_csv(res.outputs.out_file, delim_whitespace=True,\n ... index_col=None)\n >>> df.columns.ravel().tolist()\n ['a', 'b', 'c', 'd', 'e']\n\n >>> np.all(df.values == np.arange(30).reshape((6, 5)))\n True\n\n >>> join = JoinTSVColumns()\n >>> join.inputs.in_file = 'data.tsv'\n >>> join.inputs.join_file = 'add.tsv'\n >>> join.inputs.side = 'left'\n >>> join.inputs.columns = ['a', 'b', 'c', 'd', 'e']\n >>> res = join.run()\n >>> df = pd.read_csv(res.outputs.out_file, delim_whitespace=True,\n ... index_col=None)\n >>> df.columns.ravel().tolist()\n ['a', 'b', 'c', 'd', 'e']\n\n >>> np.all(df.values == np.hstack((data[:, 3:], data[:, :3])))\n True\n\n .. 
testcleanup::\n\n >>> os.chdir(cwd)\n\n \"\"\"\n input_spec = _JoinTSVColumnsInputSpec\n output_spec = _JoinTSVColumnsOutputSpec\n\n def _run_interface(self, runtime):\n out_file = fname_presuffix(\n self.inputs.in_file, suffix='_joined.tsv', newpath=runtime.cwd,\n use_ext=False)\n\n header = ''\n if isdefined(self.inputs.columns) and self.inputs.columns:\n header = '\\t'.join(self.inputs.columns)\n\n with open(self.inputs.in_file) as ifh:\n data = ifh.read().splitlines(keepends=False)\n\n with open(self.inputs.join_file) as ifh:\n join = ifh.read().splitlines(keepends=False)\n\n assert len(data) == len(join)\n\n merged = []\n for d, j in zip(data, join):\n line = '%s\\t%s' % ((j, d) if self.inputs.side == 'left' else (d, j))\n merged.append(line)\n\n if header:\n merged.insert(0, header)\n\n with open(out_file, 'w') as ofh:\n ofh.write('\\n'.join(merged))\n\n self._results['out_file'] = out_file\n return runtime\n\n\nclass _DictMergeInputSpec(BaseInterfaceInputSpec):\n in_dicts = traits.List(\n traits.Either(traits.Dict, traits.Instance(OrderedDict)),\n desc='Dictionaries to be merged. In the event of a collision, values '\n 'from dictionaries later in the list receive precedence.')\n\n\nclass _DictMergeOutputSpec(TraitedSpec):\n out_dict = traits.Dict(desc='Merged dictionary')\n\n\nclass DictMerge(SimpleInterface):\n \"\"\"Merge (ordered) dictionaries.\"\"\"\n input_spec = _DictMergeInputSpec\n output_spec = _DictMergeOutputSpec\n\n def _run_interface(self, runtime):\n out_dict = {}\n for in_dict in self.inputs.in_dicts:\n out_dict.update(in_dict)\n self._results['out_dict'] = out_dict\n return runtime\n\n\nclass _TSV2JSONInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc='Input TSV file')\n index_column = traits.Str(mandatory=True,\n desc='Name of the column in the TSV to be used '\n 'as the top-level key in the JSON. All '\n 'remaining columns will be assigned as '\n 'nested keys.')\n output = traits.Either(None, File,\n desc='Path where the output file is to be saved. 
'\n 'If this is `None`, then a JSON-compatible '\n 'dictionary is returned instead.')\n additional_metadata = traits.Either(None, traits.Dict,\n traits.Instance(OrderedDict),\n usedefault=True,\n desc='Any additional metadata that '\n 'should be applied to all '\n 'entries in the JSON.')\n drop_columns = traits.Either(None, traits.List(), usedefault=True,\n desc='List of columns in the TSV to be '\n 'dropped from the JSON.')\n enforce_case = traits.Bool(True, usedefault=True,\n desc='Enforce snake case for top-level keys '\n 'and camel case for nested keys')\n\n\nclass _TSV2JSONOutputSpec(TraitedSpec):\n output = traits.Either(traits.Dict, File(exists=True),\n traits.Instance(OrderedDict),\n desc='Output dictionary or JSON file')\n\n\nclass TSV2JSON(SimpleInterface):\n \"\"\"Convert metadata from TSV format to JSON format.\n \"\"\"\n input_spec = _TSV2JSONInputSpec\n output_spec = _TSV2JSONOutputSpec\n\n def _run_interface(self, runtime):\n if not isdefined(self.inputs.output):\n output = fname_presuffix(\n self.inputs.in_file, suffix='.json', newpath=runtime.cwd,\n use_ext=False)\n else:\n output = self.inputs.output\n\n self._results['output'] = _tsv2json(\n in_tsv=self.inputs.in_file,\n out_json=output,\n index_column=self.inputs.index_column,\n additional_metadata=self.inputs.additional_metadata,\n drop_columns=self.inputs.drop_columns,\n enforce_case=self.inputs.enforce_case\n )\n return runtime\n\n\ndef _tsv2json(in_tsv, out_json, index_column, additional_metadata=None,\n drop_columns=None, enforce_case=True):\n \"\"\"\n Convert metadata from TSV format to JSON format.\n\n Parameters\n ----------\n in_tsv: str\n Path to the metadata in TSV format.\n out_json: str\n Path where the metadata should be saved in JSON format after\n conversion. If this is None, then a dictionary is returned instead.\n index_column: str\n Name of the column in the TSV to be used as an index (top-level key in\n the JSON).\n additional_metadata: dict\n Any additional metadata that should be applied to all entries in the\n JSON.\n drop_columns: list\n List of columns from the input TSV to be dropped from the JSON.\n enforce_case: bool\n Indicates whether BIDS case conventions should be followed. Currently,\n this means that index fields (column names in the associated data TSV)\n use snake case and other fields use camel case.\n\n Returns\n -------\n str\n Path to the metadata saved in JSON format.\n \"\"\"\n import pandas as pd\n # Adapted from https://dev.to/rrampage/snake-case-to-camel-case-and- ...\n # back-using-regular-expressions-and-python-m9j\n re_to_camel = r'(.*?)_([a-zA-Z0-9])'\n re_to_snake = r'(^.+?|.*?)((?<![_A-Z])[A-Z]|(?<![_0-9])[0-9]+)'\n\n def snake(match):\n return '{}_{}'.format(match.group(1).lower(), match.group(2).lower())\n\n def camel(match):\n return '{}{}'.format(match.group(1), match.group(2).upper())\n\n # from fmriprep\n def less_breakable(a_string):\n \"\"\" hardens the string to different envs (i.e. 
case insensitive, no\n whitespace, no '#') \"\"\"\n return ''.join(a_string.split()).strip('#')\n\n drop_columns = drop_columns or []\n additional_metadata = additional_metadata or {}\n tsv_data = pd.read_csv(in_tsv, sep='\t')\n for k, v in additional_metadata.items():\n tsv_data[k] = [v] * len(tsv_data.index)\n for col in drop_columns:\n tsv_data.drop(labels=col, axis='columns', inplace=True)\n tsv_data.set_index(index_column, drop=True, inplace=True)\n if enforce_case:\n tsv_data.index = [re.sub(re_to_snake, snake,\n less_breakable(i), 0).lower()\n for i in tsv_data.index]\n tsv_data.columns = [re.sub(re_to_camel, camel,\n less_breakable(i).title(), 0)\n for i in tsv_data.columns]\n json_data = tsv_data.to_json(orient='index')\n json_data = json.JSONDecoder(\n object_pairs_hook=OrderedDict).decode(json_data)\n for i in json_data:\n json_data[i].update(additional_metadata)\n\n if out_json is None:\n return json_data\n\n with open(out_json, 'w') as f:\n json.dump(json_data, f, indent=4)\n return out_json\n\n\ndef _tpm2roi(in_tpm, in_mask, mask_erosion_mm=None, erosion_mm=None,\n mask_erosion_prop=None, erosion_prop=None, pthres=0.95,\n newpath=None):\n \"\"\"\n Generate a mask from a tissue probability map\n \"\"\"\n tpm_img = nb.load(in_tpm)\n roi_mask = (tpm_img.get_fdata() >= pthres).astype(np.uint8)\n\n eroded_mask_file = None\n erode_in = (\n (mask_erosion_mm is not None and mask_erosion_mm > 0)\n or (mask_erosion_prop is not None and mask_erosion_prop < 1)\n )\n if erode_in:\n eroded_mask_file = fname_presuffix(in_mask, suffix='_eroded',\n newpath=newpath)\n mask_img = nb.load(in_mask)\n mask_data = np.asanyarray(mask_img.dataobj).astype(np.uint8)\n if mask_erosion_mm:\n iter_n = max(int(mask_erosion_mm / max(mask_img.header.get_zooms())), 1)\n mask_data = nd.binary_erosion(mask_data, iterations=iter_n)\n else:\n orig_vol = np.sum(mask_data > 0)\n while np.sum(mask_data > 0) / orig_vol > mask_erosion_prop:\n mask_data = nd.binary_erosion(mask_data, iterations=1)\n\n # Store mask\n eroded = nb.Nifti1Image(mask_data, mask_img.affine, mask_img.header)\n eroded.set_data_dtype(np.uint8)\n eroded.to_filename(eroded_mask_file)\n\n # Mask TPM data (no effect if not eroded)\n roi_mask[~mask_data] = 0\n\n # shrinking\n erode_out = (\n (erosion_mm is not None and erosion_mm > 0)\n or (erosion_prop is not None and erosion_prop < 1)\n )\n if erode_out:\n if erosion_mm:\n # guard against zero iterations when erosion_mm is smaller than one voxel\n iter_n = max(int(erosion_mm / max(tpm_img.header.get_zooms())), 1)\n roi_mask = nd.binary_erosion(roi_mask, iterations=iter_n)\n else:\n orig_vol = np.sum(roi_mask > 0)\n while np.sum(roi_mask > 0) / orig_vol > erosion_prop:\n roi_mask = nd.binary_erosion(roi_mask, iterations=1)\n\n # Create image to resample\n roi_fname = fname_presuffix(in_tpm, suffix='_roi', newpath=newpath)\n roi_img = nb.Nifti1Image(roi_mask, tpm_img.affine, tpm_img.header)\n roi_img.set_data_dtype(np.uint8)\n roi_img.to_filename(roi_fname)\n return roi_fname, eroded_mask_file or in_mask\n" ]
[ [ "numpy.array", "numpy.savetxt", "scipy.ndimage.binary_erosion", "numpy.round", "numpy.sum", "numpy.allclose", "numpy.loadtxt", "numpy.apply_along_axis", "numpy.clip", "numpy.all", "pandas.read_csv", "numpy.asanyarray" ] ]
jstaffans/renewables
[ "4ab8f524db8fb64b52026351aab1c27b01703028" ]
[ "app/util.py" ]
[ "from datetime import datetime, timedelta\nimport pandas as pd\n\n\ndef _truncate_datetime(dt):\n return dt.replace(hour=0, minute=0, second=0, microsecond=0)\n\n\ndef hour_range(day):\n start_midnight = _truncate_datetime(day)\n hour = start_midnight\n while hour < start_midnight + timedelta(days=1):\n yield hour\n hour = hour + timedelta(hours=1)\n\n\ndef hour_now():\n return datetime.utcnow().replace(minute=0, second=0, microsecond=0)\n\n\ndef full_hour_series(start, end, resolution, min_hours=1):\n seconds = pd.Timedelta(resolution).total_seconds()\n time_delta = end - start.replace(minute=0)\n full_hour_periods = time_delta.floor(\"1h\").total_seconds() / seconds\n full_hour_periods = max((min_hours * 60 * 60) / seconds, full_hour_periods)\n return pd.date_range(start, periods=full_hour_periods, freq=resolution)\n" ]
[ [ "pandas.date_range", "pandas.Timedelta" ] ]
Coulbe/data-validation
[ "a2c5e5dd2cc50cce8a7eab5c7e72a0cb11b74b2c" ]
[ "tensorflow_data_validation/utils/slicing_util.py" ]
[ "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utility function for generating slicing functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport collections\nimport functools\n\nfrom typing import Any, Dict, Iterable, Optional, Text, Union\nimport numpy as np\nimport pyarrow as pa\nimport six\nfrom tensorflow_data_validation import constants\nfrom tensorflow_data_validation import types\nfrom tensorflow_data_validation.arrow import arrow_util\nfrom pandas import DataFrame\nimport pandas as pd\nfrom tensorflow_data_validation.utils import stats_util\nfrom tfx_bsl.arrow import table_util\n\n_ValueType = Iterable[Union[Text, int]]\n\n_PARENT_INDEX_COLUMN = '__TFDV_INTERNAL_PARENT_INDEX__'\n_SLICE_KEY_COLUMN = '__TFDV_INTERNAL_SLICE_KEY__'\n\n\ndef default_slicer(\n record_batch: pa.RecordBatch) -> Iterable[types.SlicedRecordBatch]:\n \"\"\"Default slicing function that adds the default slice key to the input.\"\"\"\n yield (constants.DEFAULT_SLICE_KEY, record_batch)\n\n\ndef get_feature_value_slicer(\n features: Dict[types.FeatureName, Optional[_ValueType]]\n) -> types.SliceFunction:\n \"\"\"Returns a function that generates sliced record batches for a given one.\n\n The returned function returns sliced record batches based on the combination\n of all features specified in `features`. To slice on features separately (\n e.g., slice on age feature and separately slice on interests feature), you\n must use separate slice functions.\n\n Examples:\n # Slice on each value of the specified features.\n slice_fn = get_feature_value_slicer(\n features={'age': None, 'interests': None})\n\n # Slice on a specified feature value.\n slice_fn = get_feature_value_slicer(features={'interests': ['dogs']})\n\n # Slice on each value of one feature and a specified value of another.\n slice_fn = get_feature_value_slicer(\n features={'fruits': None, 'numbers': [1]})\n\n Args:\n features: A mapping of features to an optional iterable of values that the\n returned function will slice on. If values is None for a feature, then the\n slice keys will reflect each distinct value found for that feature in the\n input record batch. If values are specified for a feature, then the slice\n keys will reflect only those values for the feature, if found in the input\n record batch. 
Values must be an iterable of strings or integers.\n\n Returns:\n A function that takes as input a single record batch and returns a list of\n sliced record batches (slice_key, record_batch).\n\n Raises:\n TypeError: If feature values are not specified in an iterable.\n NotImplementedError: If a value of a type other than string or integer is\n specified in the values iterable in `features`.\n \"\"\"\n for values in features.values():\n if values is not None:\n if not isinstance(values, collections.abc.Iterable):\n raise TypeError('Feature values must be specified in an iterable.')\n for value in values:\n if (not isinstance(value, (six.string_types, six.binary_type)) and\n not isinstance(value, int)):\n raise NotImplementedError(\n 'Only string and int values are supported as the slice value.')\n # Extract the unique slice values per feature.\n for feature_name in features:\n if features[feature_name] is not None:\n features[feature_name] = set(features[feature_name])\n\n def feature_value_slicer(record_batch: pa.RecordBatch) -> Iterable[\n types.SlicedRecordBatch]:\n \"\"\"A function that generates sliced record batches.\n\n The naive approach of doing this would be to iterate each row, identify\n slice keys for the row and keep track of index ranges for each slice key.\n And then generate an arrow record batch for each slice key based on the\n index ranges. This would be expensive as we are identifying the slice keys\n for each row individually and we would have to loop over the feature values\n including crossing them when we have to slice on multiple features. The\n current approach generates the slice keys for a batch by performing joins\n over indices of individual features. And then groups the joined record batch\n by slice key to get the row indices corresponding to a slice.\n\n Args:\n record_batch: Arrow RecordBatch.\n\n Yields:\n Sliced record batch (slice_key, record_batch) where record_batch contains\n the rows corresponding to a slice.\n \"\"\"\n per_feature_parent_indices = []\n for feature_name, values in six.iteritems(features):\n feature_array = record_batch.column(\n record_batch.schema.get_field_index(feature_name))\n flattened, value_parent_indices = arrow_util.flatten_nested(\n feature_array, True)\n non_missing_values = np.asarray(flattened)\n # Create dataframe with feature value and parent index.\n df = DataFrame({feature_name: non_missing_values,\n _PARENT_INDEX_COLUMN: value_parent_indices})\n df.drop_duplicates(inplace=True)\n # Filter based on slice values\n if values is not None:\n df = df.loc[df[feature_name].isin(values)]\n per_feature_parent_indices.append(df)\n\n # Join dataframes based on parent indices.\n # Note that we want the parent indices per slice key to be sorted in the\n # merged dataframe. The individual dataframes have the parent indices in\n # sorted order. We use \"inner\" join type to preserve the order of the left\n # keys (also note that same parent index rows would be consecutive). 
Hence\n # we expect the merged dataframe to have sorted parent indices per\n # slice key.\n merged_df = functools.reduce(\n lambda base, update: pd.merge(base, update, how='inner', # pylint: disable=g-long-lambda\n on=_PARENT_INDEX_COLUMN),\n per_feature_parent_indices)\n\n # Construct a new column in the merged dataframe with the slice keys.\n merged_df[_SLICE_KEY_COLUMN] = ''\n index = 0\n for col_name in sorted(merged_df.columns):\n if col_name in [_PARENT_INDEX_COLUMN, _SLICE_KEY_COLUMN]:\n continue\n slice_key_col = (_to_slice_key(col_name) + '_' +\n merged_df[col_name].apply(_to_slice_key))\n if index == 0:\n merged_df[_SLICE_KEY_COLUMN] = slice_key_col\n index += 1\n else:\n merged_df[_SLICE_KEY_COLUMN] += ('_' + slice_key_col)\n\n # Since the parent indices are sorted per slice key, the groupby would\n # preserve the sorted order within each group.\n per_slice_parent_indices = merged_df.groupby(\n _SLICE_KEY_COLUMN, sort=False)[_PARENT_INDEX_COLUMN]\n for slice_key, parent_indices in per_slice_parent_indices:\n yield (slice_key,\n table_util.RecordBatchTake(record_batch,\n pa.array(parent_indices.to_numpy())))\n\n return feature_value_slicer\n\n\ndef _to_slice_key(feature_value: Any):\n \"\"\"Decode slice key as UTF-8.\"\"\"\n # For bytes features we try decoding it as utf-8 (and throw an error if\n # fails). This is because in stats proto the slice name (dataset name) is a\n # string field which can only accept valid unicode.\n if isinstance(feature_value, six.binary_type):\n decoded_value = stats_util.maybe_get_utf8(feature_value)\n if decoded_value is None:\n raise ValueError('Feature names and slicing feature values must be valid'\n ' UTF-8. Found value {}.'.format(feature_value))\n return decoded_value\n return str(feature_value)\n\n\ndef generate_slices(\n record_batch: pa.RecordBatch,\n slice_functions: Iterable[types.SliceFunction], **kwargs\n ) -> Iterable[types.SlicedRecordBatch]:\n \"\"\"Generates sliced record batches based on provided slice functions.\n\n Args:\n record_batch: Arrow RecordBatch.\n slice_functions: An iterable of functions each of which takes as input an\n example (and zero or more kwargs) and returns a list of slice keys.\n **kwargs: Keyword arguments to pass to each of the slice_functions.\n\n Yields:\n Sliced record batch (slice_key, record batch).\n \"\"\"\n for slice_fn in slice_functions:\n try:\n for sliced_record_batch in slice_fn(record_batch, **kwargs):\n yield sliced_record_batch\n except Exception as e:\n raise ValueError('One of the slice_functions %s raised an exception: %s.'\n % (slice_fn.__name__, repr(e)))\n" ]
[ [ "pandas.DataFrame", "numpy.asarray", "pandas.merge" ] ]
Savimaster/Prodigy
[ "d4dcf516639348247ebdba2ba80a20dc3ff09a84" ]
[ "Recommendation/class_recommendation_system.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Class Recommendation System.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1cErX1ARVB1TLtVZVXof1HIzsuGl3K8Rw\n\"\"\"\nimport pandas as pd\nfrom rake_nltk import Rake\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport json\n\n# fake json string\ndata = '{\"\":{\"row1\":1,\"row2\":2,\"row3\":3},\"col2\":{\"row1\":\"x\",\"row2\":\"y\",\"row3\":\"z\"}}'\n\n# read json to data frame\ndf = pd.read_json(data)\nprint(df)\n\npd.set_option('display.max_columns', 10)\n# df = pd.read_csv('/Recommendation/data_file.csv') <- probably won't need this\nprint(df.head())\n\nprint(df.shape)\n\ndf = df[['Class-Title', 'Class-Type', 'Tutor', 'Desc']]\nprint(df.head())\n\n# discarding the commas between the tutor's full names and getting only the first three names\ndf['Tutor'] = df['Tutor'].map(lambda x: x.split(','))\n\n# merging together first and last name for each tutor, so it's considered as one word\nfor index, row in df.iterrows():\n row['Tutor'] = [x.lower().replace(' ', '') for x in row['Tutor']]\nprint(df.head(10))\n\n# initializing the new column\ndf['Key_words'] = \"\"\n\nfor index, row in df.iterrows():\n description = row['Desc']\n\n # instantiating Rake, uses english stopwords from NLTK and discard all puntuation characters\n x = Rake()\n\n # extracting the words by passing the text\n x.extract_keywords_from_text(description)\n\n # getting the dictionary whith key words and their scores\n key_words_dict_scores = x.get_word_degrees()\n\n # assigning the key words to the new column\n row['Key_words'] = list(key_words_dict_scores.keys())\n\n# dropping the description column\ndf.drop(columns=['Desc'], inplace=True)\n\ndf.set_index('Class-Title', inplace=True)\nprint(df.head())\n\ndf['combined_words'] = ''\ncolumns = df.columns\nfor index, row in df.iterrows():\n words = ''\n for col in columns:\n words = words + ' '.join(row[col]) + ' '\n row['combined_words'] = words\n\ndf.drop(columns=[col for col in df.columns if col !=\n 'combined_words'], inplace=True)\n\nprint(df.head())\n\n# instantiating and generating the count matrix\ncount = CountVectorizer()\ncount_matrix = count.fit_transform(df['combined_words'])\n\n# creating a Series for the class titles so they are associated to an ordered numerical\nindices = pd.Series(df.index)\nprint(indices[:5])\n\n# generating the cosine similarity matrix\ncosine_sim = cosine_similarity(count_matrix, count_matrix)\nprint(cosine_sim)\n\n# creating a Series for the class titles so they are associated to an ordered numerical\n\n\ndef recommendations(title, cosine_sim=cosine_sim):\n\n recommended_classes = []\n\n # getting the index of the class that matches the title\n idx = indices[indices == title].index[0]\n\n # creating a Series with the similarity scores in descending order\n score_series = pd.Series(cosine_sim[idx]).sort_values(ascending=False)\n\n # getting the indexes of the 5 most similar classes\n top_indexes = list(score_series.iloc[1:6].index)\n\n for i in top_indexes:\n recommended_classes.append(list(df.index)[i])\n\n return recommended_classes\n\n" ]
[ [ "pandas.set_option", "pandas.read_json", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.metrics.pairwise.cosine_similarity", "pandas.Series" ] ]
bariluz93/nematus
[ "b9c03b50647f4e0a8f5991b5a265c9a6a84d04ca" ]
[ "nematus/translate_utils.py" ]
[ "import logging\nimport sys\nimport time\nimport numpy\nimport tensorflow as tf\n\n# ModuleNotFoundError is new in 3.6; older versions will throw SystemError\nif sys.version_info < (3, 6):\n ModuleNotFoundError = SystemError\n\ntry:\n from . import exception\n from . import util\nexcept (ModuleNotFoundError, ImportError) as e:\n import exception\n import util\nimport sys\nsys.path.append(\"..\") # Adds higher directory to python modules path.\nfrom consts import get_u_l_c_p\n\nMIN_LINE_NUM = 1552\ndef translate_batch(session, sampler, x, x_mask, max_translation_len,\n normalization_alpha):\n \"\"\"Translate a batch using a RandomSampler or BeamSearchSampler.\n\n Args:\n session: a TensorFlow session.\n sampler: a BeamSearchSampler or RandomSampler object.\n x: input Tensor with shape (factors, max_seq_len, batch_size).\n x_mask: mask Tensor for x with shape (max_seq_len, batch_size).\n max_translation_len: integer specifying maximum translation length.\n normalization_alpha: float specifying alpha parameter for length\n normalization.\n\n Returns:\n A list of lists of (translation, score) pairs. The outer list contains\n one list for each input sentence in the batch. The inner lists contain\n k elements (where k is the beam size), sorted by score in best-first\n order.\n \"\"\"\n\n x_tiled = numpy.tile(x, reps=[1, 1, sampler.beam_size])\n x_mask_tiled = numpy.tile(x_mask, reps=[1, sampler.beam_size])\n\n feed_dict = {}\n\n # Feed inputs to the models.\n for model, config in zip(sampler.models, sampler.configs):\n if config.model_type == 'rnn':\n feed_dict[model.inputs.x] = x_tiled\n feed_dict[model.inputs.x_mask] = x_mask_tiled\n else:\n assert config.model_type == 'transformer'\n # Inputs don't need to be tiled in the Transformer because it\n # checks for different batch sizes in the encoder and decoder and\n # does its own tiling internally at the connection points.\n feed_dict[model.inputs.x] = x\n feed_dict[model.inputs.x_mask] = x_mask\n feed_dict[model.inputs.training] = False\n\n # Feed inputs to the sampler.\n feed_dict[sampler.inputs.batch_size_x] = x.shape[-1]\n feed_dict[sampler.inputs.max_translation_len] = max_translation_len\n feed_dict[sampler.inputs.normalization_alpha] = normalization_alpha\n\n # Run the sampler.\n translations, scores = session.run(sampler.outputs, feed_dict=feed_dict)\n\n assert len(translations) == x.shape[-1]\n assert len(scores) == x.shape[-1]\n\n # Sort the translations by score. 
The scores are (optionally normalized)\n # log probs so higher values are better.\n beams = []\n for i in range(len(translations)):\n pairs = zip(translations[i], scores[i])\n beams.append(sorted(pairs, key=lambda pair: pair[1], reverse=True))\n\n return beams\n\n\ndef translate_file(input_file, output_file, session, sampler, config,\n max_translation_len, normalization_alpha, consts_config_str, nbest=False,\n minibatch_size=80, maxibatch_size=20):\n \"\"\"Translates a source file using a RandomSampler or BeamSearchSampler.\n\n Args:\n input_file: file object from which source sentences will be read.\n output_file: file object to which translations will be written.\n session: TensorFlow session.\n sampler: BeamSearchSampler or RandomSampler object.\n config: model config.\n max_translation_len: integer specifying maximum translation length.\n normalization_alpha: float specifying alpha parameter for length\n normalization.\n nbest: if True, produce n-best output with scores; otherwise 1-best.\n minibatch_size: minibatch size in sentences.\n maxibatch_size: number of minibatches to read and sort, pre-translation.\n \"\"\"\n\n def translate_maxibatch(maxibatch, num_to_target, num_prev_translated, line_num):\n \"\"\"Translates an individual maxibatch.\n\n Args:\n maxibatch: a list of sentences.\n num_to_target: dictionary mapping target vocabulary IDs to strings.\n num_prev_translated: the number of previously translated sentences.\n line_num: line number of the most recently read input sentence.\n \"\"\"\n # Sort the maxibatch by length and split into minibatches.\n try:\n minibatches, idxs = util.read_all_lines(config, maxibatch,\n minibatch_size)\n except exception.Error as x:\n logging.error(x.msg)\n sys.exit(1)\n\n # Translate the minibatches and store the resulting beam (i.e.\n # translations and scores) for each sentence.\n beams = []\n for x in minibatches:\n y_dummy = numpy.zeros(shape=(len(x),1))\n x, x_mask, _, _ = util.prepare_data(x, y_dummy, config.factors,\n maxlen=None)\n sample = translate_batch(session, sampler, x, x_mask,\n max_translation_len, normalization_alpha)\n beams.extend(sample)\n num_translated = num_prev_translated + len(beams)\n logging.info('Translated {} sents'.format(num_translated))\n\n # Put beams into the same order as the input maxibatch.\n # (dtype=object keeps the ragged beam lists intact; numpy.object is deprecated)\n tmp = numpy.array(beams, dtype=object)\n ordered_beams = tmp[idxs.argsort()]\n\n # Write the translations to the output file.\n for i, beam in enumerate(ordered_beams):\n if nbest:\n num = num_prev_translated + i\n for sent, cost in beam:\n translation = util.seq2words(sent, num_to_target)\n line = \"{} ||| {} ||| {}\\n\".format(num, translation,\n str(cost))\n output_file.write(line)\n else:\n best_hypo, cost = beam[0]\n line = util.seq2words(best_hypo, num_to_target) + '\\n'\n if PRINT_LINE_NUMS:\n line = \"{} ||| {}\".format(line_num, line)\n output_file.write(line)\n\n _, _, COLLECT_EMBEDDING_TABLE, PRINT_LINE_NUMS = get_u_l_c_p(consts_config_str)\n _, _, _, num_to_target = util.load_dictionaries(config)\n\n logging.info(\"NOTE: Length of translations is capped to {}\".format(\n max_translation_len))\n\n start_time = time.time()\n\n num_translated = 0\n maxibatch = []\n line_num = 0\n while True:\n if COLLECT_EMBEDDING_TABLE and line_num > 1:\n break\n try:\n line = input_file.readline()\n # print(line)\n line_num += 1\n if line == \"\":\n if len(maxibatch) > 0:\n translate_maxibatch(maxibatch, num_to_target, num_translated, line_num)\n num_translated += len(maxibatch)\n break\n # if line_num< MIN_LINE_NUM:\n # print(\"not translating line num: \"+str(line_num))\n # continue\n 
maxibatch.append(line)\n # if len(maxibatch) == (maxibatch_size * minibatch_size):\n # NOTE: translating one sentence at a time instead of a full maxibatch\n if len(maxibatch) == 1:\n translate_maxibatch(maxibatch, num_to_target, num_translated, line_num)\n num_translated += len(maxibatch)\n maxibatch = []\n except Exception:\n print(\"line number \"+str(line_num)+\" wasn't translated: \"+line)\n maxibatch = []\n continue\n\n duration = time.time() - start_time\n logging.info('Translated {} sents in {} sec. Speed {} sents/sec'.format(\n num_translated, duration, num_translated/duration))\n" ]
[ [ "numpy.array", "numpy.tile" ] ]
Bartolo1024/ignite
[ "b087fef0bc5f97cda415c1c56f1cd589383c54be" ]
[ "tests/ignite/handlers/test_checkpoint.py" ]
[ "import os\nimport warnings\nfrom unittest.mock import MagicMock\n\nimport pytest\nimport torch\nimport torch.nn as nn\n\nimport ignite.distributed as idist\nfrom ignite.engine import Engine, Events, State\nfrom ignite.handlers import Checkpoint, DiskSaver, ModelCheckpoint\nfrom ignite.handlers.checkpoint import BaseSaveHandler\n\n_PREFIX = \"PREFIX\"\n\n\nclass DummyModel(nn.Module):\n def __init__(self):\n super(DummyModel, self).__init__()\n self.net = nn.Linear(1, 1)\n\n def forward(self, x):\n return self.net(x)\n\n\nclass DummyPretrainedModel(nn.Module):\n def __init__(self):\n super(DummyPretrainedModel, self).__init__()\n self.features = nn.Linear(4, 2, bias=False)\n self.fc = nn.Linear(2, 1)\n\n def forward(self, x):\n x = self.features(x)\n x = self.fc(x)\n return x\n\n\ndef test_checkpoint_wrong_input():\n\n with pytest.raises(TypeError, match=r\"Argument `to_save` should be a dictionary\"):\n Checkpoint(12, lambda x: x, \"prefix\")\n\n with pytest.raises(TypeError, match=r\"Argument `to_save` should be a dictionary\"):\n Checkpoint([12], lambda x: x, \"prefix\")\n\n with pytest.raises(ValueError, match=r\"No objects to checkpoint.\"):\n Checkpoint({}, lambda x: x, \"prefix\")\n\n model = DummyModel()\n to_save = {\"model\": model}\n\n with pytest.raises(TypeError, match=r\"Argument `save_handler` should be callable\"):\n Checkpoint(to_save, 12, \"prefix\")\n\n with pytest.raises(\n ValueError, match=r\"If `score_name` is provided, then `score_function` should be also provided.\"\n ):\n Checkpoint(to_save, lambda x: x, score_name=\"acc\")\n\n with pytest.raises(TypeError, match=r\"global_step_transform should be a function.\"):\n Checkpoint(to_save, lambda x: x, score_function=lambda e: 123, score_name=\"acc\", global_step_transform=123)\n\n with pytest.warns(UserWarning, match=r\"Argument archived is deprecated\"):\n Checkpoint(to_save, lambda x: x, score_function=lambda e: 123, score_name=\"acc\", archived=True)\n\n\ndef test_checkpoint_score_function_wrong_output():\n model = DummyModel()\n to_save = {\"model\": model}\n\n checkpointer = Checkpoint(to_save, lambda x: x, score_function=lambda e: {\"1\": 1}, score_name=\"acc\")\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n with pytest.raises(ValueError, match=r\"Output of score_function should be a number\"):\n checkpointer(trainer)\n\n\ndef test_checkpoint_default():\n def _test(to_save, obj, name):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n checkpointer = Checkpoint(to_save, save_handler=save_handler)\n assert checkpointer.last_checkpoint is None\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n\n metadata = {\"basename\": name, \"score_name\": None, \"priority\": 0}\n save_handler.assert_called_with(obj, \"{}_0.pt\".format(name), metadata)\n\n trainer.state.epoch = 12\n trainer.state.iteration = 1234\n checkpointer(trainer)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = 1234\n save_handler.assert_called_with(obj, \"{}_1234.pt\".format(name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}_0.pt\".format(name))\n assert checkpointer.last_checkpoint == \"{}_1234.pt\".format(name)\n\n model = DummyModel()\n to_save = {\"model\": model}\n _test(to_save, model.state_dict(), \"model\")\n\n model = DummyModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n to_save = {\"model\": model, 
\"optimizer\": optimizer}\n _test(to_save, {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}, \"checkpoint\")\n\n\ndef test_checkpoint_with_dp():\n\n model = DummyModel()\n dp_model = nn.DataParallel(model)\n to_save = {\"model\": dp_model}\n\n save_handler = MagicMock(spec=BaseSaveHandler)\n checkpointer = Checkpoint(to_save, save_handler=save_handler)\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n metadata = {\"basename\": \"model\", \"score_name\": None, \"priority\": 0}\n save_handler.assert_called_with(model.state_dict(), \"model_0.pt\", metadata)\n\n\ndef test_checkpoint_with_global_step_transform():\n def _test(filename_prefix, to_save, obj, name):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n checkpointer = Checkpoint(\n to_save,\n save_handler=save_handler,\n filename_prefix=filename_prefix,\n global_step_transform=lambda e, _: e.state.epoch,\n )\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=1, iteration=1)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n\n if len(filename_prefix) > 0:\n filename_prefix += \"_\"\n\n metadata = {\"basename\": \"{}{}\".format(filename_prefix, name), \"score_name\": None, \"priority\": 1}\n save_handler.assert_called_with(obj, \"{}{}_1.pt\".format(filename_prefix, name), metadata)\n\n trainer.state.epoch = 12\n trainer.state.iteration = 1234\n checkpointer(trainer)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = 1234\n save_handler.assert_called_with(obj, \"{}{}_12.pt\".format(filename_prefix, name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}{}_1.pt\".format(filename_prefix, name))\n assert checkpointer.last_checkpoint == \"{}{}_12.pt\".format(filename_prefix, name)\n\n for prefix in [\"\", \"dummytask\"]:\n model = DummyModel()\n to_save = {\"model\": model}\n _test(prefix, to_save, model.state_dict(), \"model\")\n\n model = DummyModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n to_save = {\"model\": model, \"optimizer\": optimizer}\n _test(prefix, to_save, {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}, \"checkpoint\")\n\n\ndef test_checkpoint_with_score_function():\n def _test(to_save, obj, name):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n checkpointer = Checkpoint(to_save, save_handler=save_handler, score_function=lambda e: e.state.score)\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=1, iteration=1, score=0.77)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n\n metadata = {\"basename\": name, \"score_name\": None, \"priority\": 0.77}\n save_handler.assert_called_with(obj, \"{}_0.7700.pt\".format(name), metadata)\n\n trainer.state.epoch = 12\n trainer.state.iteration = 1234\n trainer.state.score = 0.78\n\n checkpointer(trainer)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = 0.78\n save_handler.assert_called_with(obj, \"{}_0.7800.pt\".format(name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}_0.7700.pt\".format(name))\n assert checkpointer.last_checkpoint == \"{}_0.7800.pt\".format(name)\n\n model = DummyModel()\n to_save = {\"model\": model}\n _test(to_save, model.state_dict(), \"model\")\n\n model = DummyModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n to_save = {\"model\": model, \"optimizer\": optimizer}\n 
_test(to_save, {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}, \"checkpoint\")\n\n\ndef test_checkpoint_with_score_name_and_function():\n def _test(to_save, obj, name):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n checkpointer = Checkpoint(\n to_save, save_handler=save_handler, score_name=\"loss\", score_function=lambda e: e.state.score\n )\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=1, iteration=1, score=-0.77)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n\n metadata = {\"basename\": name, \"score_name\": \"loss\", \"priority\": -0.77}\n save_handler.assert_called_with(obj, \"{}_loss=-0.7700.pt\".format(name), metadata)\n\n trainer.state.epoch = 12\n trainer.state.iteration = 1234\n trainer.state.score = -0.76\n\n checkpointer(trainer)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = -0.76\n save_handler.assert_called_with(obj, \"{}_loss=-0.7600.pt\".format(name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}_loss=-0.7700.pt\".format(name))\n assert checkpointer.last_checkpoint == \"{}_loss=-0.7600.pt\".format(name)\n\n model = DummyModel()\n to_save = {\"model\": model}\n _test(to_save, model.state_dict(), \"model\")\n\n model = DummyModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n to_save = {\"model\": model, \"optimizer\": optimizer}\n _test(to_save, {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}, \"checkpoint\")\n\n\ndef test_checkpoint_with_int_score():\n def _test(to_save, obj, name, score_name=None):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n checkpointer = Checkpoint(\n to_save, save_handler=save_handler, score_name=score_name, score_function=lambda e: e.state.epoch\n )\n\n if score_name is None:\n score_name = \"\"\n else:\n score_name += \"=\"\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=1, iteration=1)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n\n metadata = {\"basename\": name, \"score_name\": score_name[:-1] if len(score_name) > 0 else None, \"priority\": 1}\n save_handler.assert_called_with(obj, \"{}_{}1.pt\".format(name, score_name), metadata)\n\n trainer.state.epoch = 12\n trainer.state.iteration = 1234\n\n checkpointer(trainer)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = 12\n save_handler.assert_called_with(obj, \"{}_{}12.pt\".format(name, score_name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}_{}1.pt\".format(name, score_name))\n assert checkpointer.last_checkpoint == \"{}_{}12.pt\".format(name, score_name)\n\n model = DummyModel()\n to_save = {\"model\": model}\n _test(to_save, model.state_dict(), \"model\")\n _test(to_save, model.state_dict(), \"model\", \"epoch\")\n\n model = DummyModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n to_save = {\"model\": model, \"optimizer\": optimizer}\n _test(to_save, {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}, \"checkpoint\")\n _test(to_save, {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}, \"checkpoint\", \"epoch\")\n\n\ndef test_checkpoint_with_score_function_and_trainer_epoch():\n def _test(to_save, obj, name):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n trainer = Engine(lambda e, b: None)\n evaluator = Engine(lambda e, b: None)\n trainer.state = State(epoch=11, iteration=1)\n\n checkpointer = Checkpoint(\n to_save,\n 
save_handler=save_handler,\n global_step_transform=lambda _1, _2: trainer.state.epoch,\n score_function=lambda e: e.state.metrics[\"val_acc\"],\n )\n\n evaluator.state = State(epoch=1, iteration=1000, metrics={\"val_acc\": 0.77})\n checkpointer(evaluator)\n assert save_handler.call_count == 1\n\n metadata = {\"basename\": name, \"score_name\": None, \"priority\": 0.77}\n save_handler.assert_called_with(obj, \"{}_11_0.7700.pt\".format(name), metadata)\n\n trainer.state.epoch = 12\n evaluator.state.metrics[\"val_acc\"] = 0.78\n\n checkpointer(evaluator)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = 0.78\n save_handler.assert_called_with(obj, \"{}_12_0.7800.pt\".format(name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}_11_0.7700.pt\".format(name))\n assert checkpointer.last_checkpoint == \"{}_12_0.7800.pt\".format(name)\n\n model = DummyModel()\n to_save = {\"model\": model}\n _test(to_save, model.state_dict(), \"model\")\n\n\ndef test_checkpoint_with_score_name_and_function_and_trainer_epoch():\n def _test(to_save, obj, name):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n trainer = Engine(lambda e, b: None)\n evaluator = Engine(lambda e, b: None)\n trainer.state = State(epoch=11, iteration=1)\n\n checkpointer = Checkpoint(\n to_save,\n save_handler=save_handler,\n global_step_transform=lambda _1, _2: trainer.state.epoch,\n score_name=\"val_acc\",\n score_function=lambda e: e.state.metrics[\"val_acc\"],\n )\n\n evaluator.state = State(epoch=1, iteration=1000, metrics={\"val_acc\": 0.77})\n\n checkpointer(evaluator)\n assert save_handler.call_count == 1\n\n metadata = {\"basename\": name, \"score_name\": \"val_acc\", \"priority\": 0.77}\n save_handler.assert_called_with(obj, \"{}_11_val_acc=0.7700.pt\".format(name), metadata)\n\n trainer.state.epoch = 12\n evaluator.state.metrics[\"val_acc\"] = 0.78\n\n checkpointer(evaluator)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = 0.78\n save_handler.assert_called_with(obj, \"{}_12_val_acc=0.7800.pt\".format(name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}_11_val_acc=0.7700.pt\".format(name))\n assert checkpointer.last_checkpoint == \"{}_12_val_acc=0.7800.pt\".format(name)\n\n model = DummyModel()\n to_save = {\"model\": model}\n _test(to_save, model.state_dict(), \"model\")\n\n\ndef test_checkpoint_last_checkpoint():\n save_handler = MagicMock(spec=BaseSaveHandler)\n to_save = {\"model\": DummyModel()}\n\n checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)\n\n trainer = Engine(lambda e, b: None)\n\n for i in range(10):\n trainer.state = State(epoch=1, iteration=i)\n checkpointer(trainer)\n\n assert save_handler.call_count == 10\n assert checkpointer.last_checkpoint == \"{}_9.pt\".format(\"model\")\n\n\ndef test_checkpoint_last_checkpoint_on_score():\n save_handler = MagicMock(spec=BaseSaveHandler)\n to_save = {\"model\": DummyModel()}\n\n checkpointer = Checkpoint(\n to_save,\n save_handler=save_handler,\n n_saved=None,\n score_name=\"val_acc\",\n score_function=lambda e: e.state.metrics[\"val_acc\"],\n )\n\n trainer = Engine(lambda e, b: None)\n\n val_acc = 0.0\n for i in range(10):\n val_acc = i * 0.1\n trainer.state = State(epoch=1, iteration=i, metrics={\"val_acc\": val_acc})\n checkpointer(trainer)\n\n assert save_handler.call_count == 10\n assert checkpointer.last_checkpoint == \"{}_val_acc=0.9000.pt\".format(\"model\")\n\n\ndef 
test_checkpoint_save_handler_callable():\n def save_handler(c, f):\n assert f == \"model_12.pt\"\n\n to_save = {\"model\": DummyModel()}\n\n checkpointer = Checkpoint(to_save, save_handler=save_handler,)\n\n trainer = Engine(lambda e, b: None)\n\n trainer.state = State(epoch=1, iteration=12)\n checkpointer(trainer)\n\n\ndef test_model_checkpoint_args_validation(dirname):\n existing = os.path.join(dirname, \"existing_dir\")\n nonempty = os.path.join(dirname, \"nonempty\")\n\n os.makedirs(existing)\n os.makedirs(nonempty)\n\n with open(os.path.join(nonempty, \"{}_name_0.pt\".format(_PREFIX)), \"w\"):\n pass\n\n with pytest.raises(ValueError, match=r\"with extension '.pt' are already present \"):\n ModelCheckpoint(nonempty, _PREFIX)\n\n with pytest.raises(ValueError, match=r\"Argument save_interval is deprecated and should be None\"):\n ModelCheckpoint(existing, _PREFIX, save_interval=42)\n\n with pytest.raises(ValueError, match=r\"Directory path '\\S+' is not found\"):\n ModelCheckpoint(os.path.join(dirname, \"non_existing_dir\"), _PREFIX, create_dir=False)\n\n with pytest.raises(ValueError, match=r\"Argument save_as_state_dict is deprecated and should be True\"):\n ModelCheckpoint(existing, _PREFIX, create_dir=False, save_as_state_dict=False)\n\n with pytest.raises(ValueError, match=r\"If `score_name` is provided, then `score_function` \"):\n ModelCheckpoint(existing, _PREFIX, create_dir=False, score_name=\"test\")\n\n with pytest.raises(TypeError, match=r\"global_step_transform should be a function\"):\n ModelCheckpoint(existing, _PREFIX, create_dir=False, global_step_transform=1234)\n\n with pytest.warns(UserWarning, match=r\"Argument archived is deprecated\"):\n ModelCheckpoint(existing, _PREFIX, create_dir=False, archived=True)\n\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)\n assert h.last_checkpoint is None\n with pytest.raises(RuntimeError, match=r\"No objects to checkpoint found.\"):\n h(None, [])\n\n\ndef test_model_checkpoint_simple_recovery(dirname):\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=1)\n\n model = DummyModel()\n to_save = {\"model\": model}\n h(engine, to_save)\n\n fname = h.last_checkpoint\n assert isinstance(fname, str)\n assert os.path.join(dirname, _PREFIX) in fname\n assert os.path.exists(fname)\n loaded_objects = torch.load(fname)\n assert loaded_objects == model.state_dict()\n\n\ndef test_model_checkpoint_simple_recovery_from_existing_non_empty(dirname):\n def _test(ext, require_empty):\n previous_fname = os.path.join(dirname, \"{}_{}_{}{}\".format(_PREFIX, \"obj\", 1, ext))\n with open(previous_fname, \"w\") as f:\n f.write(\"test\")\n\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=True, require_empty=require_empty)\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=1)\n\n model = DummyModel()\n to_save = {\"model\": model}\n h(engine, to_save)\n\n fname = h.last_checkpoint\n ext = \".pt\"\n assert isinstance(fname, str)\n assert os.path.join(dirname, \"{}_{}_{}{}\".format(_PREFIX, \"model\", 1, ext)) == fname\n assert os.path.exists(fname)\n assert os.path.exists(previous_fname)\n loaded_objects = torch.load(fname)\n assert loaded_objects == model.state_dict()\n os.remove(fname)\n\n _test(\".txt\", require_empty=True)\n _test(\".pt\", require_empty=False)\n\n\ndef test_disk_saver_atomic(dirname):\n\n model = DummyModel()\n to_save_serializable = {\"model\": model}\n to_save_non_serializable = {\"model\": lambda x: x}\n\n def 
_test_existance(atomic, _to_save, expected):\n\n saver = DiskSaver(dirname, atomic=atomic, create_dir=False, require_empty=False)\n fname = \"test.pt\"\n try:\n with warnings.catch_warnings():\n # Ignore torch/serialization.py:292: UserWarning: Couldn't retrieve source code for container of type\n # DummyModel. It won't be checked for correctness upon loading.\n warnings.simplefilter(\"ignore\", category=UserWarning)\n saver(_to_save, fname)\n except Exception:\n pass\n fp = os.path.join(saver.dirname, fname)\n assert os.path.exists(fp) == expected\n if expected:\n saver.remove(fname)\n\n _test_existance(atomic=False, _to_save=to_save_serializable, expected=True)\n _test_existance(atomic=False, _to_save=to_save_non_serializable, expected=True)\n\n _test_existance(atomic=True, _to_save=to_save_serializable, expected=True)\n _test_existance(atomic=True, _to_save=to_save_non_serializable, expected=False)\n\n\ndef test_last_k(dirname):\n\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=0)\n\n model = DummyModel()\n to_save = {\"model\": model}\n h(engine, to_save)\n\n for i in range(1, 9):\n engine.state.iteration = i\n h(engine, to_save)\n\n expected = [\"{}_{}_{}.pt\".format(_PREFIX, \"model\", i) for i in [7, 8]]\n\n assert sorted(os.listdir(dirname)) == expected, \"{} vs {}\".format(sorted(os.listdir(dirname)), expected)\n\n\ndef test_disabled_n_saved(dirname):\n\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=None)\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=0)\n\n model = DummyModel()\n to_save = {\"model\": model}\n\n num_iters = 100\n for i in range(num_iters):\n engine.state.iteration = i\n h(engine, to_save)\n\n saved_files = sorted(os.listdir(dirname))\n assert len(saved_files) == num_iters, \"{}\".format(saved_files)\n\n expected = sorted([\"{}_{}_{}.pt\".format(_PREFIX, \"model\", i) for i in range(num_iters)])\n assert saved_files == expected, \"{} vs {}\".format(saved_files, expected)\n\n\ndef test_best_k(dirname):\n scores = iter([1.2, -2.0, 3.1, -4.0])\n\n def score_function(_):\n return next(scores)\n\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function)\n\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=0)\n\n model = DummyModel()\n to_save = {\"model\": model}\n for _ in range(4):\n h(engine, to_save)\n\n expected = [\"{}_{}_{:.4f}.pt\".format(_PREFIX, \"model\", i) for i in [1.2, 3.1]]\n\n assert sorted(os.listdir(dirname)) == expected\n\n\ndef test_best_k_with_suffix(dirname):\n scores = [0.3456789, 0.1234, 0.4567, 0.134567]\n scores_iter = iter(scores)\n\n def score_function(engine):\n return next(scores_iter)\n\n h = ModelCheckpoint(\n dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function, score_name=\"val_loss\"\n )\n\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=0)\n\n model = DummyModel()\n to_save = {\"model\": model}\n for _ in range(4):\n engine.state.epoch += 1\n h(engine, to_save)\n\n expected = [\"{}_{}_val_loss={:.4}.pt\".format(_PREFIX, \"model\", scores[e - 1]) for e in [1, 3]]\n\n assert sorted(os.listdir(dirname)) == expected\n\n\ndef test_removes_each_score_at_most_once(dirname):\n scores = [0, 1, 1, 2, 3]\n scores_iter = iter(scores)\n\n def score_function(_):\n return next(scores_iter)\n\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2, 
score_function=score_function)\n\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=0)\n\n model = DummyModel()\n to_save = {\"model\": model}\n for _ in range(len(scores)):\n h(engine, to_save)\n\n # If a score was removed multiple times, the code above would have raised a\n # FileNotFoundError. So this just tests the absence of such a failure\n # without further assertions.\n\n\ndef test_with_engine(dirname):\n def update_fn(_1, _2):\n pass\n\n name = \"model\"\n engine = Engine(update_fn)\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)\n\n model = DummyModel()\n to_save = {\"model\": model}\n engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)\n engine.run([0], max_epochs=4)\n\n expected = [\"{}_{}_{}.pt\".format(_PREFIX, name, i) for i in [3, 4]]\n\n assert sorted(os.listdir(dirname)) == expected\n\n\ndef test_with_state_dict(dirname):\n def update_fn(_1, _2):\n pass\n\n engine = Engine(update_fn)\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)\n\n model = DummyModel()\n to_save = {\"model\": model}\n engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)\n engine.run([0], max_epochs=4)\n\n saved_model = os.path.join(dirname, os.listdir(dirname)[0])\n load_model = torch.load(saved_model)\n\n assert not isinstance(load_model, DummyModel)\n assert isinstance(load_model, dict)\n\n model_state_dict = model.state_dict()\n loaded_model_state_dict = load_model\n for key in model_state_dict.keys():\n assert key in loaded_model_state_dict\n\n model_value = model_state_dict[key]\n loaded_model_value = loaded_model_state_dict[key]\n\n assert model_value.numpy() == loaded_model_value.numpy()\n\n\ndef test_valid_state_dict_save(dirname):\n model = DummyModel()\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)\n\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=0)\n\n to_save = {\"name\": 42}\n with pytest.raises(TypeError, match=r\"should have `state_dict` method\"):\n h(engine, to_save)\n to_save = {\"name\": model}\n try:\n h(engine, to_save)\n except ValueError:\n pytest.fail(\"Unexpected ValueError\")\n\n\ndef _test_save_model_optimizer_lr_scheduler_with_state_dict(device, dirname, on_zero_rank=False):\n\n torch.manual_seed(23)\n\n model = DummyModel().to(device)\n\n optim = torch.optim.SGD(model.parameters(), lr=0.1)\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)\n\n def update_fn(engine, batch):\n x = torch.rand((4, 1)).to(device)\n optim.zero_grad()\n y = model(x)\n loss = y.pow(2.0).sum()\n loss.backward()\n if idist.has_xla_support:\n import torch_xla.core.xla_model as xm\n\n xm.optimizer_step(optim, barrier=True)\n else:\n optim.step()\n lr_scheduler.step()\n\n engine = Engine(update_fn)\n\n if (not on_zero_rank) or (on_zero_rank and idist.get_rank() == 0):\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=True, n_saved=1)\n\n engine.add_event_handler(\n Events.EPOCH_COMPLETED, handler, {\"model\": model, \"optimizer\": optim, \"lr_scheduler\": lr_scheduler}\n )\n\n engine.run([0], max_epochs=4)\n\n idist.barrier()\n\n saved_objects = sorted(os.listdir(dirname))\n # saved object is ['PREFIX_checkpoint_3.pt', ]\n saved_checkpoint = os.path.join(dirname, saved_objects[0])\n\n if idist.has_xla_support:\n device = \"cpu\"\n\n loaded_obj = torch.load(saved_checkpoint, map_location=device)\n for f in [\"model\", \"optimizer\", \"lr_scheduler\"]:\n assert f in loaded_obj\n loaded_model_state_dict = 
loaded_obj[\"model\"]\n loaded_optimizer_state_dict = loaded_obj[\"optimizer\"]\n loaded_lr_scheduler_state_dict = loaded_obj[\"lr_scheduler\"]\n\n assert isinstance(loaded_model_state_dict, dict)\n assert isinstance(loaded_optimizer_state_dict, dict)\n assert isinstance(loaded_lr_scheduler_state_dict, dict)\n\n # Specifically move device to CPU first\n model_state_dict = model.cpu().state_dict()\n for key in model_state_dict.keys():\n assert key in loaded_model_state_dict\n model_value = model_state_dict[key]\n loaded_model_value = loaded_model_state_dict[key]\n assert model_value.cpu().numpy() == loaded_model_value.cpu().numpy()\n\n optim_state_dict = optim.state_dict()\n for key in optim_state_dict.keys():\n assert key in loaded_optimizer_state_dict\n optim_value = optim_state_dict[key]\n loaded_optim_value = loaded_optimizer_state_dict[key]\n if idist.get_rank() == 0:\n assert optim_value == loaded_optim_value\n\n lr_scheduler_state_dict = lr_scheduler.state_dict()\n for key in lr_scheduler_state_dict.keys():\n assert key in loaded_lr_scheduler_state_dict\n lr_scheduler_value = lr_scheduler_state_dict[key]\n loaded_lr_scheduler_value = loaded_lr_scheduler_state_dict[key]\n assert lr_scheduler_value == loaded_lr_scheduler_value\n\n\ndef test_save_model_optimizer_lr_scheduler_with_state_dict(dirname):\n _test_save_model_optimizer_lr_scheduler_with_state_dict(\"cpu\", dirname)\n\n\ndef test_checkpoint_load_objects():\n\n with pytest.raises(TypeError, match=r\"Argument checkpoint should be a dictionary\"):\n Checkpoint.load_objects({}, [])\n\n with pytest.raises(TypeError, match=r\"should have `load_state_dict` method\"):\n Checkpoint.load_objects({\"a\": None}, {\"a\": None})\n\n model = DummyModel()\n to_load = {\"model\": model, \"another_model\": model}\n\n with pytest.raises(ValueError, match=r\"from `to_load` is not found in the checkpoint\"):\n Checkpoint.load_objects(to_load, {})\n\n model = DummyModel()\n to_load = {\"model\": model}\n model2 = DummyModel()\n\n chkpt = {\"model\": model2.state_dict()}\n Checkpoint.load_objects(to_load, chkpt)\n assert model.state_dict() == model2.state_dict()\n\n\ndef test_checkpoint_load_objects_from_saved_file(dirname):\n def _get_single_obj_to_save():\n model = DummyModel()\n to_save = {\"model\": model}\n return to_save\n\n def _get_multiple_objs_to_save():\n model = DummyModel()\n optim = torch.optim.SGD(model.parameters(), lr=0.001)\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)\n to_save = {\"model\": model, \"optimizer\": optim, \"lr_scheduler\": lr_scheduler}\n return to_save\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n\n # case: multiple objects\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)\n to_save = _get_multiple_objs_to_save()\n handler(trainer, to_save)\n fname = handler.last_checkpoint\n assert isinstance(fname, str)\n assert os.path.join(dirname, _PREFIX) in fname\n assert os.path.exists(fname)\n loaded_objects = torch.load(fname)\n Checkpoint.load_objects(to_save, loaded_objects)\n os.remove(fname)\n\n # case: saved multiple objects, loaded single object\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)\n to_save = _get_multiple_objs_to_save()\n handler(trainer, to_save)\n fname = handler.last_checkpoint\n assert isinstance(fname, str)\n assert os.path.join(dirname, _PREFIX) in fname\n assert os.path.exists(fname)\n loaded_objects = torch.load(fname)\n to_load = {\"model\": to_save[\"model\"]}\n 
Checkpoint.load_objects(to_load, loaded_objects)\n os.remove(fname)\n\n # case: single object\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)\n to_save = _get_single_obj_to_save()\n handler(trainer, to_save)\n fname = handler.last_checkpoint\n assert isinstance(fname, str)\n assert os.path.join(dirname, _PREFIX) in fname\n assert os.path.exists(fname)\n loaded_objects = torch.load(fname)\n Checkpoint.load_objects(to_save, loaded_objects)\n\n\ndef test_load_checkpoint_with_different_num_classes(dirname):\n model = DummyPretrainedModel()\n to_save_single_object = {\"model\": model}\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)\n handler(trainer, to_save_single_object)\n\n fname = handler.last_checkpoint\n loaded_checkpoint = torch.load(fname)\n\n to_load_single_object = {\"pretrained_features\": model.features}\n\n with pytest.raises(RuntimeError):\n Checkpoint.load_objects(to_load_single_object, loaded_checkpoint)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n Checkpoint.load_objects(to_load_single_object, loaded_checkpoint, strict=False, blah=\"blah\")\n\n loaded_weights = to_load_single_object[\"pretrained_features\"].state_dict()[\"weight\"]\n\n assert torch.all(model.state_dict()[\"features.weight\"].eq(loaded_weights))\n\n\ndef test_disksaver_wrong_input(dirname):\n\n with pytest.raises(ValueError, match=r\"Directory path '\\S+' is not found\"):\n DiskSaver(\"/tmp/non-existing-folder\", create_dir=False)\n\n def _test(ext):\n previous_fname = os.path.join(dirname, \"{}_{}_{}{}\".format(_PREFIX, \"obj\", 1, ext))\n with open(previous_fname, \"w\") as f:\n f.write(\"test\")\n\n with pytest.raises(ValueError, match=r\"with extension '.pt' are already present\"):\n DiskSaver(dirname, require_empty=True)\n\n _test(\".pt\")\n\n\ndef _test_checkpoint_with_ddp(device):\n torch.manual_seed(0)\n\n model = DummyModel().to(device)\n device_ids = (\n None if \"cpu\" in device.type else [device,]\n )\n ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)\n to_save = {\"model\": ddp_model}\n\n save_handler = MagicMock(spec=BaseSaveHandler)\n checkpointer = Checkpoint(to_save, save_handler=save_handler)\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n metadata = {\"basename\": \"model\", \"score_name\": None, \"priority\": 0}\n save_handler.assert_called_with(model.state_dict(), \"model_0.pt\", metadata)\n\n\[email protected]\ndef test_distrib_cpu(distributed_context_single_node_gloo, get_rank_zero_dirname):\n device = torch.device(\"cpu\")\n dirname = get_rank_zero_dirname()\n _test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, \"1\"))\n _test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, \"2\"), on_zero_rank=True)\n _test_checkpoint_with_ddp(device)\n\n\[email protected]\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_distrib_gpu(distributed_context_single_node_nccl, get_rank_zero_dirname):\n device = idist.device()\n dirname = get_rank_zero_dirname()\n _test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, \"1\"))\n _test_save_model_optimizer_lr_scheduler_with_state_dict(\"cpu\", os.path.join(dirname, \"2\"), on_zero_rank=True)\n 
_test_checkpoint_with_ddp(device=device)\n\n\ndef _test_tpu_saves_to_cpu(device, dirname):\n torch.manual_seed(0)\n\n h = ModelCheckpoint(dirname, _PREFIX)\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=1)\n\n model = DummyModel().to(device)\n to_save = {\"model\": model}\n\n h(engine, to_save)\n\n idist.barrier()\n\n fname = h.last_checkpoint\n assert isinstance(fname, str)\n assert os.path.join(dirname, _PREFIX) in fname\n assert os.path.exists(fname)\n loaded_objects = torch.load(fname)\n assert loaded_objects == model.cpu().state_dict()\n\n\[email protected]\[email protected](\"NUM_TPU_WORKERS\" in os.environ, reason=\"Skip if NUM_TPU_WORKERS is in env vars\")\[email protected](not idist.has_xla_support, reason=\"Not on TPU device\")\ndef test_distrib_single_device_xla(dirname):\n assert \"xla\" in idist.device().type\n _test_tpu_saves_to_cpu(idist.device(), os.path.join(dirname, \"1\"))\n _test_save_model_optimizer_lr_scheduler_with_state_dict(idist.device(), os.path.join(dirname, \"2\"))\n\n\ndef _test_tpu_saves_to_cpu_nprocs(index, dirname):\n device = idist.device()\n _test_tpu_saves_to_cpu(device, os.path.join(dirname, \"1\"))\n _test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, \"2\"))\n\n import time\n\n # hack to have all proc properly sync:\n time.sleep(1)\n\n\[email protected]\[email protected](\"NUM_TPU_WORKERS\" not in os.environ, reason=\"Skip if NUM_TPU_WORKERS is in env vars\")\[email protected](not idist.has_xla_support, reason=\"Not on TPU device\")\ndef test_distrib_single_device_xla_nprocs(xmp_executor, dirname):\n n = int(os.environ[\"NUM_TPU_WORKERS\"])\n xmp_executor(_test_tpu_saves_to_cpu_nprocs, args=(dirname,), nprocs=n)\n" ]
[ [ "torch.nn.Linear", "torch.device", "torch.rand", "torch.optim.lr_scheduler.ExponentialLR", "torch.nn.parallel.DistributedDataParallel", "torch.cuda.device_count", "torch.manual_seed", "torch.load", "torch.nn.DataParallel" ] ]
Venka97/climART
[ "b2246231f3ba8372d33e564700b872c410e33036" ]
[ "climart/models/column_handler.py" ]
[ "from typing import Dict, Optional, Tuple, Sequence, Any\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\nfrom einops import repeat\n\nfrom climart.models.GNs.constants import NODES, EDGES\nfrom climart.data_wrangling.constants import LAYERS, LEVELS, GLOBALS\nfrom climart.models.additional_layers import FeatureProjector\nfrom climart.utils.utils import normalize_adjacency_matrix_torch, identity, get_logger\n\nlog = get_logger(__name__)\n\n\ndef get_dict_entry(dictio: dict, possible_keys: Sequence[Any]):\n for possible_key in possible_keys:\n if possible_key in dictio.keys():\n return dictio[possible_key]\n else:\n pass\n\n\nclass ColumnPreprocesser:\n ONLY_LAYER_NODES = ['duplication', 'graph_net_layer_nodes', 'identity']\n ONLY_LEVEL_NODES = ['graph_net_level_nodes']\n\n def __init__(self,\n n_layers: int,\n input_dims: Dict[str, int],\n preprocessing: str,\n projector_hidden_dim: int = 128, # only if preprocessing == 'mlp'\n projector_n_layers: int = 1, # only if preprocessing == 'mlp'\n projector_net_normalization: str = 'layer_norm', # only if preprocessing == 'mlp'\n drop_node_encoding: bool = True, # only used if preprocessing == 'duplication'\n node_encoding_len: int = 3, # only used if preprocessing == 'duplication'\n use_level_features: bool = True,\n drop_last_level: bool = True # only used if preprocessing == 'duplication'\n ):\n self.input_dims = input_dims\n self.n_lay = n_layers\n self.n_lev = self.n_lay + 1\n self.out_dim = None\n self.as_string = \"\"\n self.preprocessing_type = preprocessing.lower()\n self.projector_hidden_dim = projector_hidden_dim\n self.projector_n_layers = projector_n_layers\n self.projector_net_normalization = projector_net_normalization\n self.use_level_features = use_level_features\n if not self.use_level_features:\n log.info(' Dropping level features!')\n\n if self.preprocessing_type in self.ONLY_LAYER_NODES:\n self.n_nodes = self.n_lay\n self.LAYER_NODES = slice(0, self.n_nodes) # all nodes are layer nodes\n if self.preprocessing_type in ['duplication']:\n self.drop_node_encoding = drop_node_encoding\n self.node_encoding_len = node_encoding_len\n self.drop_last_level = drop_last_level\n elif self.preprocessing_type in self.ONLY_LEVEL_NODES:\n self.n_nodes = self.n_lev\n self.LEVEL_NODES = slice(0, self.n_nodes) # all nodes are layer nodes\n else: # use global node\n self.n_nodes = self.n_lev + self.n_lay + 1 if self.use_level_features else self.n_lay + 1\n self.GLOBAL_NODE = 0\n if self.use_level_features:\n self.LEVEL_NODES = slice(1, self.n_nodes, 2) # start at 1, then every second, [1, 3, 5, 7,...]\n self.LAYER_NODES = slice(2, self.n_nodes, 2) # [2, 4, 6,...]\n else:\n self.LAYER_NODES = slice(1, self.n_nodes) # [1, 2, 3,...]\n\n log.info(f\" Inferred number of nodes/spatial dimension: {self.n_nodes}\")\n self.FEATURE_DIM_IN = 2 # dim 1 is the node dimension\n\n def get_preprocesser(self, batched: bool = False, verbose: bool = True):\n if self.preprocessing_type == 'duplication':\n preprocesser = self.duplicate_features\n if not self.use_level_features:\n self.out_dim = sum([self.input_dims[GLOBALS], self.input_dims[LAYERS]])\n else:\n self.out_dim = sum(\n [self.input_dims[GLOBALS], self.input_dims[LAYERS], self.input_dims[LEVELS]]\n )\n if self.drop_node_encoding:\n self.out_dim -= 2 * self.node_encoding_len if not self.use_level_features else 3 * self.node_encoding_len\n self.as_string = 'Duplicate global features at all layers'\n\n elif self.preprocessing_type == 'padding':\n preprocesser = self.pad_features\n self.out_dim 
= max(*[var_indim for var_indim in self.input_dims.keys()])\n self.as_string = 'Padding all var types to have same #features'\n\n elif self.preprocessing_type in ['mlp', 'mlp_projection']:\n in_dims = self.input_dims.copy()\n if not self.use_level_features:\n in_dims.pop(LEVELS)\n preprocesser = FeatureProjector(\n input_name_to_feature_dim=in_dims,\n projector_n_layers=self.projector_n_layers,\n projection_dim=self.projector_hidden_dim,\n projector_activation_func='Gelu',\n projector_net_normalization=self.projector_net_normalization,\n output_normalization=True,\n output_activation_function=False,\n projections_aggregation=self.intersperse if self.use_level_features else self.intersperse_no_levels)\n self.out_dim = self.projector_hidden_dim\n self.as_string = f'All var types are MLP-projected to a {self.projector_hidden_dim} hidden dimension'\n elif self.preprocessing_type == 'graph_net_layer_nodes':\n if batched:\n preprocesser = gn_input_dict_renamer_layer_nodes_batched\n else:\n preprocesser = gn_input_dict_renamer_layer_nodes # just rename the dict keys\n self.out_dim = self.input_dims = {NODES: get_dict_entry(self.input_dims, [LAYERS, NODES]),\n EDGES: get_dict_entry(self.input_dims, [LEVELS, EDGES]),\n GLOBALS: get_dict_entry(self.input_dims, [GLOBALS, GLOBALS])}\n self.as_string = 'No preprocessing'\n elif self.preprocessing_type == 'graph_net_level_nodes':\n if batched:\n preprocesser = gn_input_dict_renamer_level_nodes_batched\n else:\n preprocesser = gn_input_dict_renamer_level_nodes # just rename the dict keys\n\n self.out_dim = self.input_dims = {NODES: get_dict_entry(self.input_dims, [LEVELS, NODES]),\n EDGES: get_dict_entry(self.input_dims, [LAYERS, EDGES]),\n GLOBALS: self.input_dims[GLOBALS]}\n self.as_string = 'No preprocessing'\n elif self.preprocessing_type == 'drop_levels':\n def gn_input_dict_renamer_no_levels(x: Dict[str, Tensor]):\n x[NODES] = x.pop(LAYERS)\n return x\n\n preprocesser = gn_input_dict_renamer_no_levels # just rename the dict keys\n self.out_dim = {NODES: get_dict_entry(self.input_dims, [LAYERS, NODES]),\n EDGES: 0, GLOBALS: self.input_dims[GLOBALS]}\n self.as_string = 'Dropping levels'\n elif self.preprocessing_type in [None, 'identity']:\n preprocesser = identity\n self.out_dim = self.input_dims\n self.as_string = 'No preprocessing'\n else:\n raise ValueError(f\"Preprocessing type {self.preprocessing_type} not known.\")\n if verbose and preprocesser != identity:\n s = f' {self.preprocessing_type} pre-processing'\n if 'No preprocessing' not in self.as_string:\n s += f': {self.as_string}'\n log.info(s)\n return preprocesser\n\n def get_adj(self, degree_normalized: bool = False, improved: bool = False) -> Tensor:\n \"\"\" Adjacency matrix of a line graph with global node and self-loops \"\"\"\n adj = torch.zeros((self.n_nodes, self.n_nodes))\n\n if hasattr(self, 'GLOBAL_NODE'):\n adj[:, self.GLOBAL_NODE] = 1\n adj[self.GLOBAL_NODE, :] = 1\n\n for i in range(1, self.n_nodes):\n adj[i, i - 1:i + 2] = 1\n adj[i - 1:i + 2, i] = 1\n\n if degree_normalized:\n print(\"------------------------> DEGREE NORMED A\", improved)\n return normalize_adjacency_matrix_torch(adj, improved=improved, add_self_loops=True)\n return adj\n\n def get_edge_idxs(self) -> Tuple[np.ndarray, np.ndarray]:\n assert self.preprocessing_type in ['graph_net_level_nodes', 'graph_net_layer_nodes']\n # one-way: node i has an edge to node i+1\n one_way_senders = np.arange(self.n_nodes - 1) # n_lay - 1 = n_lev - 2 edges\n one_way_receivers = one_way_senders + 1\n # one-way: node i+1 has an 
edge to node i\n other_way_senders = np.arange(1, self.n_nodes)\n other_way_receivers = other_way_senders - 1\n\n senders = np.concatenate((one_way_senders, other_way_senders))\n receivers = np.concatenate((one_way_receivers, other_way_receivers))\n\n return senders, receivers\n\n def intersperse(self,\n name_to_array: Optional[Dict[str, Tensor]] = None,\n global_node: Optional[Tensor] = None, # shape (b, #feats)\n levels: Optional[Tensor] = None, # shape (b, #levels, #feats)\n layers: Optional[Tensor] = None # shape (b, #layers, #feats)\n ) -> Tensor:\n \"\"\"\n Either name_to_array dict OR ALL OF global_node, levels, layers mustnt be None\n \"\"\"\n global_node, levels, layers = self.get_data_types(name_to_array, global_node, levels, layers)\n\n if global_node.shape[-1] != layers.shape[-1] or levels.shape[-1] != layers.shape[-1]:\n raise ValueError(\"Expected all node types to have same dimensions. Project them first or pad them instead!\")\n\n batch_size, _, n_feats = levels.shape\n\n interspersed_data = torch.empty((batch_size, self.n_nodes, n_feats))\n interspersed_data[:, self.GLOBAL_NODE, :] = global_node\n interspersed_data[:, self.LEVEL_NODES, :] = levels\n interspersed_data[:, self.LAYER_NODES, :] = layers\n\n interspersed_data = interspersed_data.to(global_node.device)\n return interspersed_data\n\n def intersperse_no_levels(self,\n name_to_array: Optional[Dict[str, Tensor]] = None,\n global_node: Optional[Tensor] = None, # shape (b, #feats)\n layers: Optional[Tensor] = None, **kwargs # shape (b, #layers, #feats)\n ) -> Tensor:\n \"\"\"\n Either name_to_array dict OR ALL OF global_node, layers mustnt be None\n \"\"\"\n global_node, _, layers = self.get_data_types(name_to_array, global_node, None, layers)\n\n if global_node.shape[-1] != layers.shape[-1]:\n raise ValueError(\"Expected all node types to have same dimensions. 
Project them first or pad them instead!\")\n\n batch_size, _, n_feats = layers.shape\n interspersed_data = torch.empty((batch_size, self.n_nodes, n_feats))\n interspersed_data[:, self.GLOBAL_NODE, :] = global_node\n interspersed_data[:, self.LAYER_NODES, :] = layers\n interspersed_data = interspersed_data.to(global_node.device)\n return interspersed_data\n\n def pad_features(self,\n name_to_array: Optional[Dict[str, Tensor]] = None,\n global_node: Optional[Tensor] = None, # shape (b, #feats)\n levels: Optional[Tensor] = None, # shape (b, #levels, #feats)\n layers: Optional[Tensor] = None, # shape (b, #layers, #feats)\n padding: float = 0.0\n ) -> Tensor:\n \"\"\"\n Either name_to_array dict OR ALL OF global_node, levels, layers mustnt be None\n \"\"\"\n global_node, levels, layers = self.get_data_types(name_to_array, global_node, levels, layers)\n\n data_size = global_node.shape[0]\n n_global_feats, n_level_feats, n_layer_feats = global_node.shape[-1], levels.shape[-1], layers.shape[-1]\n max_features = max(n_global_feats, n_level_feats, n_layer_feats)\n\n padded_data = torch.ones((data_size, self.n_nodes, max_features))\n padded_data *= padding # set all values to padding by default\n padded_data[:, self.GLOBAL_NODE, :n_global_feats] = global_node\n padded_data[:, self.LEVEL_NODES, :n_level_feats] = levels\n padded_data[:, self.LAYER_NODES, :n_layer_feats] = layers\n\n padded_data = padded_data.to(global_node.device)\n return padded_data\n\n def duplicate_features(\n self,\n name_to_array: Optional[Dict[str, Tensor]] = None,\n global_node: Optional[Tensor] = None, # shape (b, #feats)\n levels: Optional[Tensor] = None, # shape (b, #levels, #feats)\n layers: Optional[Tensor] = None, # shape (b, #layers, #feats)\n ) -> Tensor:\n \"\"\" Duplicate global (and optionally level) features across all layers\n\n level_policy: If 'drop', level features are simply dropped,\n If 'concat', all levels but one are concatenated to its adjacent layer\n drop_last_level: Only used if level_policy is concat, i.e. 
all levels but one are concatenated to the layers.\n\n Returns:\n A (b, #layers, d) array/Tensor,\n where d = #layer-feats + #global-feats (+ #levels-feats, if not drop_levels)\n \"\"\"\n global_node, levels, layers = self.get_data_types(name_to_array, global_node, levels, layers)\n\n if self.drop_node_encoding:\n global_node = global_node[:, :-self.node_encoding_len]\n layers = layers[:, :, :-self.node_encoding_len]\n levels = levels[:, :, :-self.node_encoding_len]\n\n data_size, n_layers, n_layer_feats = layers.shape\n n_global_feats, n_level_feats = global_node.shape[-1], levels.shape[-1]\n n_layers_feats_with_duplicates = n_layer_feats + n_global_feats\n # print(data_size, n_layers, n_layer_feats, n_global_feats, n_level_feats, n_layers_feats_with_duplicates)\n if self.use_level_features:\n n_layers_feats_with_duplicates += n_level_feats\n\n all_data = torch.empty((data_size, n_layers, n_layers_feats_with_duplicates))\n all_data[:, :, :n_layer_feats] = layers\n all_data[:, :, n_layer_feats:n_layer_feats + n_global_feats] = global_node[:, None, :]\n if self.use_level_features:\n # Will concat level features to adjacent layers, but will drop information from one (the first/last) level!\n for i in range(n_layers):\n level_i = i if self.drop_last_level else i + 1\n all_data[:, i, n_layer_feats + n_global_feats:] = levels[:, level_i, :]\n\n all_data = all_data.to(global_node.device)\n return all_data\n\n def get_data_types(\n self,\n name_to_array: Optional[Dict[str, Tensor]] = None,\n global_node: Optional[Tensor] = None, # shape (b, #feats)\n levels: Optional[Tensor] = None, # shape (b, #levels, #feats)\n layers: Optional[Tensor] = None, # shape (b, #layers, #feats)\n ) -> Tuple[Tensor, Tensor, Tensor]:\n if name_to_array is not None:\n global_node = name_to_array[GLOBALS]\n levels = name_to_array[LEVELS] if LEVELS in name_to_array.keys() else None\n layers = name_to_array[LAYERS]\n\n return global_node, levels, layers\n\n def get_output_mask(self) -> Tensor:\n # GCN output shape out = (n, self.n_nodes, gcn-out-dim)\n # in GCN gcn-out-dim = 2\n # out[:, get_output_mask(), :] has shape (n, len(self.LEVEL_NODES), gcn-out-dim)\n if not hasattr(self, 'LEVEL_NODES') or self.preprocessing_type in self.ONLY_LEVEL_NODES:\n return torch.ones(self.n_nodes).bool()\n else:\n level_mask = torch.zeros(self.n_nodes)\n level_mask[self.LEVEL_NODES] = 1\n return level_mask.bool()\n\n def __str__(self):\n return self.as_string\n\n\ndef gn_input_dict_renamer_layer_nodes(x: Dict[str, Tensor]):\n x[NODES] = x.pop(LAYERS)\n x[EDGES] = x.pop(LEVELS)[1:-1, :] # remove the surface and toa level\n x[EDGES] = repeat(x[EDGES], \"e d -> (repeat e) d\", repeat=2) # bidirectional edges\n # x[EDGES] = repeat(x[EDGES], \"b e d -> b (repeat e) d\", repeat=2) # bidirectional edges\n return x\n\n\ndef gn_input_dict_renamer_layer_nodes_batched(x: Dict[str, Tensor]):\n x[NODES] = x.pop(LAYERS)\n x[EDGES] = x.pop(LEVELS)[:, 1:-1, :] # remove the surface and toa level\n x[EDGES] = repeat(x[EDGES], \"b e d -> b (repeat e) d\", repeat=2) # bidirectional edges\n return x\n\n\ndef gn_input_dict_renamer_level_nodes(x: Dict[str, Tensor]):\n x[NODES] = x.pop(LEVELS)\n x[EDGES] = x.pop(LAYERS)\n x[EDGES] = repeat(x[EDGES], \"e d -> (repeat e) d\", repeat=2) # bidirectional edges\n return x\n\n\ndef gn_input_dict_renamer_level_nodes_batched(x: Dict[str, Tensor]):\n x[NODES] = x.pop(LEVELS)\n x[EDGES] = x.pop(LAYERS)\n # x[GLOBALS] = x.pop(GLOBALS)\n x[EDGES] = repeat(x[EDGES], \"b e d -> b (repeat e) d\", repeat=2) # bidirectional 
edges\n return x\n" ]
[ [ "torch.zeros", "numpy.concatenate", "torch.ones", "numpy.arange", "torch.empty" ] ]
TariqAHassan/byol-pytorch
[ "7be5b87b7dfd41eec8a1b1c2d44b0211a30673da" ]
[ "byol_pytorch/byol_pytorch.py" ]
[ "import copy\nimport random\nfrom functools import wraps\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom torchvision import transforms as T\n\n# helper functions\n\ndef default(val, def_val):\n return def_val if val is None else val\n\ndef flatten(t):\n return t.reshape(t.shape[0], -1)\n\ndef singleton(cache_key):\n def inner_fn(fn):\n @wraps(fn)\n def wrapper(self, *args, **kwargs):\n instance = getattr(self, cache_key)\n if instance is not None:\n return instance\n\n instance = fn(self, *args, **kwargs)\n setattr(self, cache_key, instance)\n return instance\n return wrapper\n return inner_fn\n\ndef get_module_device(module):\n return next(module.parameters()).device\n\ndef set_requires_grad(model, val):\n for p in model.parameters():\n p.requires_grad = val\n\n# loss fn\n\ndef loss_fn(x, y):\n x = F.normalize(x, dim=-1, p=2)\n y = F.normalize(y, dim=-1, p=2)\n return 2 - 2 * (x * y).sum(dim=-1)\n\n# augmentation utils\n\nclass RandomApply(nn.Module):\n def __init__(self, fn, p):\n super().__init__()\n self.fn = fn\n self.p = p\n def forward(self, x):\n if random.random() > self.p:\n return x\n return self.fn(x)\n\ndef _make_default_aug(image_size):\n # default SimCLR augmentation\n return torch.nn.Sequential(\n RandomApply(\n T.ColorJitter(0.8, 0.8, 0.8, 0.2),\n p=0.3\n ),\n T.RandomGrayscale(p=0.2),\n T.RandomHorizontalFlip(),\n RandomApply(\n T.GaussianBlur((3, 3), (1.0, 2.0)),\n p=0.2\n ),\n T.RandomResizedCrop((image_size, image_size)),\n T.Normalize(\n mean=torch.tensor([0.485, 0.456, 0.406]),\n std=torch.tensor([0.229, 0.224, 0.225])),\n )\n\n# exponential moving average\n\nclass EMA():\n def __init__(self, beta):\n super().__init__()\n self.beta = beta\n\n def update_average(self, old, new):\n if old is None:\n return new\n return old * self.beta + (1 - self.beta) * new\n\ndef update_moving_average(ema_updater, ma_model, current_model):\n for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):\n old_weight, up_weight = ma_params.data, current_params.data\n ma_params.data = ema_updater.update_average(old_weight, up_weight)\n\n# MLP class for projector and predictor\n\nclass MLP(nn.Module):\n def __init__(self, dim, projection_size, hidden_size = 4096):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(dim, hidden_size),\n nn.BatchNorm1d(hidden_size),\n nn.ReLU(inplace=True),\n nn.Linear(hidden_size, projection_size)\n )\n\n def forward(self, x):\n return self.net(x)\n\n# a wrapper class for the base neural network\n# will manage the interception of the hidden layer output\n# and pipe it into the projecter and predictor nets\n\nclass NetWrapper(nn.Module):\n def __init__(self, net, projection_size, projection_hidden_size, layer = -2):\n super().__init__()\n self.net = net\n self.layer = layer\n\n self.projector = None\n self.projection_size = projection_size\n self.projection_hidden_size = projection_hidden_size\n\n self.hidden = {}\n self.hook_registered = False\n\n def _find_layer(self):\n if type(self.layer) == str:\n modules = dict([*self.net.named_modules()])\n return modules.get(self.layer, None)\n elif type(self.layer) == int:\n children = [*self.net.children()]\n return children[self.layer]\n return None\n\n def _hook(self, _, input, output):\n device = input[0].device\n self.hidden[device] = flatten(output)\n\n def _register_hook(self):\n layer = self._find_layer()\n assert layer is not None, f'hidden layer ({self.layer}) not found'\n handle = layer.register_forward_hook(self._hook)\n 
self.hook_registered = True\n\n @singleton('projector')\n def _get_projector(self, hidden):\n _, dim = hidden.shape\n projector = MLP(dim, self.projection_size, self.projection_hidden_size)\n return projector.to(hidden)\n\n def get_representation(self, x):\n if self.layer == -1:\n return self.net(x)\n\n if not self.hook_registered:\n self._register_hook()\n\n self.hidden.clear()\n _ = self.net(x)\n hidden = self.hidden[x.device]\n self.hidden.clear()\n\n assert hidden is not None, f'hidden layer {self.layer} never emitted an output'\n return hidden\n\n def forward(self, x, return_projection = True):\n representation = self.get_representation(x)\n\n if not return_projection:\n return representation\n\n projector = self._get_projector(representation)\n projection = projector(representation)\n return projection, representation\n\n# main class\n\nclass BYOL(nn.Module):\n def __init__(\n self,\n net,\n image_size,\n channels = 3,\n hidden_layer = -2,\n projection_size = 256,\n projection_hidden_size = 4096,\n augment_fn = None,\n augment_fn2 = None,\n moving_average_decay = 0.99,\n use_momentum = True,\n ):\n super().__init__()\n self.net = net\n\n self.augment1 = augment_fn or _make_default_aug(image_size)\n self.augment2 = default(augment_fn2, self.augment1)\n\n self.online_encoder = NetWrapper(net, projection_size, projection_hidden_size, layer=hidden_layer)\n\n self.use_momentum = use_momentum\n self.target_encoder = None\n self.target_ema_updater = EMA(moving_average_decay)\n\n self.online_predictor = MLP(projection_size, projection_size, projection_hidden_size)\n\n # get device of network and make wrapper same device\n device = get_module_device(net)\n self.to(device)\n\n # send a mock image tensor to instantiate singleton parameters\n self.forward(torch.randn(2, channels, image_size, image_size, device=device))\n\n @singleton('target_encoder')\n def _get_target_encoder(self):\n target_encoder = copy.deepcopy(self.online_encoder)\n set_requires_grad(target_encoder, False)\n return target_encoder\n\n def reset_moving_average(self):\n del self.target_encoder\n self.target_encoder = None\n\n def update_moving_average(self):\n assert self.use_momentum, 'you do not need to update the moving average, since you have turned off momentum for the target encoder'\n assert self.target_encoder is not None, 'target encoder has not been created yet'\n update_moving_average(self.target_ema_updater, self.target_encoder, self.online_encoder)\n\n def forward(\n self,\n x,\n return_embedding = False,\n return_projection = True\n ):\n if return_embedding:\n return self.online_encoder(x, return_projection = return_projection)\n\n image_one, image_two = self.augment1(x), self.augment2(x)\n\n online_proj_one, _ = self.online_encoder(image_one)\n online_proj_two, _ = self.online_encoder(image_two)\n\n online_pred_one = self.online_predictor(online_proj_one)\n online_pred_two = self.online_predictor(online_proj_two)\n\n with torch.no_grad():\n target_encoder = self._get_target_encoder() if self.use_momentum else self.online_encoder\n target_proj_one, _ = target_encoder(image_one)\n target_proj_two, _ = target_encoder(image_two)\n target_proj_one.detach_()\n target_proj_two.detach_()\n\n loss_one = loss_fn(online_pred_one, target_proj_two.detach())\n loss_two = loss_fn(online_pred_two, target_proj_one.detach())\n\n loss = loss_one + loss_two\n return loss.mean()\n" ]
[ [ "torch.nn.Linear", "torch.nn.functional.normalize", "torch.no_grad", "torch.nn.ReLU", "torch.nn.BatchNorm1d", "torch.tensor", "torch.randn" ] ]
gregw18/KaggleAirbusShipDetection
[ "efe0b872d5460da5b8650785a046c805bc0a9918" ]
[ "CreateSegMasks.py" ]
[ "# Create mask file for each source file, using data from segmentation file, which is RLE.\r\n# In this case, RLE means that there is one line in the file for each boat in a file. Each line\r\n# consists of a number of pairs. The first element in the pair is the starting pixel number and the\r\n# second is the number of pixels that are also part of that boat. The pixel numbers start at 0 in the\r\n# top left corner of the image then go down before going across.\r\n# Masks will be black where objects are - 255 for each channel - and 0 everwhere else.\r\n# November 20, 2018 change. Since second sample network is scaling and centering masks, to\r\n# have values of either 0 or 1, am creating new function here to create mask of 0 for no boat,\r\n# 1 for boat, so can skip the mask normalization when running the network, hopefully making\r\n# target clearer.\r\n# Modified May 8, 2019 to save masks as png, not jpg, to avoid jpeg compression losses.\r\n\r\n\r\nimport os\r\nimport matplotlib.image as mpimg\r\nimport numpy as np\r\nimport pandas as pd\r\nimport math\r\n\r\n\r\n# Globals\r\n# Live Values\r\nbase_dir = '/home/ubuntu/notebooks/kaggle/AirbusShipId/data'\r\n#base_dir = '/home/ubuntu/fastai/practical/exercises/lesson1/ocean/train/bigboats'\r\nlabelsFile = 'train_ship_segmentations_v2.csv' # File containing full data\r\n\r\n# Test Values\r\n#base_dir = '/home/ubuntu/notebooks/kaggle/AirbusShipId/test'\r\n#base_dir = 'E:/code/kaggle/Airbus ShipId/data'\r\n#base_dir = 'E:/code/kaggle/Airbus ShipId/segment_progtest'\r\n#labelsFile = 'train_ship_segmentations_small_test.csv' # File containing first 202 rows, for testing reading file.\r\n#base_dir= '/home/gregw/code/Kaggle/ShipId'\r\n\r\ntrain_dir = os.path.join(base_dir, 'train/images')\r\nvalidate_dir = os.path.join(base_dir, 'validate/images')\r\ntest_dir = os.path.join(base_dir, 'test/images')\r\n\r\n#test_dir= '/home/ubuntu/fastai/practical/exercises/lesson1/ocean/train/bigboats'\r\nimg_size = 768\r\nimg_shape = (img_size, img_size, 3)\r\n\r\nsegResultsFilename = os.path.join(base_dir, labelsFile)\r\n\r\n\r\ndef getFileToDirMap(img_dirs):\r\n # Create mapping from filename to directory name, for each file in each directory in given collection.\r\n file_map = {}\r\n for src_dir in img_dirs:\r\n srcFiles = [name.lower() for name in os.listdir(src_dir)\r\n if os.path.isfile(os.path.join(src_dir, name))]\r\n num_files = len(srcFiles)\r\n print( \"For directory \", src_dir, \", working on \", num_files, \" files.\")\r\n for name in srcFiles:\r\n if not ( name in file_map):\r\n file_map[name] = src_dir\r\n else:\r\n print(\"found duplicate filename: \", name, \" in directory: \", src_dir)\r\n\r\n return file_map\r\n\r\n\r\ndef createMasks(file_to_dir_map):\r\n # Create a mask for each image, in parallel directory called labels.\r\n # Note that read_csv seems to automatically skip the header row at the top of the file.\r\n segResults = pd.read_csv(segResultsFilename, sep=',', index_col='ImageId')\r\n print(\"1. 
segResults.shape\", segResults.shape)\r\n this_mask = np.zeros(img_shape, dtype = np.uint8)\r\n last_filename = 'zzz'\r\n n = 1\r\n for row in segResults.itertuples():\r\n print(row[0], \", \", row[1])\r\n this_filename = row[0].lower()\r\n # Extra check for testing - don't bother creating mask if not going to save this file, because\r\n # it isn't in the testing directories.\r\n if this_filename in file_to_dir_map:\r\n if not (this_filename == last_filename):\r\n if not (last_filename == 'zzz'):\r\n saveMaskAsPng(this_mask, last_filename, file_to_dir_map)\r\n this_mask = np.zeros(img_shape, dtype=np.uint8)\r\n last_filename = this_filename\r\n if not pd.isnull(row[1]):\r\n pixels = getPixels(row[1])\r\n applyPixelsBinary(pixels, this_mask )\r\n n += 1\r\n #if n > 40: break\t# Used for testing, so don't have to go through all images.\r\n\r\n # Save last file.\r\n saveMaskAsPng(this_mask, last_filename, file_to_dir_map)\r\n\r\n\r\ndef getPixels(src_line):\r\n # Data in file is pixel number, number of pixels pairs, all space delimited.\r\n # Want to return array of pixel locations (row, column) which are boat.\r\n\r\n boatPixels = []\r\n list = src_line.split(\" \" )\r\n #print( 'list: ', list )\r\n for i in range(0, len(list), 2):\r\n pixel1 = getPixel(list[i])\r\n\r\n # After finding row, col coordinates of given pixel number, store it, and next n pixels,\r\n # where n is the next item in list.\r\n #print( \"adding \", list[i+1], \" pixels.\" )\r\n for j in range(int(list[i+1])):\r\n boatPixels.append([pixel1[0], pixel1[1]])\r\n pixel1[0] += 1\r\n\r\n return boatPixels\r\n\r\n\r\ndef getPixel(pixel_num):\r\n # Convert a pixel number to a row, col coordinate.\r\n # Assuming picture size of 768*768. Turns out that they count down and then\r\n # across, so pixel 2 is row 1, column 0. Also, since first pixel is 1 rather than 0,\r\n # have to decrement before start arithmetic.\r\n nPixelNum = int(pixel_num) - 1\r\n y = math.trunc(nPixelNum / img_size)\r\n x = nPixelNum % img_size\r\n\r\n return [x, y]\r\n\r\n\r\ndef applyPixelsBinary( pixels, mask ):\r\n # Change all channels for specified pixels to 1, in given mask.\r\n # Changes provided mask.\r\n for row, col in pixels:\r\n mask[row, col] = [1, 1, 1]\r\n\r\n\r\ndef saveMaskAsPng(this_mask, filename, file_to_dir_map):\r\n # Receives b&w mask, name of source jpg file, directory original jpg file was in.\r\n # Save mask as png in labels dir next to directory original image was in.\r\n if filename in file_to_dir_map:\r\n src_dir = file_to_dir_map[filename]\r\n dest_dir, zz = os.path.split(src_dir)\r\n dest_dir = os.path.join(dest_dir, 'labels')\r\n ensureDirExists(dest_dir)\r\n base, ext = os.path.splitext(filename)\r\n new_filename = base + \".png\"\r\n dest_filename = os.path.join(dest_dir, new_filename)\r\n mpimg.imsave(dest_filename, this_mask, format='png')\r\n else:\r\n print( \"Unable to find file to directory mapping for file \", filename)\r\n\r\n\r\ndef ensureDirExists(targetDir) :\r\n # Ensure that given directory exists. 
(Create it if it doesn't.)\r\n\r\n if not ( os.path.isdir(targetDir)):\r\n os.makedirs(targetDir)\r\n return\r\n\r\n\r\ndef createEmptyMask():\r\n this_mask = np.zeros(img_shape, dtype = np.uint8)\r\n mpimg.imsave('empty.png', this_mask, format='png')\r\n\r\n\r\n#img_dirs = [test_dir]\r\nimg_dirs = [train_dir, validate_dir, test_dir]\r\n\r\n# Create dictionary to map from image file name to directory where it is located.\r\nprint ('Started creating file to dir map.')\r\nfile_to_dir_map = getFileToDirMap(img_dirs)\r\nprint ('Finished creating file to dir map.')\r\ncreateMasks(file_to_dir_map)\r\n\r\n#createEmptyMask()\r\n\r\n\r\nprint(\"complete\")\r\n" ]
[ [ "pandas.isnull", "pandas.read_csv", "matplotlib.image.imsave", "numpy.zeros" ] ]
daleroberts/QGIS-Classifier-Plugin
[ "c0e302a98c7370115dc6c8ac2b2fdd48ed6d816f" ]
[ "classify/ClassifyMethod.py" ]
[ "import numpy as np\nimport math\nimport inspect\nfrom collections import namedtuple\n\n# Need to use QObject.tr on method name, description\n\nclass ClassifyMethodError( RuntimeError ):\n def message(self):\n return self.args[0] if len(self.args) > 0 else \"Exception\"\n\nContourMethod=namedtuple('ContourMethod','id name calc required optional description')\n\nmethods=[]\n\ntr=lambda x: x\n\ndef _numberListParam( param, list ):\n if isinstance(list,str):\n list=list.split()\n values=[]\n v0=None\n for v in list:\n try:\n v=float(v)\n except ValueError:\n raise ClassifyMethodError(tr('Invalid value {0} in {1}').format(vs,param))\n if v0 is not None and v <= v0:\n raise ClassifyMethodError(tr('Values not increasing in {0}').format(param))\n values.append(v)\n v0=v\n return np.array(values)\n\ndef _paramValue( pt, param, value ):\n try:\n value=pt(value)\n except:\n raise ClassifyMethodError(tr('Invalid value for contour {0} parameter: {1}')\n .format(param,value))\n return value\n\n_floatParam=lambda p,v: _paramValue(float,p,v)\n_intParam=lambda p,v: _paramValue(int,p,v)\n\n_paramtypes={\n 'min': _floatParam,\n 'max': _floatParam,\n 'ncontour': _intParam,\n 'maxcontour': _intParam,\n 'interval': _floatParam,\n 'offset': _floatParam,\n 'levels': _numberListParam,\n 'mantissa': _numberListParam,\n }\n\ndef _evalParam(p,v):\n if p not in _paramtypes:\n raise ClassifyMethodError(tr('Invalid contour method parameter {0}').format(p))\n return _paramtypes[p](p,v)\n\ndef _sortedLevels(levels):\n levels=np.array(levels)\n levels.sort()\n diff=np.ones(levels.shape)\n diff[1:]=levels[1:]-levels[:-1]\n levels=levels[diff > 0]\n return levels\n\ndef _methodFunc(z,f,name,req,opt,kwa):\n pav=[]\n kwv={}\n for k in req:\n if k not in kwa:\n raise ClassifyMethodError(tr('Parameter {0} missing in {1}').format(k,name))\n pav.append(_evalParam(k,kwa[k]))\n for k in opt:\n v=kwa.get(k)\n if v is not None:\n kwv[k]=_evalParam(k,v)\n return _sortedLevels(f(z,*pav,**kwv))\n\ndef contourmethod(id=None,name=None,description=None):\n def mf2( f ):\n nonlocal id, name, description\n if id is None: \n id=f.__name__\n if name is None:\n name=id\n if description is None:\n description=f.__doc__\n sig=inspect.signature(f)\n req=[]\n opt=[]\n for pn in sig.parameters:\n p=sig.parameters[pn]\n if p.kind == inspect.Parameter.POSITIONAL_ONLY:\n req.append(pn)\n else:\n opt.append(pn)\n func=lambda z,**kwa: _methodFunc(z,f,name,req,opt,kwa)\n methods.append(ContourMethod(id,name,func,req,opt,description))\n return func\n return mf2\n\n\ndef _range( z, min, max ):\n zmin = min if min is not None else np.min(z)\n zmax = max if max is not None else np.max(z)\n return zmin, zmax\n\n@contourmethod('equal','N equal intervals')\ndef calcEqualContours( z, ncontour, min=None, max=None ):\n 'Equally spaced contours between min and max'\n zmin,zmax=_range(z,min,max)\n if zmax <= zmin:\n raise ClassifyMethodError(tr('Invalid contour range - zmin=zmax'))\n if ncontour < 1:\n raise ClassifyMethodError(tr('Invalid number of contours - must be greater than 0'))\n return np.linspace(zmin,zmax,ncontour+1)\n\n@contourmethod('quantile','N quantiles')\ndef calcQuantileContours( z, ncontour, min=None, max=None ):\n 'Contours at percentiles of data distribution between min and max'\n if min is not None:\n z=z[z >= min]\n if max is not None:\n z=z[z <= max]\n if len(z) < 2:\n raise ClassifyMethodError(tr('Not enough z values to calculate quantiles'))\n if ncontour < 1:\n raise ClassifyMethodError(tr('Invalid number of contours - must be greater 
than 0'))\n pcnt=np.linspace(0.0,100.0,ncontour+1)\n return np.percentile(z,pcnt)\n \n\n@contourmethod('log','Logarithmic intervals')\ndef calcLogContours( z, ncontour, min=None, max=None, mantissa=[1,2,5] ):\n 'Contours at up to n values 1, 2, 5 * 10^n between min and max'\n zmin,zmax=_range(z,min,max)\n if ncontour < 1:\n raise ClassifyMethodError(tr('Invalid number of contours - must be greater than 0'))\n for m in mantissa:\n if m < 1.0 or m >= 10.0:\n raise ClassifyMethodError(tr('Log contour mantissa must be between 1 and 10'))\n if zmax <= 0:\n raise ClassifyMethodError(tr('Cannot make log spaced contours on negative or 0 data'))\n if zmin <= 0:\n zmin=zmax/(10**(math.ceil(float(ncontour)/len(mantissa))))\n exp0=int(math.floor(math.log10(zmin)))\n exp1=int(math.ceil(math.log10(zmax)))\n levels=[m*10**e for e in range(exp0,exp1+1) for m in mantissa]\n for i,v in enumerate(levels):\n if v > zmin:\n break\n if i > 1:\n levels=levels[i-1:]\n levels.reverse()\n for i,v in enumerate(levels):\n if v < zmax:\n break\n if i > 1:\n levels=levels[i-1:]\n levels.reverse()\n if len(levels) > ncontour:\n if min is not None and max is None:\n levels=levels[:ncontour]\n else:\n levels=levels[-ncontour:]\n return levels\n\n@contourmethod('interval','Fixed contour interval')\ndef calcIntervalContours( z, interval, offset=0.0,min=None, max=None, maxcontour=50):\n 'Contours at specified spacing between min and max'\n if interval <= 0:\n raise ClassifyMethodError(tr(\"Contour interval must be greater than zero\"))\n zmin,zmax=_range(z,min,max)\n zmin -= offset\n zmax -= offset\n nmin=np.floor(zmin/interval)\n nmax=np.ceil(zmax/interval)\n if nmax == nmin:\n nmax += 1\n nmax += 1\n if nmax-nmin >= maxcontour:\n raise ClassifyMethodError(tr(\"Number of contours ({0}) exceeds maximum allowed ({1})\")\n .format(nmax-nmin,maxcontour))\n return np.arange(nmin,nmax)*interval+offset\n\n@contourmethod('manual','User selected contour levels')\ndef parseContours( z, levels ):\n 'Contours at specified levels'\n return levels\n\ndef getMethod( id ):\n for m in methods:\n if m.id == id:\n return m\n return None\n\ndef calculateLevels( z, method, **params ):\n method=method.lower()\n m=getMethod(method)\n if m is not None:\n return m.calc(z,**params)\n raise ClassifyMethodError(\"Invalid contouring method {0}\".format(method))\n" ]
[ [ "numpy.max", "numpy.array", "numpy.ceil", "numpy.percentile", "numpy.ones", "numpy.min", "numpy.arange", "numpy.linspace", "numpy.floor" ] ]
i2kconnect/lasio
[ "03c88217244a73b7183bc343a21000c479597290" ]
[ "lasio/defaults.py" ]
[ "import re\n\nimport numpy as np\n\nfrom .las_items import (\n HeaderItem, SectionItems, OrderedDict\n)\n\n\ndef get_default_items():\n return {\n 'Version': SectionItems([\n HeaderItem('VERS', '', 2.0, 'CWLS log ASCII Standard -VERSION 2.0'),\n HeaderItem('WRAP', '', 'NO', 'One line per depth step'),\n HeaderItem('DLM', '', 'SPACE', 'Column Data Section Delimiter'),\n ]),\n 'Well': SectionItems([\n HeaderItem('STRT', 'm', np.nan, 'START DEPTH'),\n HeaderItem('STOP', 'm', np.nan, 'STOP DEPTH'),\n HeaderItem('STEP', 'm', np.nan, 'STEP'),\n HeaderItem('NULL', '', -9999.25, 'NULL VALUE'),\n HeaderItem('COMP', '', '', 'COMPANY'),\n HeaderItem('WELL', '', '', 'WELL'),\n HeaderItem('FLD', '', '', 'FIELD'),\n HeaderItem('LOC', '', '', 'LOCATION'),\n HeaderItem('PROV', '', '', 'PROVINCE'),\n HeaderItem('CNTY', '', '', 'COUNTY'),\n HeaderItem('STAT', '', '', 'STATE'),\n HeaderItem('CTRY', '', '', 'COUNTRY'),\n HeaderItem('SRVC', '', '', 'SERVICE COMPANY'),\n HeaderItem('DATE', '', '', 'DATE'),\n HeaderItem('UWI', '', '', 'UNIQUE WELL ID'),\n HeaderItem('API', '', '', 'API NUMBER')\n ]),\n 'Curves': SectionItems([]),\n 'Parameter': SectionItems([]),\n 'Other': '',\n 'Data': np.zeros(shape=(0, 1)),\n }\n\n\nORDER_DEFINITIONS = {\n 1.2: OrderedDict([\n ('Version', ['value:descr']),\n ('Well', [\n 'descr:value',\n ('value:descr', ['STRT', 'STOP', 'STEP', 'NULL'])]),\n ('Curves', ['value:descr']),\n ('Parameter', ['value:descr']),\n ]),\n 2.0: OrderedDict([\n ('Version', ['value:descr']),\n ('Well', ['value:descr']),\n ('Curves', ['value:descr']),\n ('Parameter', ['value:descr'])\n ])}\n\nDEPTH_UNITS = {\n 'FT': (\"FT\", \"F\", \"FEET\", \"FOOT\"),\n 'M': (\"M\", \"METER\", \"METERS\", \"METRE\", \"METRES\"),\n }\n\nREAD_POLICIES = {\n 'default': ['comma-decimal-mark', 'run-on(-)', 'run-on(.)', 'run-on(NaN.)'],\n }\n\nREAD_SUBS = {\n 'comma-decimal-mark': [(re.compile(r'(\\d),(\\d)'), r'\\1.\\2'), ],\n 'run-on(-)': [(re.compile(r'(\\d)-(\\d)'), r'\\1 -\\2'), ],\n 'run-on(.)': [(re.compile(r'-?\\d*\\.\\d*\\.\\d*'), ' NaN NaN '), ],\n 'run-on(NaN.)': [(re.compile(r'NaN[\\.-]\\d+'), ' NaN NaN '), ],\n }\n\nNULL_POLICIES = {\n 'none': [],\n 'strict': ['NULL', ],\n 'common': ['NULL', '(null)', '-',\n '9999.25', '999.25', 'NA', 'INF', 'IO', 'IND'],\n 'aggressive': ['NULL', '(null)', '--',\n '9999.25', '999.25', 'NA', 'INF', 'IO', 'IND',\n '999', '999.99', '9999', '9999.99' '2147483647', '32767',\n '-0.0', ],\n 'all': ['NULL', '(null)', '-',\n '9999.25', '999.25', 'NA', 'INF', 'IO', 'IND',\n '999', '999.99', '9999', '9999.99' '2147483647', '32767', '-0.0',\n 'numbers-only', ],\n 'numbers-only': ['numbers-only', ]\n }\n\nNULL_SUBS = {\n 'NULL': [None, ], # special case to be handled in LASFile.read()\n '999.25': [-999.25, 999.25],\n '9999.25': [-9999.25, 9999.25],\n '999.99': [-999.99, 999.99],\n '9999.99': [-9999.99, 9999.99],\n '999': [-999, 999],\n '9999': [-9999, 9999],\n '2147483647': [-2147483647, 2147483647],\n '32767': [-32767, 32767],\n '(null)': [(re.compile(r' \\(null\\)'), ' NaN'),\n (re.compile(r'\\(null\\) '), 'NaN '),\n (re.compile(r' \\(NULL\\)'), ' NaN'),\n (re.compile(r'\\(NULL\\) '), 'NaN '),\n (re.compile(r' null'), ' NaN'),\n (re.compile(r'null '), 'NaN '),\n (re.compile(r' NULL'), ' NaN'),\n (re.compile(r'NULL '), 'NaN '), ],\n '-': [(re.compile(r' -+ '), ' NaN '), ],\n 'NA': [(re.compile(r'(#N/A)[ ]'), 'NaN '),\n (re.compile(r'[ ](#N/A)'), ' NaN'), ],\n 'INF': [(re.compile(r'(-?1\\.#INF)[ ]'), 'NaN '),\n (re.compile(r'[ ](-?1\\.#INF[0-9]*)'), ' NaN'), ],\n 'IO': 
[(re.compile(r'(-?1\\.#IO)[ ]'), 'NaN '),\n (re.compile(r'[ ](-?1\\.#IO)'), ' NaN'), ],\n 'IND': [(re.compile(r'(-?1\\.#IND)[ ]'), 'NaN '),\n (re.compile(r'[ ](-?1\\.#IND[0-9]*)'), ' NaN'), ],\n '-0.0': [(re.compile(r'(-0\\.0)[ ]'), 'NaN '),\n (re.compile(r'[ ](-0\\.0)'), ' NaN'), ],\n 'numbers-only': [(re.compile(r'([^ 0-9.\\-+]+)[ ]'), 'NaN '),\n (re.compile(r'[ ]([^ 0-9.\\-+]+)'), ' NaN'), ],\n }\n\n" ]
[ [ "numpy.zeros" ] ]
leomrocha/minibrain
[ "e243f7742495c50104ee13ddc6929b1f3cacfc97" ]
[ "predictors/sequence/text/langmodels/utils/helpers.py" ]
[ "\"\"\"\nHelper functions for training and testing, for the moment many things contain absolute paths and is quite dirty\nThe thing is, I'll be improving the code as the research advances\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nfrom datetime import datetime\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom .preprocess_conllu import *\n\n\n# from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks\ndef chunks(data, n, dim=0):\n \"\"\"Yield successive n-sized chunks from data by the dimension dim\"\"\"\n for i in range(0, data.shape[dim], n):\n yield data[i:i + n, :, :]\n\n\ndef pos_loss_function(upos, deprel, target_upos, target_deprel):\n # TODO check a more sofisticated loss function, for the moment only the sum to see if it runs\n # the issue is that upos is easier than deprel (18 vs 278 classes)\n # upos_loss = F.mse_loss(upos, target_upos)\n # deprel_loss = F.mse_loss(deprel, target_deprel)\n upos_loss = F.nll_loss(upos, target_upos.long())\n deprel_loss = F.nll_loss(deprel, target_deprel.long())\n # upos_loss = F.kl_div(upos, target_upos)\n # deprel_loss = F.kl_div(deprel, target_deprel)\n loss = upos_loss + deprel_loss\n # loss = F.kl_div(torch.cat([upos, deprel], dim=-1).contiguous(),\n # torch.cat([target_upos, target_deprel], dim=-1).contiguous())\n return loss, upos_loss, deprel_loss\n\n\nwriter = SummaryWriter()\n\n\ndef train(model, optimizer, loss_function, batches, epoch, ndatapoints, device, max_seq_len=-1):\n torch.cuda.empty_cache()\n model.train()\n train_loss = 0\n # batch_loss = []\n batch_idx = 1\n for b_data in batches:\n b_train = torch.from_numpy(b_data[:, 0, :].astype(\"int32\")).squeeze().to(device).long()\n # max_seq_len means I'll take care only of a part of the input to compare output. 
This is to save computation\n # only and not because of any other reason\n if max_seq_len > 0:\n b_upos = torch.from_numpy(b_data[:, 1, :max_seq_len].astype(\"int32\")).squeeze().to(device).long()\n b_deprel = torch.from_numpy(b_data[:, 2, :max_seq_len].astype(\"int32\")).squeeze().to(device).long()\n else:\n b_upos = torch.from_numpy(b_data[:, 1, :].astype(\"int32\")).squeeze().to(device).long()\n b_deprel = torch.from_numpy(b_data[:, 2, :].astype(\"int32\")).squeeze().to(device).long()\n #\n optimizer.zero_grad()\n dec = model(b_train)\n # last_latent = latent[-1]\n upos, deprel = dec\n # print(emb.shape,emb.dtype, res.shape, res.dtype)\n # print(upos.shape, b_upos.shape)\n # loss = loss_function(upos, deprel, upos_emb(b_upos), deprel_emb(b_deprel))\n # print(\"train tensor shapes: \", b_train.shape, upos.shape, b_upos.shape, deprel.shape, b_deprel.shape)\n loss, upos_loss, deprel_loss = loss_function(upos.view([-1, 18]), deprel.view([-1, 278]), b_upos.view([-1]), b_deprel.view([-1]))\n\n loss.backward()\n train_loss += loss.data.item() # [0]\n writer.add_scalar(\"Loss/train\", loss.data.item(), global_step=(epoch * batch_idx))\n writer.add_scalar(\"Loss/train/upos\", upos_loss.data.item(), global_step=(epoch * batch_idx))\n writer.add_scalar(\"Loss/train/deprel\", deprel_loss.data.item(), global_step=(epoch * batch_idx))\n optimizer.step()\n batch_idx += 1\n del b_train\n del b_upos\n del b_deprel\n torch.cuda.empty_cache()\n writer.add_scalar(\"EpochLoss/train\", train_loss / batch_idx, epoch)\n print('====> Timestamp {} Epoch: {} Average loss: {:.8f}'.format(datetime.now(), epoch, train_loss / ndatapoints))\n return train_loss\n\n\ndef test(model, loss_function, test_data, epoch, device, max_data=40, max_seq_len=-1):\n \"\"\"\n\n :param model:\n :param loss_function:\n :param test_data:\n :param epoch:\n :param device:\n :param max_data: maximum amout of data to test (default 50 due to gpu memory constraints in my pc)\n :return:\n \"\"\"\n model.eval()\n test_loss = 0\n for lang, d in test_data:\n torch.cuda.empty_cache() # make sure the cache is emptied to begin the nexxt batch\n b_test = torch.from_numpy(d[:max_data, 0, :].astype(\"int32\")).squeeze().to(device).long()\n if max_seq_len > 0:\n b_upos = torch.from_numpy(d[:max_data, 1, :max_seq_len].astype(\"int32\")).squeeze().to(device).int() # .long()\n b_deprel = torch.from_numpy(d[:max_data, 2, :max_seq_len].astype(\"int32\")).squeeze().to(device).int() # .long()\n else:\n b_upos = torch.from_numpy(d[:max_data, 1, :].astype(\"int32\")).squeeze().to(device).int() # .long()\n b_deprel = torch.from_numpy(d[:max_data, 2, :].astype(\"int32\")).squeeze().to(device).int() # .long()\n upos, deprel = model(b_test)\n # loss = loss_function(upos.view([-1, 18]), b_upos.view([-1]))\n # print(\"test tensor shapes: \", b_test.shape, upos.shape, b_upos.shape, deprel.shape, b_deprel.shape)\n loss, upos_loss, deprel_loss = loss_function(upos.view([-1, 18]), deprel.view([-1, 278]), b_upos.view([-1]), b_deprel.view([-1]))\n test_loss += loss.data.item()\n writer.add_scalar(\"LangLoss/test/\"+lang, loss.data.item(), global_step=epoch)\n writer.add_scalar(\"LangLoss/test/upos/\"+lang, upos_loss.data.item(), global_step=epoch)\n writer.add_scalar(\"LangLoss/test/deprel/\"+lang, deprel_loss.data.item(), global_step=epoch)\n del b_test\n del b_upos\n del b_deprel\n torch.cuda.empty_cache()\n test_loss /= len(test_data) # although this is not faire as different languages give different results\n writer.add_scalar(\"EpochLangLoss/test/\", test_loss, 
global_step=epoch)\n print('epoch: {}====> Test set loss: {:.8f}'.format(epoch, test_loss))\n\n\ndef test_accuracy(model, test_data, epoch, device, max_data=50):\n torch.cuda.empty_cache() # make sure the cache is emptied\n model.eval()\n epoch_acc = 0\n\n upos_eye = torch.eye(len(UPOS))\n deprel_eye = torch.eye(len(DEPREL))\n with torch.no_grad():\n upos_emb = nn.Embedding(*upos_eye.shape)\n upos_emb.weight.data.copy_(upos_eye)\n upos_emb = upos_emb.to(device)\n\n deprel_emb = nn.Embedding(*deprel_eye.shape)\n deprel_emb.weight.data.copy_(deprel_eye)\n deprel_emb.to(device)\n\n for lang, d in test_data:\n with torch.no_grad():\n b_test = torch.from_numpy(d[:max_data, 0, :].astype(\"int32\")).squeeze().to(device).long()\n # TODO move the testing part to CPU so it takes less memory in the GPU and can keep training while testing\n # doing operations in boolean form so it takes less space in gpu\n b_upos = torch.from_numpy(d[:max_data, 1, :].astype(\"bool\")).squeeze().to(device).long()\n b_deprel = torch.from_numpy(d[:max_data, 2, :].astype(\"bool\")).squeeze().to(device).long()\n _, _, _, dec = model(b_test)\n # last_latent = latent[-1]\n upos, deprel = dec\n ones = torch.ones(1).to(device)\n zeros = torch.zeros(1).to(device)\n upos = torch.where(upos > 0.9, ones, zeros).bool().to(device)\n deprel = torch.where(deprel > 0.9, ones, zeros).bool().to(device)\n upos = upos.view([-1, 18])\n deprel = deprel.view([-1, 278])\n\n # FIXME this accuracy measurement does not work.\n upos_acc = (upos == upos_emb(b_upos).view([-1, 18])).sum().item() / upos.shape[0]\n deprel_acc = (deprel == deprel_emb(b_deprel).view([-1, 278])).sum().item() / deprel.shape[0]\n acc = (upos_acc + deprel_acc) / 2\n # print(\"accuracy : \", acc, upos_acc, deprel_acc)\n writer.add_scalar(\"LangAccuracy/test/\" + lang, acc, global_step=epoch)\n writer.add_scalar(\"LangAccuracy/test/upos/\" + lang, upos_acc, global_step=epoch)\n writer.add_scalar(\"LangAccuracy/test/deprel/\" + lang, deprel_acc, global_step=epoch)\n\n del b_test\n del b_upos\n del b_deprel\n torch.cuda.empty_cache()\n epoch_acc /= len(test_data) # although this is not faire as different languages give different results\n writer.add_scalar(\"EpochLangAccuracy/test/\", epoch_acc, global_step=epoch)\n print('epoch: {}====> Test Accuracy set loss: {:.4f}'.format(epoch, acc))\n pass\n\n\ndef load_test_data(base_dir, max_samples=-1, max_seq_len=-1):\n \"\"\"finds all ud-treebank data that was pre-processed and saved in numpy and loads it.\n Each file is loaded and kept in a tuple (lang, dataset) and returns a list of those values\n if max_samples or max_seq_len are set to a nunmber greater than zero these will limit the data returned\n \"\"\"\n # load testing data ALL the training data\n\n # get all file paths for testing\n all_fnames = get_all_files_recurse(base_dir)\n fnames = [f for f in all_fnames if \"test-charse\" in f and f.endswith(\".npy\")]\n # load all test files\n test_data = []\n for f in fnames:\n data = np.load(f)\n # data is shape: [total samples, data channels (3), 1024]\n # print(\"data loading shape: \", data.shape)\n if max_seq_len > 0:\n data = data[:, :, :max_seq_len]\n if max_samples > 0:\n data = data[:max_samples, :, :]\n lang_name = path_leaf(f).split(\"-ud\")[0]\n test_data.append((lang_name, data))\n return test_data\n\n" ]
[ [ "torch.zeros", "torch.utils.tensorboard.SummaryWriter", "torch.no_grad", "numpy.load", "torch.ones", "torch.cuda.empty_cache", "torch.nn.Embedding", "torch.where" ] ]
r0leGetter/b_thesis_top
[ "d2e7533909a1104726b03d422d12be582f35f716" ]
[ "src/marginal/marginal.py" ]
[ "from abc import ABCMeta, abstractmethod\nfrom scipy.stats.kde import gaussian_kde\nfrom scipy import integrate\nfrom scipy.stats import norm\nimport pandas as pd\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nfrom matplotlib.patches import Ellipse\nimport sys\nimport numpy as np\nfrom sklearn import mixture\nimport warnings\nfrom sklearn.neighbors.kde import KernelDensity\nfrom sklearn.grid_search import GridSearchCV\nfrom numpy import atleast_2d\nfrom numpy import newaxis\nfrom scipy import integrate\nfrom statistics import variance\nfrom typing import List, Tuple, Dict\n\n#marinal object. Gaussian,Kde,..(etc)\n#ignore deprecation warining\nwarnings.filterwarnings('ignore')\n\ndef inner_import():\n #for mutual reference\n global measure\n global share\n from measuring import measure\n from sharing import shared as share\n\nclass Marginal(metaclass=ABCMeta):\n def __init__(self):\n self.option_name='option_'\n @abstractmethod\n def pdf(self, x: float) -> float:\n raise NotImplementedError\n\n def cdf(self, x: float) -> float:\n ret=integrate.quad(self.pdf,-float('inf'), x)[0]\n if math.isnan(ret):\n ret=.0\n elif ret>1:\n ret=1.0\n return ret\n \n @abstractmethod\n def set_param(self,**args):\n raise NotImplementedError\n #log_param to utils/util.py for cluster\n def get_option_name(self)->str:\n return self.option_name\n def get_marg_name(self) -> str:\n return self.marg_name\n def get_dir_name(self) -> str:\n return self.marg_name+'/'+self.option_name\n def get_param(self)->dict:\n return self.param\n\ndef factory_marg(marg_name:str,marg_option=None)->Marginal:\n if marg_name==share.KDE_CV:\n marg=KdeCv(marg_name,marg_option['cont_kernel'],marg_option['cont_space'],marg_option['cont_search_list'],marg_option['disc_kernel'],marg_option['disc_space'],disc_search_list=marg_option['disc_search_list'])\n elif marg_name==share.GAUSSIAN:\n marg=Norm(marg_name)\n else:\n sys.stderr.write('invalid marg_name')\n sys.exit(share.ERROR_STATUS)\n return marg\n\ndef tophat(x:float) -> float:\n if -1.0 <= x and x <= 1.0:\n return 0.5\n else:\n return 0.0\n\ndef get_slv_sct(data:pd.DataFrame, bw:float) -> float:\n kde=gaussian_kde(data,bw_method=bw)\n n=data.shape[0]\n #odd\n return kde._norm_factor/n/np.sqrt(2*np.pi)\n\ndef get_search_array(space_type:str,start:int,end:int,size:int,inner_size=3)->np.array:\n#return flatten([[10^start,...],...[line_space.size=inner_size],..[...10^end]) or linspace\n if space_type=='log':\n source1=np.logspace(start,end,size)\n end_point=source1[-1]\n source2=np.delete(source1,0)\n source2=np.append(source2,end_point)\n space_list=list(map(lambda x,y:list(np.linspace(x,y,inner_size,endpoint=False)),source1,source2))\n #space_list[end]=array([end_point,end_point,...])\n space_list.pop()\n search_list=[end_point]\n for space in space_list:\n search_list.extend(space)\n ret=np.array(search_list)\n elif space_type=='line':\n ret=np.linspace(start,end,size)\n else:\n sys.stderr.write('invalid space')\n sys.exit(share.ERROR_STATUS)\n return ret\n\nclass KdeCv(Marginal):\n def __init__(self,marg_name:str,cont_kernel:str,cont_space:str,cont_search_list:List[int],disc_kernel:str,disc_space:str,disc_search_list:List[int]):\n super().__init__()\n #[kernel,space,start,end,size,cv_num]\n self.marg_name=marg_name\n self.option_name=cont_kernel+'_'+cont_space\n for i in cont_search_list:\n self.option_name+='_'+str(i)\n self.option_name+='/'+disc_kernel+'_'+disc_space\n for i in disc_search_list:\n self.option_name+='_'+str(i)\n 
self.cont_option=[cont_kernel,cont_space,cont_search_list]\n self.disc_option=[disc_kernel,disc_space,disc_search_list]\n \n def set_param(self,**args):\n def inner_set_param(training_data: np.array,score_type:str,cv_num=3):\n self.score_type=score_type\n self.data_list=training_data\n self.cv_num=cv_num\n if score_type in share.DISC_SCORE_TYPE_LIST:\n self.option=self.disc_option\n else:\n self.option=self.cont_option\n self.kernel,space,search_list=self.option\n start,end,size=search_list\n self.search_list=get_search_array(space,start,end,size)\n \n self.n=self.data_list.shape[0]\n #untyped by kernel\n self.scott,self.silverman=0,0\n #variance>0 for silverman,scott\n if variance(self.data_list) > .0:\n self.scott=get_slv_sct(self.data_list,'scott')\n self.silverman=get_slv_sct(self.data_list,'silverman')\n self.search_list=np.concatenate([self.search_list,np.array([self.scott,self.silverman])])\n #print(self.search_list)\n #to method\n grid = GridSearchCV(KernelDensity(kernel=self.kernel),\n {'bandwidth': self.search_list},cv=min([self.n,self.cv_num]))\n grid.fit(self.data_list[:,None])#[[data1],[data2],...]\n #best is ideal,bw is actual\n self.best=grid.best_params_['bandwidth']\n self.bw=self.best\n while True:\n tmp=self.cdf(1.0)\n if tmp!=0 and (not tmp==float(\"inf\")) and (not math.isnan(tmp)):\n break;\n self.bw*=10\n self.param={'cv_num':self.cv_num,'search_list':self.search_list,'bw':self.bw,'best_bw':self.best,'kernel':self.kernel,'score_type':self.score_type}\n inner_set_param(args['training_data'],args['score_type'])\n def pdf(self, x: float) -> float:\n res=0\n if self.kernel==share.GAUSSIAN:\n for i in self.data_list:\n temp=((x-i)/self.bw)**2\n res+=math.exp(-1/2*temp)\n res*=1/math.sqrt(2*math.pi)/self.n/self.bw\n elif self.kernel==share.TOPHAT:\n for i in self.data_list:\n res+=tophat((x-i)/self.bw)\n res*=1/self.n/self.bw\n return res\n def cdf(self, x: float) -> float:\n if self.kernel==share.TOPHAT:\n res=0.0\n for v in share.DISC_SCORE_SPACE_DICT[self.score_type]:\n if x == v:\n res+=self.pdf(v)*self.bw\n break\n elif v < x:\n res+=self.pdf(v)*2*self.bw\n else:\n break\n else:\n res=integrate.quad(self.pdf,-float('inf'), x)[0]\n if math.isnan(res):\n res=.0\n elif res >1 :\n res=1\n return res\n\nclass Norm(Marginal):\n def __init__(self,marg_name:str):\n super().__init__()\n self.marg_name=share.GAUSSIAN\n def set_param(self,**args):\n def inner_set_param(training_data:np.array):\n training_data=pd.Series(training_data)#pd.std not equal to np.std\n self.mean = training_data.mean()\n self.sd = training_data.std()\n self.param= {'mean':self.mean,'std':self.sd}\n inner_set_param(args['training_data'])\n def pdf(self, x: float) -> float:\n return norm.pdf(x=x, loc=self.mean, scale=self.sd)\n" ]
[ [ "scipy.stats.norm.pdf", "numpy.delete", "numpy.array", "sklearn.neighbors.kde.KernelDensity", "numpy.sqrt", "numpy.append", "scipy.stats.kde.gaussian_kde", "numpy.linspace", "pandas.Series", "numpy.logspace" ] ]
Gouzhong1223/Dubhe
[ "8959a51704410dc38b595a0926646b9928451c9a" ]
[ "dubhe-visual-server/parser_service/tbparser/summary_reader.py" ]
[ "# -*- coding: UTF-8 -*-\n# MIT License\n#\n# Copyright (c) 2019 Vadim Velicodnii\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files\n# (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge,\n# publish, distribute, sublicense, and/or sell copies of the Software,\n# and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\nfrom collections import namedtuple\nfrom collections.abc import Iterable\nfrom typing import Union, Optional\nimport numpy as np\nimport struct\nfrom io import BytesIO\n# Compatible tensorboard calculation graph\nfrom tensorboard.compat.proto.graph_pb2 import GraphDef\nfrom oneflow.customized.utils import HParamsPluginData\n# from tensorboard.plugins.hparams.plugin_data_pb2 import HParamsPluginData\nfrom tbparser.events_reader import EventReadingError, EventsFileReader\n\nSummaryItem = namedtuple(\n 'SummaryItem', ['tag', 'step', 'wall_time', 'value', 'type']\n)\nGraphItem = namedtuple(\n 'GraphItem', ['wall_time', 'value', 'type']\n)\n\n# tensor data type\n_data_type = {1: 'float',\n 2: 'double',\n 3: 'int32',\n # DT_UINT8 = 4;\n # DT_INT16 = 5;\n # DT_INT8 = 6;\n # DT_STRING = 7;\n # DT_COMPLEX64 = 8; // Single-precision complex\n 9: 'int64',\n 10: 'bool',\n # DT_QINT8 = 11; // Quantized int8\n # DT_QUINT8 = 12; // Quantized uint8\n # DT_QINT32 = 13; // Quantized int32\n # DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. 
Only for cast ops.\n # DT_QINT16 = 15; // Quantized int16\n # DT_QUINT16 = 16; // Quantized uint16\n 17: 'uint16',\n # DT_COMPLEX128 = 18; // Double-precision complex\n # DT_HALF = 19;\n # DT_RESOURCE = 20;\n # DT_VARIANT = 21; // Arbitrary C++ data types\n 22: 'uint32',\n 23: 'uint64'}\n\n\ndef _decode_byte(tensor):\n # If the tensor holds float data (dtype code 1)\n if tensor.dtype == 1:\n return struct.unpack('f', tensor.tensor_content)[0]\n\n\nclass SummaryReader(Iterable):\n \"\"\"\n Iterates over events in all the files in the current logdir.\n \"\"\"\n\n def _get_scalar(self, value):\n \"\"\"\n Decode a scalar event\n :param value: A value field of an event\n :return: Decoded scalar\n \"\"\"\n if value.HasField('simple_value'):\n return value.simple_value\n elif value.HasField('metadata'):\n if value.metadata.plugin_data.plugin_name == 'scalars':\n tensor = value.tensor\n return _decode_byte(tensor)\n return None\n\n def _get_image(self, value) -> Optional[dict]:\n \"\"\"\n Decode an image event\n :param value: A value field of an event\n :return: Decoded image\n \"\"\"\n if value.HasField('image'):\n dic = {\n 'width': value.image.width,\n 'height': value.image.height,\n 'colorspace': value.image.colorspace,\n 'encoded_image_string': value.image.encoded_image_string\n }\n return dic\n return None\n\n def _get_text(self, value) -> Optional[np.ndarray]:\n \"\"\"\n Return text data\n :param value: A value field of an event\n :return: text data\n TODO: Tensorflow API\n \"\"\"\n if value.HasField('metadata'):\n if value.metadata.plugin_data.plugin_name == 'text':\n return np.array([v.decode() for v in value.tensor.string_val])\n return None\n\n def _get_audio(self, value):\n if value.HasField('audio'):\n dic = {\n 'sample_rate': value.audio.sample_rate,\n 'num_channels': value.audio.num_channels,\n 'length_frames': value.audio.length_frames,\n 'encoded_audio_string': value.audio.encoded_audio_string\n }\n return dic\n # if Tensorboard API, use tensor decoder\n elif value.HasField('metadata'):\n if value.metadata.plugin_data.plugin_name == 'audio':\n dic = {\n 'tensor_shape': tuple([i.size for i in value.tensor.tensor_shape.dim]),\n 'string_val': [v for v in value.tensor.string_val]\n }\n if value.tag not in self.audio_exit_tag:\n # record the tag\n self.audio_exit_tag.append(value.tag)\n return dic\n elif value.tag in self.audio_exit_tag:\n dic = {\n 'tensor_shape': tuple([i.size for i in value.tensor.tensor_shape.dim]),\n 'string_val': [v for v in value.tensor.string_val]\n }\n return dic\n return None\n\n def _get_hist(self, value):\n if value.HasField('histo'):\n dic = {\n 'min': value.histo.min,\n 'max': value.histo.max,\n 'num': value.histo.num,\n 'sum': value.histo.sum,\n 'sum_squares': value.histo.sum_squares,\n 'bucket_limit': np.array(value.histo.bucket_limit),\n 'bucket': np.array(value.histo.bucket),\n }\n return dic\n # if Tensorboard API, use tensor decoder\n elif value.HasField('metadata'):\n if value.metadata.plugin_data.plugin_name == 'histograms':\n tensor = value.tensor\n dtype = _data_type[tensor.dtype]\n tensor_shape = tuple([i.size for i in tensor.tensor_shape.dim])\n tensor_content = tensor.tensor_content\n tensor_content = np.frombuffer(tensor_content, dtype=dtype)\n if value.tag not in self.hist_exit_tag:\n # record the tag\n self.hist_exit_tag.append(value.tag)\n return tensor_content.reshape(tensor_shape)\n elif value.tag in self.hist_exit_tag:\n tensor = value.tensor\n dtype = _data_type[tensor.dtype]\n tensor_shape = tuple([i.size for i in tensor.tensor_shape.dim])\n 
tensor_content = tensor.tensor_content\n tensor_content = np.frombuffer(tensor_content, dtype=dtype)\n return tensor_content.reshape(tensor_shape)\n return None\n\n def _get_hparams(self, value):\n if value.HasField('metadata'):\n if value.metadata.plugin_data.plugin_name == 'hparams':\n metadata = value.metadata\n plugin_data = HParamsPluginData()\n plugin_data.ParseFromString(metadata.plugin_data.content)\n return plugin_data\n\n _DECODERS = {\n 'scalar': _get_scalar,\n 'image': _get_image,\n 'text': _get_text,\n 'audio': _get_audio,\n 'hist': _get_hist,\n 'hparams': _get_hparams,\n }\n\n def __init__(\n self,\n fileblock: BytesIO,\n tag_filter: Optional[Iterable] = None,\n types: Iterable = ('scalar',),\n stop_on_error: bool = False\n ):\n \"\"\"\n Initialize a new summary reader\n :param fileblock: Event file block of Tensorboard\n :param tag_filter: A list of tags to keep (`None` for all)\n :param types: A list of types to get.\n :param stop_on_error: Whether to stop on a broken file\n \"\"\"\n self._fileblock = fileblock\n\n self._tag_filter = set(tag_filter) if tag_filter is not None else None\n self._types = set(types)\n self._check_type_names()\n self._stop_on_error = stop_on_error\n # Record the tags that have been read by the parser.\n # If a tag from this list appears again,\n # its type is identified automatically.\n self.scalar_exit_tag = []\n self.image_exit_tag = []\n self.text_exit_tag = []\n self.audio_exit_tag = []\n self.hist_exit_tag = []\n\n def _check_type_names(self):\n if self._types is None:\n return\n if not all(\n type_name in self._DECODERS.keys() or type_name == \"graph\" for type_name in self._types\n ):\n raise ValueError('Invalid type name')\n\n # def _decode_events(self, events: Iterable) -> Optional[Union[SummaryItem, GraphDef]]:\n def _decode_events(self, events: Iterable) \\\n -> Optional[Union[SummaryItem, GraphItem]]:\n \"\"\"\n Convert events to `SummaryItem` instances\n :param events: An iterable with event objects\n :return: A generator with decoded events\n or `None`s if an event can't be decoded\n \"\"\"\n for event in events:\n # yield None\n step = event.step\n wall_time = event.wall_time\n if event.HasField('summary'):\n for value in event.summary.value:\n tag = value.tag\n # if value.HasField('metadata'):\n # continue\n for value_type in self._types:\n if value_type == \"graph\":\n continue\n decoder = self._DECODERS[value_type]\n data = decoder(self, value)\n if data is not None:\n yield SummaryItem(\n tag=tag,\n step=step,\n wall_time=wall_time,\n value=data,\n type=value_type\n )\n else:\n yield None\n elif event.HasField('graph_def'):\n graph = GraphDef()\n graph.ParseFromString(event.graph_def)\n yield GraphItem(\n wall_time=wall_time,\n value=graph,\n type='graph'\n )\n\n def _check_tag(self, tag: str) -> bool:\n \"\"\"\n Check if a tag matches the current tag filter\n :param tag: A string with tag\n :return: A boolean value.\n \"\"\"\n return self._tag_filter is None or tag in self._tag_filter\n\n def __iter__(self) -> SummaryItem:\n \"\"\"\n Iterate over events in all the files in the current logdir\n :return: A generator with `SummaryItem` objects\n \"\"\"\n reader = EventsFileReader(self._fileblock)\n try:\n yield from (\n item for item in self._decode_events(reader)\n if item is not None and all([\n self._check_tag(None if type(item) == GraphItem else item.tag),\n item.type in self._types\n ])\n )\n except EventReadingError:\n if self._stop_on_error:\n raise\n else:\n yield\n" ]
[ [ "numpy.array", "numpy.frombuffer" ] ]
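Note: the histogram decoder above rebuilds ndarrays from raw tensor bytes. A self-contained round-trip showing the np.frombuffer pattern it relies on (synthetic bytes, not a real event file):

import numpy as np

# Fake "tensor_content": raw little-endian float32 bytes, as stored in an event file.
original = np.arange(12, dtype=np.float32).reshape(3, 4)
tensor_content = original.tobytes()
tensor_shape = (3, 4)

# Decode the byte buffer back into an ndarray, mirroring _get_hist above.
decoded = np.frombuffer(tensor_content, dtype=np.float32).reshape(tensor_shape)
assert np.array_equal(decoded, original)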
olasson/SDCND-P4-AdvancedLaneLines
[ "b0b39c5ec255d523ecc3c53a9a488c0f3dbeeab6" ]
[ "code/_draw.py" ]
[ "\"\"\"\nThis file contains internal, supporting functions for detect.py. They are used for debugging and actually drawing the detected lane lines.\n\"\"\"\n\nimport cv2\nimport numpy as np\n\ndef _draw_line(image, line, color = [255, 0, 0], thickness = 10):\n \"\"\"\n Wrapper for cv2.line\n\n Inputs\n ----------\n image: numpy.ndarray\n A single RGB image\n line: numpy.ndarray\n An array containing four values - [x1, y1, x2, y2] - defining a line\n color: list\n A list with three elements defining a color in RGB space\n thickness: int\n How many pixels thick the drawn line will be\n\n Outputs\n -------\n The original 'image' will be modified with a drawn in line\n NOT a copy\n \"\"\"\n\n cv2.line(image, (int(line[0]), int(line[1])),\n (int(line[2]), int(line[3])), color = color, thickness = thickness)\n\ndef _draw_region(image, points):\n \"\"\"\n Draw a region defined by four points\n\n Inputs\n ----------\n image: numpy.ndarray\n A single RGB image\n points: numpy.ndarray\n An array containing exactly four points - [p1, p2, p3, p4] where pN = [xN ,yN]\n\n Outputs\n -------\n image_out: numpy.ndarray\n A copy of 'image' with a region drawn in\n \"\"\"\n\n line_1 = np.array([points[0][0], points[0][1], points[2][0], points[2][1]]) # Top left to bottom left\n line_2 = np.array([points[1][0], points[1][1], points[3][0], points[3][1]]) # Top right to bottom right\n\n line_3 = np.array([points[0][0], points[0][1], points[1][0], points[1][1]]) # Top left to top right\n line_4 = np.array([points[2][0], points[2][1], points[3][0], points[3][1]]) # Bottom left to bottom right\n\n image_out = np.copy(image)\n\n _draw_line(image_out, line_1)\n _draw_line(image_out, line_2)\n _draw_line(image_out, line_3)\n _draw_line(image_out, line_4)\n\n return image_out\n\ndef _draw_lanes(image, n_rows, left_fit, right_fit, thickness = 20, fill_color = [0, 255, 0]):\n \"\"\"\n Draw a region defined by four points\n\n Inputs\n ----------\n image: numpy.ndarray\n A single RGB image\n n_rows: int\n The number of rows in 'image'\n left_fit, right_fit: numpy.ndarray, numpy.ndarray\n Numpy arrays containing polynomial coefficients from np.polyfit\n thickness: int\n How many pixels thick the drawn line will be\n fill_color: list\n List containing three ints describing the RGB color used to fill in between detected lanes\n\n\n Outputs\n -------\n image_out: numpy.ndarray\n A copy of 'image' with both lane lines drawn in and the space between them filled with color\n \"\"\"\n\n y_vals = range(0, n_rows)\n\n left_x_vals = left_fit[0] * y_vals * y_vals + left_fit[1] * y_vals + left_fit[2]\n right_x_vals = right_fit[0] * y_vals * y_vals + right_fit[1] * y_vals + right_fit[2]\n\n image_out = np.zeros_like(image)\n\n cv2.polylines(image_out, np.int_([list(zip(left_x_vals, y_vals))]), False, (255, 0, 0), thickness)\n cv2.polylines(image_out, np.int_([list(zip(right_x_vals, y_vals))]), False, (0, 0, 255), thickness)\n\n if fill_color is not None:\n\n offset = thickness / 2\n\n inner_x = np.concatenate((left_x_vals + offset, right_x_vals[::-1] - offset), axis = 0)\n inner_y = np.concatenate((y_vals, y_vals[::-1]), axis = 0)\n\n cv2.fillPoly(image_out, np.int_([list(zip(inner_x, inner_y))]), color = fill_color)\n\n return image_out\n\ndef _draw_text(image, curvature, deviation, font_color = (0, 255, 0)):\n \"\"\"\n Draw lane line metadata in an image\n\n Inputs\n ----------\n image : numpy.ndarray\n A single RGB image\n curvature: numpy.ndarray\n Tuple containing the left and right lane line curvature\n deviation: float\n How 
much the detected lane lines deviate from the center of the lane\n font_color: list\n List containing three ints describing the RGB color used for the text\n\n\n Outputs\n -------\n image_out: numpy.ndarray\n A copy of 'image' with metadata drawn in\n \"\"\"\n\n curvature_str1 = 'Left Curvature: ' + str(round(curvature[0], 3))\n curvature_str2 = 'Right Curvature: ' + str(round(curvature[1], 3))\n deviation_str = 'Center deviation: ' + str(round(deviation, 3))\n\n cv2.putText(image, curvature_str1, (30, 60), cv2.FONT_HERSHEY_DUPLEX, 1, font_color, 2)\n cv2.putText(image, curvature_str2, (30, 90), cv2.FONT_HERSHEY_DUPLEX, 1, font_color, 2)\n cv2.putText(image, deviation_str, (30, 120), cv2.FONT_HERSHEY_DUPLEX, 1, font_color, 2)" ]
[ [ "numpy.zeros_like", "numpy.array", "numpy.copy", "numpy.concatenate" ] ]
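Note: _draw_lanes above consumes second-order coefficients fitted as x = f(y), with the row index as the independent variable because lane lines are near-vertical. A small sketch of how such coefficients are typically produced and consumed (synthetic lane pixels, not from the project's pipeline):

import numpy as np

y = np.linspace(0, 719, 720)              # assume a 720-row image
x_left = 2e-4 * (y - 360) ** 2 + 300      # synthetic left-lane pixel centers
left_fit = np.polyfit(y, x_left, 2)       # -> [a, b, c] with x = a*y**2 + b*y + c

# Evaluating the fit reproduces the expression used inside _draw_lanes:
x_check = left_fit[0] * y * y + left_fit[1] * y + left_fit[2]
assert np.allclose(x_check, np.polyval(left_fit, y))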
NikolaiSie/django
[ "a19bb9813d7ed957d05edeeaf380988265aae49d" ]
[ "projects/writing/sigurd/views.py" ]
[ "from django.shortcuts import render\nimport numpy as np\nimport datetime\n\nfrom .models import Haiku, Chapter, Poem, Story\n\ndef index(request):\n haiku_id = datetime.datetime.now().strftime(\"%d\")\n haiku = Haiku.objects.get(pk=haiku_id)\n return render(request, 'home.html', {'haiku': haiku, \"line\": haiku_id})\n\n\ndef haiku_by_id(request, haiku_id):\n haiku = Haiku.objects.get(pk=haiku_id)\n return render(request, 'haiku.html', {'haiku': haiku})\n\ndef odinsbane(request):\n prologue = Chapter.objects.get(pk=1)\n chapter1 = Chapter.objects.get(pk=2)\n return render(request, 'odinsbane.html', {'prologue': prologue, 'chapter1': chapter1})\n\ndef poem(request):\n num_poems = Poem.objects.all().count()\n poem = Poem.objects.get(pk=np.random.randint(1,num_poems + 1))\n return render(request, 'poem.html', {'poem': poem})\n\ndef story_by_id(request, story_id):\n story = Story.objects.get(pk=story_id)\n return render(request, 'story.html', {'story': story})\n\n\ndef author(request):\n return render(request, 'author.html')" ]
[ [ "numpy.random.randint" ] ]
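Note: poem() above assumes primary keys run contiguously from 1 to the row count, so deletions can make the random lookup raise Poem.DoesNotExist. A hedged alternative using the standard ORM random ordering (a sketch that assumes the same models module; not runnable outside a configured Django project):

from django.shortcuts import render
# from .models import Poem  # as imported in the module above

def poem(request):
    # order_by('?') asks the database for a random row, so gaps in the
    # pk sequence (deleted poems) can no longer cause a lookup failure.
    poem = Poem.objects.order_by('?').first()
    return render(request, 'poem.html', {'poem': poem})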
caciolai/Generative-Dog-Images-with-BigGan
[ "535d5a9c35ad4187a6f0099c4a02772a45c4a064" ]
[ "src/gan-dogs/model/serialization.py" ]
[ "import tensorflow as tf\n\nfrom .gan import build_gan\n\n\ndef load_model(use_model, num_classes, latent_dim, img_width, img_height, checkpoint_path):\n \"\"\"Load model from checkpoint on disk.\n\n Args:\n use_model (str): name of model (DCGAN, BigGAN)\n num_classes (int): number of classes\n latent_dim (int): dimension of latent space\n img_width (int): width of incoming images\n img_height (int): height of incoming images\n checkpoint_path (str): path of the checkpoint to load on disk\n\n Returns:\n GAN: loaded GAN model\n \"\"\"\n\n gan = build_gan(use_model, num_classes, img_width, img_height)\n\n # Run one dummy forward pass so the subclassed model builds its weights\n # before load_weights() tries to match them against the checkpoint.\n random_latent_vectors = tf.random.normal(\n shape=(1, latent_dim)\n )\n random_labels = tf.math.floor(num_classes * tf.random.uniform((1, 1)))\n\n inputs = (random_latent_vectors, random_labels)\n _ = gan(inputs)\n\n path = checkpoint_path\n # path = \"/content/drive/My Drive/VisionPerception/SavedModel/BigGAN_dogs_64_450_epochs/checkpoint.ckpt\"\n gan.load_weights(path)\n\n return gan\n\n\ndef save_model(gan, save_path):\n \"\"\"Save GAN model to disk\n\n Args:\n gan (GAN): GAN model\n save_path (str): path where to save the model weights\n \"\"\"\n gan.save_weights(save_path)\n" ]
[ [ "tensorflow.random.normal", "tensorflow.random.uniform" ] ]
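Note: load_model() above runs a dummy forward pass before load_weights because subclassed Keras models create their variables lazily. A tiny standalone illustration of that build-then-load pattern (toy model and /tmp path, not the project's GAN):

import tensorflow as tf

# Build a model, force variable creation with one forward pass, then save.
model = tf.keras.Sequential([tf.keras.layers.Dense(4)])
_ = model(tf.random.normal(shape=(1, 8)))   # builds the weights
model.save_weights('/tmp/demo.ckpt')

# A fresh instance must also be built before its weights can be restored.
restored = tf.keras.Sequential([tf.keras.layers.Dense(4)])
_ = restored(tf.random.normal(shape=(1, 8)))
restored.load_weights('/tmp/demo.ckpt')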
Learn-Live/activity_recognition
[ "76fa7bcecc3e422f1ea59fd1aaf576669e1248fb" ]
[ "ar/features/videopose3d/data/prepare_data_h36m.py" ]
[ "# Copyright (c) 2018-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport argparse\nimport os\nimport zipfile\nimport numpy as np\nimport h5py\nfrom glob import glob\nfrom shutil import rmtree\n\nimport sys\n\nsys.path.append('../')\nfrom common.h36m_dataset import Human36mDataset\nfrom common.camera import world_to_camera, project_to_2d, image_coordinates\nfrom common.utils import wrap\n\noutput_filename = 'data_3d_h36m'\noutput_filename_2d = 'data_2d_h36m_gt'\nsubjects = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']\n\nif __name__ == '__main__':\n if os.path.basename(os.getcwd()) != 'data':\n print('This script must be launched from the \"data\" directory')\n exit(0)\n\n parser = argparse.ArgumentParser(description='Human3.6M dataset downloader/converter')\n\n # Convert dataset preprocessed by Martinez et al. in https://github.com/una-dinosauria/3d-pose-baseline\n parser.add_argument('--from-archive', default='', type=str, metavar='PATH', help='convert preprocessed dataset')\n\n # Convert dataset from original source, using files converted to .mat (the Human3.6M dataset path must be specified manually)\n # This option requires MATLAB to convert files using the provided script\n parser.add_argument('--from-source', default='', type=str, metavar='PATH', help='convert original dataset')\n\n # Convert dataset from original source, using original .cdf files (the Human3.6M dataset path must be specified manually)\n # This option does not require MATLAB, but the Python library cdflib must be installed\n parser.add_argument('--from-source-cdf', default='', type=str, metavar='PATH', help='convert original dataset')\n\n args = parser.parse_args()\n\n if args.from_archive and args.from_source:\n print('Please specify only one argument')\n exit(0)\n\n # if os.path.exists(output_filename + '.npz'):\n # print('The dataset already exists at', output_filename + '.npz')\n # exit(0)\n\n if args.from_archive:\n\n print('Extracting Human3.6M dataset from', args.from_archive)\n with zipfile.ZipFile(args.from_archive, 'r') as archive:\n archive.extractall()\n\n print('Converting...')\n output = {}\n for subject in subjects:\n output[subject] = {}\n file_list = glob('h36m/' + subject + '/MyPoses/3D_positions/*.h5')\n # file_list = glob('h36m/' + subject + '/StackedHourglass/*.h5')\n print(subject, file_list)\n assert len(file_list) == 30, \"Expected 30 files for subject \" + subject + \", got \" + str(len(file_list))\n for f in file_list:\n action = os.path.splitext(os.path.basename(f))[0]\n\n if subject == 'S11' and action == 'Directions':\n continue # Discard corrupted video\n\n with h5py.File(f) as hf:\n # check h5 file content: list(hf.items())\n # positions = hf['3D_positions'].value.reshape(32, 3, -1).transpose(2, 0, 1)\n positions = hf['3D_positions'][:].reshape(32, 3, -1).transpose(2, 0, 1)\n positions /= 1000 # Meters instead of millimeters\n output[subject][action] = positions.astype('float32')\n\n print('Saving...')\n print(f'3d: {output_filename}')\n np.savez_compressed(output_filename, positions_3d=output)\n\n # print('Cleaning up...')\n # rmtree('h36m')\n\n print('Done.')\n\n elif args.from_source:\n print('Converting original Human3.6M dataset from', args.from_source)\n output = {}\n\n from scipy.io import loadmat\n\n for subject in subjects:\n output[subject] = {}\n file_list = glob(args.from_source + '/' + subject + '/MyPoseFeatures/D3_Positions/*.cdf.mat')\n 
assert len(file_list) == 30, \"Expected 30 files for subject \" + subject + \", got \" + str(len(file_list))\n for f in file_list:\n action = os.path.splitext(os.path.splitext(os.path.basename(f))[0])[0]\n\n if subject == 'S11' and action == 'Directions':\n continue # Discard corrupted video\n\n # Use consistent naming convention\n canonical_name = action.replace('TakingPhoto', 'Photo') \\\n .replace('WalkingDog', 'WalkDog')\n\n hf = loadmat(f)\n positions = hf['data'][0, 0].reshape(-1, 32, 3)\n positions /= 1000 # Meters instead of millimeters\n output[subject][canonical_name] = positions.astype('float32')\n\n print('Saving...')\n np.savez_compressed(output_filename, positions_3d=output)\n\n print('Done.')\n\n elif args.from_source_cdf:\n print('Converting original Human3.6M dataset from', args.from_source_cdf, '(CDF files)')\n output = {}\n\n import cdflib\n\n for subject in subjects:\n output[subject] = {}\n file_list = glob(args.from_source_cdf + '/' + subject + '/MyPoseFeatures/D3_Positions/*.cdf')\n print(subject, file_list)\n assert len(file_list) == 30, \"Expected 30 files for subject \" + subject + \", got \" + str(len(file_list))\n for f in file_list:\n action = os.path.splitext(os.path.basename(f))[0]\n\n if subject == 'S11' and action == 'Directions':\n continue # Discard corrupted video\n\n # Use consistent naming convention\n canonical_name = action.replace('TakingPhoto', 'Photo') \\\n .replace('WalkingDog', 'WalkDog')\n\n hf = cdflib.CDF(f)\n positions = hf['Pose'].reshape(-1, 32, 3)\n positions /= 1000 # Meters instead of millimeters\n output[subject][canonical_name] = positions.astype('float32')\n\n print('Saving...')\n np.savez_compressed(output_filename, positions_3d=output)\n\n print('Done.')\n\n else:\n print('Please specify the dataset source')\n exit(0)\n\n # Create 2D pose file\n print('')\n print('Computing ground-truth 2D poses...')\n dataset = Human36mDataset(output_filename + '.npz')\n output_2d_poses = {}\n for subject in dataset.subjects():\n output_2d_poses[subject] = {}\n for action in dataset[subject].keys():\n anim = dataset[subject][action]\n\n positions_2d = []\n for cam in anim['cameras']:\n pos_3d = world_to_camera(anim['positions'], R=cam['orientation'], t=cam['translation'])\n # Project 3D points to 2D using the Human3.6M camera projection function.\n pos_2d = wrap(project_to_2d, pos_3d, cam['intrinsic'], unsqueeze=True)\n pos_2d_pixel_space = image_coordinates(pos_2d, w=cam['res_w'], h=cam['res_h'])\n positions_2d.append(pos_2d_pixel_space.astype('float32'))\n output_2d_poses[subject][action] = positions_2d\n\n print('Saving...')\n metadata = {\n 'num_joints': dataset.skeleton().num_joints(),\n 'keypoints_symmetry': [dataset.skeleton().joints_left(), dataset.skeleton().joints_right()]\n }\n print(f'2d: {output_filename_2d}')\n np.savez_compressed(output_filename_2d, positions_2d=output_2d_poses, metadata=metadata)\n\n print('Done.')\n" ]
[ [ "scipy.io.loadmat", "numpy.savez_compressed" ] ]
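Note: the converter above stores a nested dict of float32 arrays with np.savez_compressed. A minimal round-trip showing how such a file is read back (synthetic shapes, not real Human3.6M data; the dict is saved as a 0-d object array, hence allow_pickle and .item()):

import numpy as np

output = {'S1': {'Walking': np.zeros((4, 32, 3), dtype='float32')}}
np.savez_compressed('demo_3d.npz', positions_3d=output)

# Reading it back: allow_pickle is required for the object array,
# and .item() unwraps the dict from the 0-d array container.
data = np.load('demo_3d.npz', allow_pickle=True)['positions_3d'].item()
assert data['S1']['Walking'].shape == (4, 32, 3)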
MatthiasGolomb/effmass
[ "8d61851bdd3f8a60b9c639014effa346bce93ece" ]
[ "effmass/inputs.py" ]
[ "#! /usr/bin/env python3\n\n\"\"\"\nA module for storing electronic structure data and user settings. Currently supported codes are VASP and FHI-Aims (with limited functionality).\n\nThe module contains a :class:`Data` class which parses OUTCAR and PROCAR files using the `vasppy <https://github.com/bjmorgan/vasppy>`_ package. \nA function for parsing DOSCAR files is also provided. \n\nThe module contains a :class:`DataAims` class which parses and stores the `geometry.in`/`calculation.out` files generated for/from a FHI-AIMS calculation.\n\nA :class:`Settings` class stores analysis parameters set by the user.\n\n\"\"\"\nfrom vasppy import procar, outcar\nfrom effmass import extrema\nfrom ase.calculators.castep import Castep\nfrom ase import io \nimport ase.io\nimport math\nimport warnings\nimport numpy as np\n\n\nclass Settings:\n \"\"\"Class for setting analysis parameters.\n\n Attributes: energy_range (float): energy in kT over which the\n segment extends. extrema_search_depth (float): energy in kT from\n bandedge over which to search for extrema. degree_bandfit (int):\n the degree of the polynomial which is used to fit to dispersion data\n when calculating the transport mass.\n \"\"\"\n\n def __init__(self,\n energy_range=0.25,\n extrema_search_depth=0.025,\n bandfit=6):\n \"\"\"Initialises an instance of the Settings class and checks input using\n :meth:`check_settings()`.\n\n Args:\n energy_range (float): energy in eV over which the segment extends. Defaults to 0.25 eV.\n extrema_search_depth (float): energy in eV from bandedge over which to search for extrema. Defaults to 0.025 eV.\n degree_bandfit (int): the degree of the polynomial which is used to fit to dispersion data when calculating the transport mass.\n\n Returns:\n None.\n \"\"\"\n self.energy_range = energy_range\n self.extrema_search_depth = extrema_search_depth\n self.degree_bandfit = bandfit\n self.check_settings()\n\n def check_settings(self):\n \"\"\"Check that Settings class attributes are sane.\n\n Args:\n None.\n\n Returns:\n None.\n \"\"\"\n assert (self.energy_range >\n 0), \"The energy range must be a positive number\"\n assert (self.extrema_search_depth >\n 0), \"The energy depth must be a positive number\"\n assert (\n type(self.degree_bandfit) == int and self.degree_bandfit > 1\n ), \"The bandfit degree must be a positive integer greater than 1\" \n\nclass Data():\n r\"\"\"Parent class for parsing and storing data from bandstructure calculations. Contains a :meth:`check_data` method for basic checks on bandstructure data.\n\n\n Attributes:\n spin_channels (int): 1 (non-spin-polarised), 2 (spin-polarised), 4 (spin-orbit coupling).\n number_of_kpoints (int): the number of k-points per band.\n number_of_bands (int): the number of bands.\n kpoints (array(float)): 2-dimensional array with shape (number_of_kpoints, 3). Each row contains the fractional coordinates of a kpoint [kx,ky,kz].\n energies (array(float)): 2-dimensional array with shape (number_of_bands,number_of_kpoints). Each row contains energies of eigenstates in eV for a particular band.\n occupancy (array(float)): 2-dimensional array with shape (number_of_bands,number_of_kpoints). Each row contains occupation number of the eigenstates for a particular band. 
Values range from 0-1 (spin-polarised) or 0-2 (non-spin-polarised).\n reciprocal_lattice (list(float)): the reciprocal lattice vectors in format [[x1,y1,z1],[x2,y2,z2],[x3,y3,z3]], units Angstrom :math:`^{-1}`.\n CBM (float): the conduction band minimum energy in eV.\n VBM (float): the valence band maximum in eV.\n fermi_energy (float): the fermi energy in eV.\"\"\" \n\n def __init__(self):\n r\"\"\"\n Initialises an instance of the :class:`~effmass.inputs.Data` class. All attributes are None until set by the derived class.\n\n Args:\n None.\n\n Returns: \n None.\n \"\"\"\n \n\n self.spin_channels = None\n self.number_of_bands = None\n self.number_of_kpoints = None\n self.energies = None\n self.occupancy = None\n self.kpoints = None\n self.fermi_energy = None\n self.reciprocal_lattice = None\n self.CBM = None\n self.VBM = None\n\n def check_data(self, spin_channels, number_of_kpoints, number_of_bands, CBM, \n VBM, fermi_energy, occupancy):\n \"\"\"Check that Data class attributes make basic sense.\n\n Args:\n None.\n\n Returns:\n None.\n\n Notes:\n There is a similar method that runs automatically when reading data in using the `vasppy.procar <http://vasppy.readthedocs.io/en/latest/vasppy.html#module-vasppy.procar>`_ module.\n \"\"\"\n assert (\n ((spin_channels == 1) | (spin_channels == 2) |\n (spin_channels == 4)) is True\n ), \"Spin channels must have value 1 (non spin-polarised) or 2 (spin-polarised)\"\n assert (type(number_of_kpoints) == int\n and number_of_kpoints > 0\n ), \"The number of kpoints is not a positive integer\"\n assert (type(number_of_bands) == int and number_of_bands > 0\n ), \"The number of bands is not a positive integer\"\n assert (CBM >\n VBM), \"The CBM energy is lower than than the VBM energy\"\n if fermi_energy < VBM:\n warnings.warn(\"The fermi energy is lower than the VBM\")\n if fermi_energy > CBM:\n warnings.warn(\"The fermi energy is higher than the CBM\")\n if occupancy is not None:\n if ((occupancy == 0) | (occupancy == 1) |\n (occupancy == 2)).all() is False:\n warnings.warn(\"You have partial occupancy of bands\")\n\n def find_cbm_vbm(self):\n self.CBM, self.VBM = extrema.calc_CBM_VBM_from_Fermi(self,CBMVBM_search_depth=4.0)\n\nclass DataASE(Data):\n\n r\"\"\"\n Class for interfacing with the ASE bandstructure object. Inherits attributes and methods from the :class:`~effmass.inputs.Data` class, and extends\n with a method for inferring the CBM/VBM from Fermi level.\n\n Note: DataASE.fermi_energy is taken from the seedname.out file. \n\n Note: The DataASE class does not parse eigenstate occupancy data. The Fermi energy will \\\n be used to infer which bands are occupied (below the fermi energy) and which are unoccupied (above \\\n the fermi energy). You should independently confirm that the fermi energy is in the band gap of \\\n your material. Note that you can manually set the `fermi_energy` attribute and find the CBM and VBM using the method `find_cbm_vbm`. \")\n\n \"\"\"\n\n\n def __init__(self, bs, atoms):\n r\"\"\"\n Initialises an instance of the :class:`~effmass.inputs.DataASE` class and infers which bands are occupied and unoccupied from the fermi level.\n\n Args:\n bs (ase.spectrum.band_structure.BandStructure): An instance of the ase.spectrum.band_structure.BandStructure object.\n\n Returns: \n None.\n \"\"\"\n\n warnings.warn(\"The DataASE class does not parse eigenstate occupancy data. The Fermi energy will \\\n be used to infer which bands are occupied (below the fermi energy) and which are unoccupied (above \\\n the fermi energy). 
You should independently confirm that the fermi energy is in the band gap of \\\n your material. Note that you can manually set the DataASE.fermi_energy attribute and then re-find the CBM and VBM using the method `DataASE.find_cbm_vbm`. \")\n\n super().__init__()\n\n self.spin_channels = bs.energies.shape[0]\n self.number_of_kpoints = bs.energies.shape[1]\n self.number_of_bands = bs.energies.shape[2]*bs.energies.shape[0]\n self.energies = bs.energies.transpose(1,0,2).reshape(self.number_of_kpoints,-1).transpose()\n self.kpoints = bs.path.kpts\n self.reciprocal_lattice = atoms.cell.reciprocal()*2*math.pi\n self.fermi_energy = bs.reference\n self.find_cbm_vbm()\n self.check_data(self.spin_channels, self.number_of_kpoints, self.number_of_bands, self.CBM, \n self.VBM, self.fermi_energy, self.occupancy)\n\n\nclass DataCastep(DataASE):\n\n r\"\"\"Class for parsing and storing data from a Castep bandstructure calculation. Inherits attributes and methods from the :class:`~effmass.inputs.DataASE` class.\"\"\"\n\n def __init__(self,directory_path,seedname):\n r\"\"\"\n Initialises an instance of the :class:`~effmass.inputs.DataCastep` class.\n\n Args:\n directory_path (str): The path to a directory containing seedname.cell, seedname.out and seedname.bands\n seedname (str): The name (without suffix) of the input and output files\n \n Returns: \n None.\n \"\"\"\n \n Castep_calculator = Castep(directory_path)\n Castep_calculator.atoms = io.read(directory_path+\"./\"+seedname+\".cell\", format='castep-cell')\n ASE_bandstructure = Castep_calculator.band_structure(directory_path+\"./\"+seedname+\".bands\")\n ASE_atoms = Castep_calculator.atoms\n super().__init__(ASE_bandstructure, ASE_atoms)\n\n\n\n# class DataQE(DataASE):\n\n# r\"\"\"Class for parsing and storing data from a Quantum Espresso bandstructure calculation. Inherits attributes and methods from the :class:`~effmass.inputs.DataASE` class.\"\"\"\n\n# def __init__(self,directory_path,seedname):\n# r\"\"\"\n# Initialises an instance of the :class:`~effmass.inputs.DataQE` class.\n\n# Args:\n\n \n# Returns: \n# None.\n# \"\"\"\n\n# QE_calculator = ase.calculators.espresso.Espresso()\n# QE_calculator.atoms = ase.io.espresso.read_espresso_out()\n# ASE_bandstructure = QE_calculator.band_structure()\n# super().__init__(self, ASE_bandstructure)\n\nclass DataVasp(Data):\n r\"\"\"\n Class for parsing and storing data from a vasp calculation. Extends the :class:`~effmass.inputs.Data` class to include support for analysing DOSCAR data\"\n \n Additional attributes:\n dos (array): 2-dimensional array. Each row contains density of states data (units \"number of states / unit cell\") at a given energy: [energy(float),dos(float)].\n integrated_dos: 2-dimensional array. 
Each row contains integrated density of states data at a given energy: [energy(float),integrated_dos(float)].\n \n Note: DataVasp.fermi_energy is automatically set to the mean of DataVasp.CBM and DataVasp.VBM.\n \"\"\"\n\n def __init__(self, outcar_path, procar_path, ignore=0, **kwargs):\n r\"\"\"\n Initialises an instance of the :class:`~effmass.inputs.Data` class and checks data using :meth:`check_data`.\n\n Args:\n outcar_path (str): The path to the OUTCAR file\n procar_path (:obj:`str` or :obj:`list`): The path(s) to one or more PROCAR files.\n \n ignore (int): The number of kpoints to ignore at the beginning of the bandstructure slice through kspace (useful for hybrid calculations where zero weightings are appended to a previous self-consistent calculation).\n **kwargs: Additional keyword arguments for reading the PROCAR file(s). \n \n Returns: \n None.\n \"\"\"\n\n super().__init__()\n\n assert (type(outcar_path) == str), \"The OUTCAR path must be a string\"\n assert (type(ignore) == int and ignore >= 0\n ), \"The number of kpoints to ignore must be a positive integer\"\n\n reciprocal_lattice = outcar.reciprocal_lattice_from_outcar(outcar_path)\n if isinstance(procar_path, list):\n vasp_data = procar.Procar.from_files(procar_path, **kwargs)\n elif isinstance(procar_path, str):\n vasp_data = procar.Procar.from_file(procar_path, **kwargs)\n else:\n raise TypeError('procar_path must be a string or list of strings')\n\n self.spin_channels = vasp_data.spin_channels\n self.number_of_bands = vasp_data.number_of_bands\n\n number_of_kpoints = vasp_data.number_of_k_points\n vasp_data_energies = np.array( [ band.energy for band in np.ravel( vasp_data.bands ) ] )\n vasp_data_occupancies = np.array( [ band.occupancy for band in np.ravel( vasp_data.bands ) ] )\n if vasp_data.calculation['spin_polarised']: # to account for the change in PROCAR format for calculations with 2 spin channels (1 k-point block ---> 2 k-point blocks)\n energies = np.zeros([self.number_of_bands*2,number_of_kpoints]) # This is a very ugly way to slice 'n' dice. Should avoid creating new array and use array methods instead. But it does the job so will keep for now.\n for i in range(self.number_of_bands):\n energies[i] = vasp_data_energies.reshape(\n number_of_kpoints*2, # factor of 2 for each kpoint block\n self.number_of_bands).T[i][:number_of_kpoints]\n energies[self.number_of_bands+i] = vasp_data_energies.reshape(\n number_of_kpoints*2,\n self.number_of_bands).T[i][number_of_kpoints:]\n occupancy = np.zeros([self.number_of_bands*2,number_of_kpoints])\n for i in range(self.number_of_bands):\n occupancy[i] = vasp_data_occupancies.reshape(\n number_of_kpoints*2,\n self.number_of_bands).T[i][:number_of_kpoints]\n occupancy[self.number_of_bands+i] = vasp_data_occupancies.reshape(\n number_of_kpoints*2,\n self.number_of_bands).T[i][number_of_kpoints:]\n else:\n energies = vasp_data_energies.reshape(\n number_of_kpoints,\n self.number_of_bands).T\n occupancy = vasp_data_occupancies.reshape(\n number_of_kpoints,\n self.number_of_bands).T\n\n # remove values which are from the self-consistent calculation prior to the bandstructure calculation (workflow for hybrid functionals)\n self.energies = np.delete(energies,list(range(ignore)),1)\n self.occupancy = np.delete(occupancy,list(range(ignore)),1)\n self.number_of_kpoints = number_of_kpoints - ignore\n\n # handle negative occupancy values\n if np.any(self.occupancy < 0):\n warnings.warn(\"One or more occupancies in your PROCAR file are negative. 
All negative occupancies will be set to zero.\")\n self.occupancy[ self.occupancy < 0 ] = 0.0\n\n self.kpoints = np.array( [ kp.frac_coords \n for kp in vasp_data.k_points[ignore:vasp_data.number_of_k_points] ] )\n self.reciprocal_lattice = reciprocal_lattice * 2 * math.pi\n self.CBM = extrema._calc_CBM(self.occupancy, self.energies)\n self.VBM = extrema._calc_VBM(self.occupancy, self.energies)\n self.fermi_energy = (self.CBM + self.VBM) / 2\n self.dos = []\n self.integrated_dos = []\n self.check_data(self.spin_channels, self.number_of_kpoints, self.number_of_bands, \n self.CBM, self.VBM, self.fermi_energy, self.occupancy)\n\n\n\n def parse_DOSCAR(self, filename='./DOSCAR'):\n \"\"\"Parses the DOS and integrated DOS from a vasp DOSCAR file.\n\n Args:\n filename (str, optional): The location and filename of the DOSCAR to read in. Defaults to `'./DOSCAR'`.\n\n Returns:\n None.\n\n Notes:\n If the DOS has been sampled at more than 10000 points then this function will break at the expression for `num_data_points`.\n In this case, edit your DOSCAR file so that in the header there is a space preceding the number of points.\n \"\"\"\n with open(filename, 'r') as f:\n lines = f.readlines()\n\n num_data_points = int(lines[5].split()[2])\n if len(lines[6].split()) == 5:\n self.dos = np.array([[\n float(x.split()[0]),\n float(x.split()[1]) + float(x.split()[2])\n ] for x in lines[6:num_data_points + 6]])\n self.integrated_dos = np.array([[\n float(x.split()[0]),\n float(x.split()[3]) + float(x.split()[4])\n ] for x in lines[6:num_data_points + 6]])\n elif len(lines[6].split()) == 3:\n self.dos = np.array([[float(x.split()[0]),\n float(x.split()[1])]\n for x in lines[6:num_data_points + 6]])\n self.integrated_dos = np.array(\n [[float(x.split()[0]),\n float(x.split()[2])] for x in lines[6:num_data_points + 6]])\n else:\n print(\"problem parsing DOSCAR\")\n return\n\n\nclass DataAims(Data):\n r\"\"\"\n Class for parsing and storing data from a FHI-AIMS calculation.\n\n Attributes:\n spin_channels (int): 1 (non-spin-polarised), 2 (spin-polarised), 4 (spin-orbit coupling).\n number_of_kpoints (int): the number of k-points per band.\n number_of_bands (int): the number of bands.\n kpoints (array(float)): 2-dimensional array with shape (number_of_kpoints, 3). Each row contains the fractional coordinates of a kpoint [kx,ky,kz].\n energies (array(float)): 2-dimensional array with shape (number_of_bands,number_of_kpoints). Each row contains energies of eigenstates in eV for a particular band.\n occupancy (array(float)): 2-dimensional array with shape (number_of_bands,number_of_kpoints). Each row contains occupation number of the eigenstates for a particular band. Values range from 0-1 (spin-polarised) or 0-2 (non-spin-polarised).\n reciprocal_lattice (list(float)): the reciprocal lattice vectors in format [[x1,y1,z1],[x2,y2,z2],[x3,y3,z3]], units Angstrom :math:`^{-1}`.\n CBM (float): the conduction band minimum energy in eV.\n VBM (float): the valence band maximum in eV.\n fermi_energy (float): the fermi energy in eV. 
Automatically set to the mean of Data.CBM and Data.VBM.\n \"\"\"\n\n def __init__(self, directory_path, output_name='calculation.out'):\n r\"\"\"\n Initialises an instance of the :class:`~effmass.inputs.DataAims` class and checks data using :meth:`check_data`.\n\n Args:\n directory_path (str): The path to the directory containing output, geometry.in, control.in and bandstructure files\n\t output_name (str): Name of the output file - contrary to the rest of the files, this is chosen by the user during an Aims run. Defaults to 'calculation.out'.\n\n Returns:\n None.\n \"\"\"\n super().__init__()\n\n assert (type(directory_path) == str), \"The file path must be a string\"\n\n \"Finding reciprocal lattice vectors\"\n\n latvec = []\n\n for line in open(\"{}/geometry.in\".format(directory_path)):\n line = line.split(\"\\t\")[0]\n words = line.split()\n if len(words) == 0:\n continue\n if words[0] == \"lattice_vector\":\n if len(words) != 4:\n raise Exception(\"geometry.in: Syntax error in line '\"+line+\"'\")\n latvec.append(np.array(words[1:4]))\n\n if len(latvec) != 3:\n raise Exception(\"geometry.in: Must contain exactly 3 lattice vectors\")\n\n latvec = np.asarray(latvec)\n latvec = latvec.astype(float) # builtin float: np.float was removed in NumPy 1.24\n\n #Calculate reciprocal lattice vectors\n rlatvec = []\n volume = (np.dot(latvec[0,:],np.cross(latvec[1,:],latvec[2,:])))\n rlatvec.append(np.array(2*math.pi*np.cross(latvec[1,:],latvec[2,:])/volume))\n rlatvec.append(np.array(2*math.pi*np.cross(latvec[2,:],latvec[0,:])/volume))\n rlatvec.append(np.array(2*math.pi*np.cross(latvec[0,:],latvec[1,:])/volume))\n\n reciprocal_lattice = np.asarray(rlatvec)\n self.reciprocal_lattice = reciprocal_lattice\n\n\n \"Finding spin channels\"\n\n spin_channels = 0\n\n for line in open(\"{}/{}\".format(directory_path, output_name)):\n line = line.split(\"\\t\")[0]\n if \"include_spin_orbit\" in line:\n spin_channels = 4\n break\n elif \"Number of spin channels\" in line:\n words = line.split()\n spin_channels = int(words[-1])\n break\n\n self.spin_channels = spin_channels\n\n \"Finding number of bands\"\n\n number_of_bands = 0\n\n for line in open(\"{}/{}\".format(directory_path, output_name)):\n line = line.split(\"\\t\")[0]\n if \"Number of Kohn-Sham\" in line:\n words = line.split()\n number_of_bands = int(words[-1])\n\n break\n\n if spin_channels == 2 or spin_channels == 4: #Doubling for spin-polarised calculation\n number_of_bands = 2*number_of_bands\n\n self.number_of_bands = number_of_bands\n\n \"Finding number of kpoints and determining number of BZ paths\"\n\n number_of_kpoints = 0\n number_of_BZ_paths = 0\n path_list = []\n\n for line in open(\"{}/{}\".format(directory_path, output_name)):\n line = line.split(\"\\n\")[0]\n if not line.startswith(\"#\") and \"output\" in line:\n if \"band\" in line:\n words = line.split()\n if words[0]==\"output\" and words[1]==\"band\":\n path_list.append(int(words[8]))\n number_of_BZ_paths += 1\n\n number_of_kpoints = sum(path_list)\n\n \"Reading out bandstructure files to determine kpoint, energy and occupation matrices\"\n\n kpoints = np.zeros([number_of_kpoints,3])\n energies = np.zeros([number_of_bands,number_of_kpoints])\n occupancy = np.zeros([number_of_bands,number_of_kpoints])\n path_counter = 0\n\n if spin_channels == 1 or spin_channels == 4:\n kpoint_counter = 0\n while path_counter<number_of_BZ_paths:\n kpoint_counter = sum(path_list[:path_counter])\n for line in open(\"{}/band1{:03d}.out\".format(directory_path, path_counter+1)):\n line = line.split(\"\\t\")[0]\n words = line.split()\n 
kpoints[int(kpoint_counter),0] = float(words[1])\n kpoints[int(kpoint_counter),1] = float(words[2])\n kpoints[int(kpoint_counter),2] = float(words[3])\n for i in range(number_of_bands):\n energies[i,int(kpoint_counter)] = float(words[5+2*i])\n occupancy[i,int(kpoint_counter)] = float(words[4+2*i])\n kpoint_counter += 1\n path_counter +=1\n\n if spin_channels == 2:\n while path_counter<number_of_BZ_paths:\n kpoint_counter = int(sum(path_list[:path_counter]))\n for line in open(\"{}/band1{:03d}.out\".format(directory_path, path_counter+1)):\n line = line.split(\"\\t\")[0]\n words = line.split()\n kpoints[int(kpoint_counter),0] = float(words[1])\n kpoints[int(kpoint_counter),1] = float(words[2])\n kpoints[int(kpoint_counter),2] = float(words[3])\n for i in range(number_of_bands//2):\n energies[i,int(kpoint_counter)] = float(words[5+2*i])\n occupancy[i,int(kpoint_counter)] = float(words[4+2*i])\n kpoint_counter += 1\n kpoint_counter = int(sum(path_list[:path_counter]))\n for line in open(\"{}/band2{:03d}.out\".format(directory_path, path_counter+1)):\n line = line.split(\"\\t\")[0]\n words = line.split()\n for i in range(number_of_bands//2):\n energies[number_of_bands//2+i,kpoint_counter] = float(words[5+2*i])\n occupancy[number_of_bands//2+i,kpoint_counter] = float(words[4+2*i])\n kpoint_counter += 1\n path_counter += 1\n\n \"Delete double kpoints at path edges\"\n\n index_count = len(kpoints)\n index = 0\n while index < index_count-1:\n if np.array_equal(kpoints[index],kpoints[index+1]):\n kpoints = np.delete(kpoints,index+1,axis=0)\n energies = np.delete(energies,index+1,axis=1)\n occupancy = np.delete(occupancy,index+1,axis=1)\n index_count = len(kpoints)\n index += 1\n\n self.number_of_kpoints = len(kpoints)\n\n\n self.CBM = extrema._calc_CBM(occupancy, energies)\n self.VBM = extrema._calc_VBM(occupancy, energies)\n self.fermi_energy = (self.CBM + self.VBM) / 2\n\n \"Cutting energy values in a range of 30 eV above and below the Fermi level. FHI AIMS is all electron, but not all states are needed for a meaningful effmass calculation\"\n\n index_count = len(occupancy)\n index = 0\n while index < index_count-1:\n if all(item < self.fermi_energy - 30 for item in energies[index]):\n energies = np.delete(energies, index, axis = 0)\n occupancy = np.delete(occupancy, index, axis = 0)\n index_count = len(occupancy)\n elif all(item > self.fermi_energy + 30 for item in energies[index]):\n energies = np.delete(energies, index, axis = 0)\n occupancy = np.delete(occupancy, index, axis = 0)\n index_count = len(occupancy)\n else:\n index += 1\n\n self.energies = energies\n self.occupancy = occupancy\n self.kpoints = kpoints\n\n self.check_data(self.spin_channels, self.number_of_kpoints, self.number_of_bands, \n self.CBM, self.VBM, self.fermi_energy, self.occupancy)\n\n\n" ]
[ [ "numpy.array", "numpy.delete", "numpy.array_equal", "numpy.asarray", "numpy.zeros", "numpy.any", "numpy.ravel", "numpy.cross" ] ]
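Note: DataAims above builds the reciprocal lattice from cross products of the real-space vectors. The same construction in isolation, with a check of the defining relation a_i . b_j = 2*pi*delta_ij (toy cubic cell, not parsed from a geometry.in file):

import math
import numpy as np

# b_i = 2*pi * (a_j x a_k) / (a_1 . (a_2 x a_3)), cyclic in (i, j, k).
latvec = np.eye(3) * 4.0                      # toy cubic cell, 4 Angstrom
volume = np.dot(latvec[0], np.cross(latvec[1], latvec[2]))
rlatvec = np.array([
    2 * math.pi * np.cross(latvec[1], latvec[2]) / volume,
    2 * math.pi * np.cross(latvec[2], latvec[0]) / volume,
    2 * math.pi * np.cross(latvec[0], latvec[1]) / volume,
])
assert np.allclose(latvec @ rlatvec.T, 2 * math.pi * np.eye(3))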
fdcl-gwu/uav_simulator
[ "a31855babfe633ae326ecb36c4ff714066cdb269" ]
[ "scripts/matrix_utils.py" ]
[ "import numpy as np\nimport pdb\n\n\ndef hat(x):\n \"\"\"Returns the hat map of a given 3x1 vector.\n\n Args:\n x: (3x1 numpy array) vector\n\n Returns:\n x_hat: (3x3 numpy array) hat of the input vector\n \"\"\"\n\n x = np.ravel(x) # flatten so (3,), (3,1) and (1,3) inputs all yield a true 3x3 matrix\n x_hat = np.array([\n [0.0, -x[2], x[1]],\n [x[2], 0.0, -x[0]],\n [-x[1], x[0], 0.0]\n ])\n\n return x_hat\n\n\ndef vee(x):\n \"\"\"Returns the vee map of a given 3x3 matrix.\n\n Args:\n x: (3x3 numpy array) hat of the input vector\n\n Returns:\n (3x1 numpy array) vee map of the input matrix\n \"\"\"\n return np.array([x[2,1], x[0,2], x[1,0]])\n\n\ndef q_to_R(q):\n \"\"\"Converts a quaternion to a rotation matrix in SO(3).\n\n Args:\n q: (4x1 numpy array) quaternion\n\n Returns:\n R: (3x3 numpy array) rotation matrix corresponding to the quaternion\n \"\"\"\n\n R = np.identity(3)\n q13 = np.array([q[0], q[1], q[2]])\n q4 = q[3]\n\n hat_q = hat(q13)\n R += 2 * q4 * hat_q + 2 * hat_q.dot(hat_q)\n\n return R\n\n\ndef deriv_unit_vector(A, A_dot, A_2dot):\n \"\"\"Returns the unit vector and its derivatives for a given vector.\n\n Args:\n A: (3x1 numpy array) vector\n A_dot: (3x1 numpy array) first derivative of the vector\n A_2dot: (3x1 numpy array) second derivative of the vector\n\n Returns:\n q: (3x1 numpy array) unit vector of A\n q_dot: (3x1 numpy array) first derivative of q\n q_2dot: (3x1 numpy array) second derivative of q\n \"\"\"\n\n nA = np.linalg.norm(A)\n nA3 = nA * nA * nA\n nA5 = nA3 * nA * nA\n\n A_A_dot = A.dot(A_dot)\n\n q = A / nA\n q_dot = A_dot / nA \\\n - A.dot(A_A_dot) / nA3\n\n q_2dot = A_2dot / nA \\\n - A_dot.dot(2 * A_A_dot) / nA3 \\\n - A.dot(A_dot.dot(A_dot) + A.dot(A_2dot)) / nA3 \\\n + 3 * A.dot(A_A_dot).dot(A_A_dot) / nA5\n\n return (q, q_dot, q_2dot)\n\n\ndef saturate(x, x_min, x_max):\n \"\"\"Saturate input vector between two values.\n \n Args:\n x: (nx1 array) value\n x_min: (float) minimum value for x\n x_max: (float) maximum value for x\n \n Returns:\n (nx1 array) saturated x\n \"\"\"\n\n for i in range(len(x)):\n if x[i] > x_max:\n x[i] = x_max\n elif x[i] < x_min:\n x[i] = x_min\n \n return x\n\n\ndef expm_SO3(r):\n \"\"\"Returns the matrix exponential of a matrix in SO(3).\n\n Args:\n r: (3x1 numpy array) vector\n\n Returns:\n R: (3x3 numpy array) matrix exponential of r\n \"\"\"\n\n theta = np.linalg.norm(r)\n\n y = sinx_over_x(theta)\n y2 = sinx_over_x(theta / 2)\n\n hat_r = hat(r)\n # hat_r.dot(hat_r) is the matrix square required by the Rodrigues formula;\n # hat(r)**2 would square elementwise and give a wrong result.\n R = np.eye(3) + y * hat_r + 0.5 * y2**2 * hat_r.dot(hat_r)\n\n return R\n\n\ndef sinx_over_x(x):\n \"\"\"Calculate sin(x)/x, while dealing with the cases where denominator is \n zero.\n\n Args:\n x: (float) value\n\n Returns:\n y: (float) value of sin(x)/x\n \"\"\"\n \n eps = 1e-6\n if abs(x) < eps:\n y = - x**10 / 39916800.0 + x**8 / 362880.0 - x**6 / 5040.0 \\\n + x**4 / 120.0 - x**2 / 6.0 + 1.0\n else:\n y = np.sin(x) / x\n \n return y\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.sin", "numpy.eye", "numpy.identity" ] ]
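Note: a couple of standalone sanity checks for the maps in the file above; hat and vee are restated here (with a flattened input, an assumption rather than the module's exact code) so the snippet runs on its own:

import numpy as np

def hat(x):
    # skew-symmetric matrix such that hat(x) @ v == np.cross(x, v)
    x = np.ravel(x)
    return np.array([[0.0, -x[2], x[1]],
                     [x[2], 0.0, -x[0]],
                     [-x[1], x[0], 0.0]])

def vee(M):
    # inverse of hat: recover the vector from the skew-symmetric matrix
    return np.array([M[2, 1], M[0, 2], M[1, 0]])

r = np.array([0.1, -0.2, 0.3])
assert np.allclose(vee(hat(r)), r)            # vee inverts hat
assert np.allclose(hat(r) @ np.array([1., 2., 3.]),
                   np.cross(r, [1., 2., 3.])) # hat(r) @ v equals r x v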
havelhuang/DeepConcolic
[ "d75a100a33dfc4e3674863bbcb3eb8a96b423e2b" ]
[ "GUAP/run_cifar.py" ]
[ "import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchsummary import summary\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport argparse\nimport torchvision\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader, Dataset\nimport torchvision.utils as vutils\nimport logging\nimport os\nimport time\nimport datetime\nimport random\nimport torchvision.models as models\nimport attack_model\nfrom models import *\nfrom utils import *\nimport torch.backends.cudnn as cudnn\n\ntorch.cuda.empty_cache()\n\n\nif __name__ == '__main__':\n\n\n if not os.path.exists('log'):\n os.mkdir('log')\n\n logger = logging.getLogger(__name__)\n logging.basicConfig(\n format='[%(asctime)s] - %(message)s',\n datefmt='%Y/%m/%d %H:%M:%S',\n level=logging.DEBUG)\n # level=logging.INFO,\n # filename='log/CIFAR10GUAP_'+datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')+'.log')\n logging.getLogger('matplotlib.font_manager').disabled = True\n\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str, default='CIFAR10', help='CIFAR10')\n parser.add_argument('--lr', type=float, required=False, default=0.01, help='Learning rate')\n parser.add_argument('--batch-size', default=100, type=int)\n parser.add_argument('--epochs', type=int, default=20, help='number of epochs to train for')\n parser.add_argument('--l2reg', type=float, default=0.0001, help='weight factor for l2 regularization')\n parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\n parser.add_argument('--tau', type=float, default=0.1, help='max flow magnitude, default=0.1')\n parser.add_argument('--allow', type=float, default=0.03, help='allow for linf noise. 
default=0.03')\n parser.add_argument('--model', type=str, default='VGG19', help='VGG19/ResNet101/DenseNet121')\n parser.add_argument('--manualSeed', type=int, default=5198, help='manual seed')\n parser.add_argument('--gpuid', type=str, default='0', help='multi gpuid')\n\n args = parser.parse_args()\n logger.info(args)\n tau = args.tau\n lr = args.lr\n dataSet = args.dataset\n batch_size = args.batch_size\n allow = args.allow\n model_name = args.model\n epochs = args.epochs\n gpuid = args.gpuid\n # 'DenseNet121','VGG19','ResNet101'\n\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpuid\n\n random.seed(args.manualSeed)\n np.random.seed(args.manualSeed)\n torch.manual_seed(args.manualSeed)\n torch.cuda.manual_seed(args.manualSeed)\n torch.cuda.manual_seed_all(args.manualSeed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n print('Generalizing Universarial Adversarial Examples')\n print('==> Preparing data..')\n\n torch.manual_seed(args.manualSeed)\n transforms_normalize = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n trainset = torchvision.datasets.CIFAR10(root='/mnt/storage0_8/torch_datasets/cifar-data', train=True, download=True, transform=transforms_normalize)\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=2)\n\n testset = torchvision.datasets.CIFAR10(root='/mnt/storage0_8/torch_datasets/cifar-data', train=False, download=True, transform=transforms_normalize)\n test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=2)\n\n nc,H,W = trainset.__getitem__(0)[0].shape\n\n if model_name == 'VGG19':\n model = VGG('VGG19')\n model.load_state_dict(torch.load('./checkpoints/cifar10_vgg19.pth')['net'])\n elif model_name == 'ResNet101':\n model = ResNet101()\n model.load_state_dict(torch.load('./checkpoints/cifar10_resnet101.pth')['net'])\n elif model_name == 'DenseNet121':\n model = DenseNet121()\n model.load_state_dict(torch.load('./checkpoints/cifar10_dense121.pth')['net'])\n else:\n assert 0 \n\n\n dataset_mean = [0.4914, 0.4822, 0.4465]\n dataset_std = [0.2023, 0.1994, 0.2010]\n print(dataset_mean,dataset_std)\n\n mu = torch.Tensor((dataset_mean)).unsqueeze(-1).unsqueeze(-1).cuda()\n std = torch.Tensor((dataset_std)).unsqueeze(-1).unsqueeze(-1).cuda()\n unnormalize = lambda x: x*std + mu\n normalize = lambda x: (x-mu)/std\n\n\n for params in model.parameters():\n params.requires_grad = False\n model.eval()\n\n\n netAttacker = attack_model.Generator(1,nc,H)\n netAttacker.apply(weights_init)\n\n\n device_ids = [ i for i in range (torch.cuda.device_count())]\n\n print('gpuid:', device_ids)\n\n model= nn.DataParallel(model,device_ids=device_ids)\n netAttacker = nn.DataParallel(netAttacker,device_ids=device_ids)\n\n model = model.cuda()\n netAttacker = netAttacker.cuda()\n\n\n noise = torch.FloatTensor(1, 1, H, W)\n noise = noise.cuda()\n noise = Variable(noise)\n torch.nn.init.normal_(noise, mean=0, std=1.)\n\n loss_flow = Loss_flow()\n\n optimizer = torch.optim.Adam(netAttacker.parameters(), lr=lr, betas=(args.beta1, 0.999), weight_decay=args.l2reg)\n\n bestatt = 0.\n bestloss = 10000\n\n logger.info('Epoch \\t Time \\t Tr_loss \\t L \\t Tr_acc \\t Tr_stAtt \\t Tr_noiseAtt \\t Tr_Attack Rate ')\n\n for epoch in range(epochs):\n start_time = time.time()\n train_loss = 0\n train_acc = 0\n train_n = 0\n train_attack_rate = 0\n train_st_rate = 0\n train_noise_rate = 0\n 
train_ori_acc = 0\n skipped = 0\n no_skipped = 0\n \n netAttacker.train()\n model.eval()\n\n for i, (X, y) in enumerate(train_loader):\n\n X, y = X.cuda(), y.cuda()\n batch_size = X.size(0)\n\n optimizer.zero_grad()\n\n train_ori_logits = model(X) \n flow_field,perb_noise = netAttacker(noise)\n\n L = loss_flow(flow_field)\n flow_field = flow_field *args.tau/L \n perb_noise = perb_noise* allow\n\n X_st = flow_st(unnormalize(X),flow_field,batch_size) \n X_noise = unnormalize(X)+ perb_noise\n X_noise = normalize(torch.clamp(X_noise, 0, 1))\n X_adv = X_st +perb_noise\n X_adv = normalize(torch.clamp(X_adv, 0, 1))\n optimizer.zero_grad()\n\n logits_st = model(normalize(X_st))\n logits_noise = model(X_noise)\n logits_adv = model(X_adv)\n adv_lossall = F.cross_entropy(logits_adv, train_ori_logits.max(1)[1], reduction = 'none')+1 \n adv_loss = -torch.mean(torch.log(adv_lossall))\n adv_loss.backward()\n optimizer.step()\n\n train_ori_acc += (train_ori_logits.max(1)[1] == y).sum().item()\n train_loss += adv_loss.item() * y.size(0)\n train_attack_rate += ((logits_adv.max(1)[1] != train_ori_logits.max(1)[1])).sum().item()\n train_st_rate += ((logits_st.max(1)[1] != train_ori_logits.max(1)[1])).sum().item()\n train_noise_rate += ((logits_noise.max(1)[1] != train_ori_logits.max(1)[1])).sum().item()\n train_n += y.size(0)\n \n train_time = time.time()\n logger.info('%d \\t %.1f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f',\n epoch, train_time - start_time, train_loss / train_n, L.data.cpu(), train_ori_acc/(train_n+skipped),train_st_rate/train_n,train_noise_rate/train_n, train_attack_rate/train_n)\n\n if bestatt<train_attack_rate/train_n and bestloss>train_loss / train_n:\n bestloss = train_loss / train_n\n bestatt = train_attack_rate/train_n\n bestflow = flow_field\n bestnoise = perb_noise\n\n print('Best train ASR:',end = '\\t')\n print(bestatt) \n flow_field = bestflow\n perb_noise = bestnoise \n\n print('==> start testing ..')\n test_ori_acc = 0\n test_n = 0\n test_adv_loss = 0\n test_adv_acc = 0\n test_attack_rate = 0\n test_st_rate = 0\n test_noise_rate = 0\n\n start_time = time.time()\n\n clean_np = np.empty((0,nc, H, W))\n st_np = np.empty((0,nc, H, W))\n perb_np = np.empty((0, nc, H, W))\n clean_preds_np = np.empty(0)\n perb_preds_np = np.empty(0)\n skipped = 0\n no_skipped = 0\n model.eval()\n netAttacker.eval()\n\n test_l2 = []\n with torch.no_grad():\n for i, (X, y) in enumerate(test_loader):\n X, y = X.cuda(), y.cuda()\n\n batch_size = X.size(0)\n test_ori_logits = model(X)\n X_st = flow_st(unnormalize(X),flow_field,batch_size) \n X_noise = unnormalize(X)+ perb_noise\n X_noise = normalize(torch.clamp(X_noise, 0, 1))\n X_perb = X_st+ perb_noise\n X_perb = normalize(torch.clamp(X_perb, 0, 1))\n\n X_st = normalize(X_st)\n\n test_logits_st = model(X_st)\n test_logits_noise = model(X_noise)\n test_logits_adv = model(X_perb)\n test_ori_acc += (test_logits_adv.max(1)[1] == y).sum().item()\n adv_lossall = F.cross_entropy(test_logits_adv, test_ori_logits.max(1)[1], reduction = 'none')+1 \n adv_loss = -torch.mean(torch.log(adv_lossall))\n test_adv_loss += adv_loss.item() * y.size(0)\n success_bool = (test_logits_adv.max(1)[1] != test_ori_logits.max(1)[1])\n test_attack_rate += success_bool.sum().item()\n test_st_rate += ((test_logits_st.max(1)[1] != test_ori_logits.max(1)[1])).sum().item()\n test_noise_rate += ((test_logits_noise.max(1)[1] != test_ori_logits.max(1)[1])).sum().item()\n\n if len(clean_preds_np)<10:\n clean_np = np.append(clean_np, X[success_bool].data.cpu(),axis=0)\n st_np = 
np.append(st_np, X_st[success_bool].data.cpu(),axis=0)\n perb_np = np.append(perb_np, X_perb[success_bool].data.cpu(),axis=0)\n clean_preds_np = np.append(clean_preds_np, test_ori_logits.max(1)[1][success_bool].data.cpu())\n perb_preds_np = np.append(perb_preds_np,test_logits_adv.max(1)[1][success_bool].data.cpu())\n\n test_n += y.size(0)\n l2dist = cal_l2dist(unnormalize(X),unnormalize(X_perb))\n test_l2.append(l2dist)\n\n test_time = time.time()\n test_l2 = [x for x in test_l2 if str(x)!='nan']\n\n logger.info('Perb Test Acc \\t L2 \\t Time \\t Adv Test_loss \\t Te_stAtt \\t Te_noiseAtt\\t Te_Attack Rate ')\n logger.info('%.4f \\t %.4f \\t %.2f \\t %.4f \\t %.4f \\t %.4f \\t %.4f', test_ori_acc/(test_n+skipped),np.mean(test_l2),test_time - start_time, test_adv_loss/test_n, test_st_rate/test_n,test_noise_rate/test_n, test_attack_rate/test_n)\n\n clean = unnormalize(torch.from_numpy(clean_np[:10]).cuda()).cpu().clamp(0,1)\n st = unnormalize(torch.from_numpy(st_np[:10]).cuda()).cpu().clamp(0,1)\n adv = unnormalize(torch.from_numpy(perb_np[:10]).cuda()).cpu().clamp(0,1)\n\n middlenoise1 = st - clean\n middlenoise2 = adv - st\n for i in range(10):\n middlenoise1[i] = norm_ip(middlenoise1[i])\n middlenoise2[i] = norm_ip(perb_noise.detach().unsqueeze(0).cpu())\n\n fig = plt.figure(figsize=(10, 5))\n grid = vutils.make_grid(torch.cat((clean,middlenoise1,st,middlenoise2,adv)).float(),nrow=10)\n \n if not os.path.exists('savefig'):\n os.mkdir('savefig') \n plt.imsave('savefig/Cifar10.png',grid.numpy().transpose((1, 2, 0)))\n\n\n" ]
[ [ "torch.cat", "torch.cuda.manual_seed", "numpy.mean", "torch.load", "torch.nn.DataParallel", "numpy.empty", "torch.autograd.Variable", "torch.FloatTensor", "torch.manual_seed", "torch.nn.init.normal_", "torch.utils.data.DataLoader", "torch.Tensor", "torch.cuda.manual_seed_all", "torch.clamp", "matplotlib.pyplot.figure", "torch.cuda.device_count", "torch.cuda.empty_cache", "torch.log", "numpy.random.seed", "torch.no_grad", "torch.from_numpy" ] ]
endolith/wavelets
[ "81e803f71c3306e97a6fdbd9dd762356021bc14a" ]
[ "wavelets/wavelets.py" ]
[ "import numpy as np\nimport scipy\nimport scipy.signal\nimport scipy.optimize\nimport scipy.special\nfrom scipy.special import factorial\n\n__all__ = ['Morlet', 'Paul', 'DOG', 'Ricker', 'Marr', 'Mexican_hat']\n\n\nclass Morlet:\n def __init__(self, w0=6):\n \"\"\"w0 is the nondimensional frequency constant. If this is\n set too low then the wavelet does not sample very well: a\n value over 5 should be ok; Terrence and Compo set it to 6.\n \"\"\"\n self.w0 = w0\n if w0 == 6:\n # value of C_d from TC98\n self.C_d = 0.776\n\n def __call__(self, *args, **kwargs):\n return self.time(*args, **kwargs)\n\n def time(self, t, s=1.0, complete=True):\n \"\"\"\n Complex Morlet wavelet, centred at zero.\n\n Parameters\n ----------\n t : float\n Time. If s is not specified, this can be used as the\n non-dimensional time t/s.\n s : float\n Scaling factor. Default is 1.\n complete : bool\n Whether to use the complete or the standard version.\n\n Returns\n -------\n out : complex\n Value of the Morlet wavelet at the given time\n\n See Also\n --------\n scipy.signal.gausspulse\n\n Notes\n -----\n The standard version::\n\n pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))\n\n This commonly used wavelet is often referred to simply as the\n Morlet wavelet. Note that this simplified version can cause\n admissibility problems at low values of `w`.\n\n The complete version::\n\n pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))\n\n The complete version of the Morlet wavelet, with a correction\n term to improve admissibility. For `w` greater than 5, the\n correction term is negligible.\n\n Note that the energy of the return wavelet is not normalised\n according to `s`.\n\n The fundamental frequency of this wavelet in Hz is given\n by ``f = 2*s*w*r / M`` where r is the sampling rate.\n\n \"\"\"\n w = self.w0\n\n x = t / s\n\n output = np.exp(1j * w * x)\n\n if complete:\n output -= np.exp(-0.5 * (w ** 2))\n\n output *= np.exp(-0.5 * (x ** 2)) * np.pi ** (-0.25)\n\n return output\n\n # Fourier wavelengths\n def fourier_period(self, s):\n \"\"\"Equivalent Fourier period of Morlet\"\"\"\n return 4 * np.pi * s / (self.w0 + (2 + self.w0 ** 2) ** .5)\n\n def scale_from_period(self, period):\n \"\"\"\n Compute the scale from the fourier period.\n Returns the scale\n \"\"\"\n # Solve 4 * np.pi * scale / (w0 + (2 + w0 ** 2) ** .5)\n # for s to obtain this formula\n coeff = np.sqrt(self.w0 * self.w0 + 2)\n return (period * (coeff + self.w0)) / (4. * np.pi)\n\n # Frequency representation\n def frequency(self, w, s=1.0):\n \"\"\"Frequency representation of Morlet.\n\n Parameters\n ----------\n w : float\n Angular frequency. If `s` is not specified, i.e. set to 1,\n this can be used as the non-dimensional angular\n frequency w * s.\n s : float\n Scaling factor. Default is 1.\n\n Returns\n -------\n out : complex\n Value of the Morlet wavelet at the given frequency\n \"\"\"\n x = w * s\n # Heaviside mock\n Hw = np.array(w)\n Hw[w <= 0] = 0\n Hw[w > 0] = 1\n return np.pi ** -.25 * Hw * np.exp((-(x - self.w0) ** 2) / 2)\n\n def coi(self, s):\n \"\"\"The e folding time for the autocorrelation of wavelet\n power at each scale, i.e. 
the timescale over which an edge\n effect decays by a factor of 1/e^2.\n\n This can be worked out analytically by solving\n\n |Y_0(T)|^2 / |Y_0(0)|^2 = 1 / e^2\n \"\"\"\n return 2 ** .5 * s\n\n\nclass Paul:\n def __init__(self, m=4):\n \"\"\"Initialise a Paul wavelet function of order `m`.\n \"\"\"\n self.m = m\n\n def __call__(self, *args, **kwargs):\n return self.time(*args, **kwargs)\n\n def time(self, t, s=1.0):\n \"\"\"\n Complex Paul wavelet, centred at zero.\n\n Parameters\n ----------\n t : float\n Time. If `s` is not specified, i.e. set to 1, this can be\n used as the non-dimensional time t/s.\n s : float\n Scaling factor. Default is 1.\n\n Returns\n -------\n out : complex\n Value of the Paul wavelet at the given time\n\n The Paul wavelet is defined (in time) as::\n\n (2 ** m * i ** m * m!) / sqrt(pi * (2 * m)!) \\\n * (1 - i * t / s) ** -(m + 1)\n\n \"\"\"\n m = self.m\n x = t / s\n\n const = (2 ** m * 1j ** m * factorial(m)) \\\n / (np.pi * factorial(2 * m)) ** .5\n functional_form = (1 - 1j * x) ** -(m + 1)\n\n output = const * functional_form\n\n return output\n\n # Fourier wavelengths\n def fourier_period(self, s):\n \"\"\"Equivalent Fourier period of Paul\"\"\"\n return 4 * np.pi * s / (2 * self.m + 1)\n\n def scale_from_period(self, period):\n \"\"\"\n Compute the scale from the fourier period.\n Returns the scale\n \"\"\"\n # Solve 4 * np.pi * scale / (2 * m + 1) for s\n return period * (2 * self.m + 1) / (4 * np.pi)\n\n # Frequency representation\n def frequency(self, w, s=1.0):\n \"\"\"Frequency representation of Paul.\n\n Parameters\n ----------\n w : float\n Angular frequency. If `s` is not specified, i.e. set to 1,\n this can be used as the non-dimensional angular\n frequency w * s.\n s : float\n Scaling factor. Default is 1.\n\n Returns\n -------\n out : complex\n Value of the Paul wavelet at the given frequency\n\n \"\"\"\n m = self.m\n x = w * s\n # Heaviside mock\n Hw = 0.5 * (np.sign(x) + 1)\n\n # prefactor\n const = 2 ** m / (m * factorial(2 * m - 1)) ** .5\n\n functional_form = Hw * (x) ** m * np.exp(-x)\n\n output = const * functional_form\n\n return output\n\n def coi(self, s):\n \"\"\"The e folding time for the autocorrelation of wavelet\n power at each scale, i.e. the timescale over which an edge\n effect decays by a factor of 1/e^2.\n\n This can be worked out analytically by solving\n\n |Y_0(T)|^2 / |Y_0(0)|^2 = 1 / e^2\n \"\"\"\n return s / 2 ** .5\n\n\nclass DOG:\n def __init__(self, m=2):\n \"\"\"Initialise a Derivative of Gaussian wavelet of order `m`.\"\"\"\n if m == 2:\n # value of C_d from TC98\n self.C_d = 3.541\n elif m == 6:\n self.C_d = 1.966\n else:\n pass\n self.m = m\n\n def __call__(self, *args, **kwargs):\n return self.time(*args, **kwargs)\n\n def time(self, t, s=1.0):\n \"\"\"\n Return a Derivative of Gaussian wavelet.\n\n When m = 2, this is also known as the \"Mexican hat\", \"Marr\"\n or \"Ricker\" wavelet.\n\n It models the function::\n\n ``A d^m/dx^m exp(-x^2 / 2)``,\n\n where ``A = (-1)^(m+1) / (gamma(m + 1/2))^.5``\n and ``x = t / s``.\n\n Note that the energy of the return wavelet is not normalised\n according to `s`.\n\n Parameters\n ----------\n t : float\n Time. 
If `s` is not specified, this can be used as the\n non-dimensional time t/s.\n s : scalar\n Width parameter of the wavelet.\n\n Returns\n -------\n out : float\n Value of the DOG wavelet at the given time\n\n Notes\n -----\n The derivative of the Gaussian has a polynomial representation:\n\n from http://en.wikipedia.org/wiki/Gaussian_function:\n\n \"Mathematically, the derivatives of the Gaussian function can be\n represented using Hermite functions. The n-th derivative of the\n Gaussian is the Gaussian function itself multiplied by the n-th\n Hermite polynomial, up to scale.\"\n\n http://en.wikipedia.org/wiki/Hermite_polynomial\n\n Here, we want the 'probabilists' Hermite polynomial (He_n),\n which is computed by scipy.special.hermitenorm\n\n \"\"\"\n x = t / s\n m = self.m\n\n # compute the Hermite polynomial (used to evaluate the\n # derivative of a Gaussian)\n He_n = scipy.special.hermitenorm(m)\n gamma = scipy.special.gamma\n\n const = (-1) ** (m + 1) / gamma(m + 0.5) ** .5\n function = He_n(x) * np.exp(-x ** 2 / 2)\n\n return const * function\n\n def fourier_period(self, s):\n \"\"\"Equivalent Fourier period of derivative of Gaussian\"\"\"\n return 2 * np.pi * s / (self.m + 0.5) ** .5\n\n def scale_from_period(self, period):\n \"\"\"\n Compute the scale from the fourier period.\n Returns the scale\n \"\"\"\n # Solve 2 * np.pi * s / (np.sqrt(m + 1/2)) for s\n return period * np.sqrt(self.m + 0.5) / (2 * np.pi)\n\n def frequency(self, w, s=1.0):\n \"\"\"Frequency representation of derivative of Gaussian.\n\n Parameters\n ----------\n w : float\n Angular frequency. If `s` is not specified, i.e. set to 1,\n this can be used as the non-dimensional angular\n frequency w * s.\n s : float\n Scaling factor. Default is 1.\n\n Returns\n -------\n out : complex\n Value of the derivative of Gaussian wavelet at the\n given time\n \"\"\"\n m = self.m\n x = s * w\n gamma = scipy.special.gamma\n const = -1j ** m / gamma(m + 0.5) ** .5\n function = x ** m * np.exp(-x ** 2 / 2)\n return const * function\n\n def coi(self, s):\n \"\"\"The e folding time for the autocorrelation of wavelet\n power at each scale, i.e. the timescale over which an edge\n effect decays by a factor of 1/e^2.\n\n This can be worked out analytically by solving\n\n |Y_0(T)|^2 / |Y_0(0)|^2 = 1 / e^2\n \"\"\"\n return 2 ** .5 * s\n\n\nclass Ricker(DOG):\n def __init__(self):\n \"\"\"The Ricker, aka Marr / Mexican Hat, wavelet is a\n derivative of Gaussian order 2.\n \"\"\"\n DOG.__init__(self, m=2)\n # value of C_d from TC98\n self.C_d = 3.541\n\n\n# aliases for DOG2\nMarr = Ricker\nMexican_hat = Ricker\n" ]
[ [ "scipy.special.hermitenorm", "numpy.array", "numpy.exp", "numpy.sign", "scipy.special.factorial", "numpy.sqrt" ] ]
shunlean/carbondata
[ "6891083b4eecb21e132538b5f84c2240675e8af3" ]
[ "python/pycarbon/core/carbon_arrow_reader_worker.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom __future__ import division\n\nimport hashlib\nimport operator\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\n\nfrom petastorm.cache import NullCache\nfrom petastorm.workers_pool.worker_base import WorkerBase\nfrom petastorm.arrow_reader_worker import ArrowReaderWorkerResultsQueueReader\n\n\nclass ArrowCarbonReaderWorker(WorkerBase):\n def __init__(self, worker_id, publish_func, args):\n super(ArrowCarbonReaderWorker, self).__init__(worker_id, publish_func, args)\n\n self._filesystem = args[0]\n self._dataset_path = args[1]\n self._schema = args[2]\n self._ngram = args[3]\n self._split_pieces = args[4]\n self._local_cache = args[5]\n self._transform_spec = args[6]\n\n if self._ngram:\n raise NotImplementedError('ngrams are not supported by ArrowReaderWorker')\n\n # We create datasets lazily in the first invocation of 'def process'. This speeds up startup time since\n # all Worker constructors are serialized\n self._dataset = None\n\n @staticmethod\n def new_results_queue_reader():\n return ArrowReaderWorkerResultsQueueReader()\n\n # pylint: disable=arguments-differ\n def process(self, piece_index, worker_predicate, shuffle_row_drop_partition):\n \"\"\"Main worker function. Loads and returns all rows matching the predicate from a blocklet\n\n Looks up the requested piece (a single row-group in a carbon file). If a predicate is specified,\n columns needed by the predicate are loaded first. If no rows in the blocklet matches the predicate criteria\n the rest of the columns are not loaded.\n\n :param piece_index:\n :param shuffle_row_drop_partition: A tuple 2 of the current row drop partition and the total number\n of partitions.\n :return:\n \"\"\"\n\n piece = self._split_pieces[piece_index]\n\n if not isinstance(self._local_cache, NullCache):\n if worker_predicate:\n raise RuntimeError('Local cache is not supported together with predicates, '\n 'unless the dataset is partitioned by the column the predicate operates on.')\n if shuffle_row_drop_partition[1] != 1:\n raise RuntimeError('Local cache is not supported together with shuffle_row_drop_partitions > 1')\n\n if worker_predicate:\n all_cols = self._load_rows_with_predicate(piece, worker_predicate, shuffle_row_drop_partition)\n else:\n # Using hash of the dataset path with the relative path in order to:\n # 1. Make sure if a common cache serves multiple processes (e.g. redis), we don't have conflicts\n # 2. Dataset path is hashed, to make sure we don't create too long keys, which maybe incompatible with\n # some cache implementations\n # 3. 
Still leave relative path and the piece_index in plain text to make it easier to debug\n cache_key = '{}:{}:{}'.format(hashlib.md5(self._dataset_path.encode('utf-8')).hexdigest(),\n piece.path, piece_index)\n all_cols = self._local_cache.get(cache_key,\n lambda: self._load_rows(piece, shuffle_row_drop_partition))\n\n if all_cols:\n self.publish_func(all_cols)\n\n def _load_rows(self, piece, shuffle_row_drop_range):\n \"\"\"Loads all rows from a piece\"\"\"\n\n # pyarrow would fail if we request column names that the dataset is partitioned by, so we strip them from\n # the `columns` argument.\n # partitions = self._dataset.partitions\n column_names_in_schema = list(field.name for field in self._schema.fields.values())\n # column_names = column_names_in_schema - partitions.partition_names\n\n result = self._read_with_shuffle_row_drop(piece, column_names_in_schema, shuffle_row_drop_range)\n\n if self._transform_spec:\n result = pa.Table.from_pandas(self._transform_spec.func(result.to_pandas()), preserve_index=False)\n\n return result\n\n def _load_rows_with_predicate(self, piece, worker_predicate, shuffle_row_drop_partition):\n \"\"\"Loads all rows that match a predicate from a piece\"\"\"\n\n # 1. Read all columns needed by predicate\n # 2. Apply the predicate. If nothing matches, exit early\n # 3. Read the remaining columns\n\n # Split all column names into ones that are needed by the predicate and the rest.\n predicate_column_names = set(worker_predicate.get_fields())\n\n if not predicate_column_names:\n raise ValueError('At least one field name must be returned by predicate\\'s get_fields() method')\n\n all_schema_names = set(field.name for field in self._schema.fields.values())\n\n invalid_column_names = predicate_column_names - all_schema_names\n if invalid_column_names:\n raise ValueError('At least some column names requested by the predicate ({}) '\n 'are not valid schema names: ({})'.format(', '.join(invalid_column_names),\n ', '.join(all_schema_names)))\n\n # Split into 'columns for predicate evaluation' and 'other columns'. We load 'other columns' only if at\n # least one row in the blocklet matched the predicate\n other_column_names = all_schema_names - predicate_column_names\n\n # Read columns needed for the predicate\n predicate_column_names_list = list(predicate_column_names)\n predicates_table = self._read_with_shuffle_row_drop(piece, predicate_column_names_list,\n shuffle_row_drop_partition)\n\n predicates_data_frame = predicates_table.to_pandas()\n\n match_predicate_mask = worker_predicate.do_include(predicates_data_frame)\n erase_mask = match_predicate_mask.map(operator.not_)\n\n # Don't have anything left after filtering? Exit early.\n if erase_mask.all():\n return []\n\n predicates_data_frame[erase_mask] = None\n\n if other_column_names:\n # Read remaining columns\n other_column_names_list = list(other_column_names)\n other_table = self._read_with_shuffle_row_drop(piece, other_column_names_list,\n shuffle_row_drop_partition)\n other_data_frame = other_table.to_pandas()\n other_data_frame[erase_mask] = None\n\n # Partition-by columns will appear in both other and predicate data frames. 
Deduplicate.\n columns_from_predicates = predicates_data_frame.columns.difference(other_data_frame.columns)\n result_data_frame = pd.merge(predicates_data_frame[columns_from_predicates], other_data_frame,\n copy=False, left_index=True, right_index=True)\n else:\n result_data_frame = predicates_data_frame\n\n result = result_data_frame[match_predicate_mask]\n\n if self._transform_spec:\n result = self._transform_spec.func(result)\n\n return pa.Table.from_pandas(result, preserve_index=False)\n\n def _read_with_shuffle_row_drop(self, piece, column_names, shuffle_row_drop_partition):\n table = piece.read_all(\n columns=column_names,\n )\n\n num_rows = len(table)\n num_partitions = shuffle_row_drop_partition[1]\n this_partition = shuffle_row_drop_partition[0]\n\n if num_partitions > 1:\n data_frame_pandas = table.to_pandas()\n partition_indexes = np.floor(np.arange(num_rows) / (float(num_rows) / min(num_rows, num_partitions)))\n\n table = pa.Table.from_pandas(data_frame_pandas.loc[partition_indexes == this_partition],\n preserve_index=False)\n\n return table\n" ]
[ [ "numpy.arange", "pandas.merge" ] ]
CMU-IDS-2022/final-project-champion
[ "248902bf7ee09d0de953593e51c8bc9c6a68e17d" ]
[ "Word_cloud_data_processing.py" ]
[ "import os\nimport pandas as pd\n\nfrom pytrends.request import TrendReq\n\n# Cities currently processed; \"San Francisco\", \"Los Angeles\" and \"New York\" are excluded for now.\ntop10cities = set([\"Boston\", \"Chicago\", \"Detroit\", \"Philadelphia\", \"Pittsburgh\", \"Seattle\", \"Washington\"])\n\n# keep only the rows whose city is in the selected set\ndef filterTop10Cities(df):\n return df[df['City'].isin(top10cities)]\n\n\ndef writeIntoFile(df, city):\n outdir = \"data/\" + city.replace(\" \", \"\")\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n # TODO: change to output file name before running script\n df.to_csv(f\"{outdir}/googleTrends.csv\")\n\n\nif __name__ == \"__main__\":\n # TODO: change to unprocessed file name before running script\n pytrend = TrendReq(retries=5)\n\n housing_df = pd.read_csv(\"data/neighborhood_1br_rental_price.csv\")\n\n housing_df = filterTop10Cities(housing_df)\n\n city_neighborhood_mapping = {}\n for city in top10cities:\n temp_df = housing_df[housing_df['City'] == city]\n res = temp_df['Neighborhood']\n city_neighborhood_mapping[city] = list(set(res))\n\n print(city_neighborhood_mapping)\n\n for k in city_neighborhood_mapping.keys():\n KEYWORDS = city_neighborhood_mapping[k]\n KEYWORDS_CODES = []\n for i in KEYWORDS:\n suggestion = None # reset so a failed request cannot reuse the previous keyword's result\n try:\n suggestion = pytrend.suggestions(keyword=i)\n except Exception:\n print(\"unable to generate suggestion for keyword:\", i)\n if suggestion:\n KEYWORDS_CODES.append(suggestion[0])\n print(KEYWORDS_CODES)\n\n df_CODES = pd.DataFrame(KEYWORDS_CODES)\n print(df_CODES)\n EXACT_KEYWORDS = df_CODES['mid'].to_list()\n DATE_INTERVAL = '2018-01-01 2022-03-01'\n COUNTRY = [\"US\"] # ISO country code\n CATEGORY = 0 # Google Trends category (0 = all categories)\n SEARCH_TYPE = '' # default is 'web searches'; others include 'images', 'news', 'youtube', 'froogle' (google shopping)\n\n Individual_EXACT_KEYWORD = list(zip(*[iter(EXACT_KEYWORDS)]*1))\n Individual_EXACT_KEYWORD = [list(x) for x in Individual_EXACT_KEYWORD]\n dicti = {}\n i = 1\n for Country in COUNTRY:\n for keyword in Individual_EXACT_KEYWORD:\n try:\n pytrend.build_payload(kw_list=keyword,\n timeframe=DATE_INTERVAL,\n geo=Country,\n cat=CATEGORY,\n gprop=SEARCH_TYPE)\n dicti[i] = pytrend.interest_over_time()\n i += 1\n except Exception:\n print(\"could not process keyword:\", keyword)\n\n df_trends = pd.concat(dicti, axis=1)\n\n result = ['date'] + list(df_CODES['title'])\n\n df_trends.columns = df_trends.columns.droplevel(0) # drop outside header\n df_trends = df_trends.drop('isPartial', axis = 1) # drop \"isPartial\"\n df_trends.reset_index(level=0,inplace=True) # reset_index\n result = result[:len(df_trends.columns)]\n df_trends.columns = result\n\n writeIntoFile(df_trends, k)\n\n\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.concat" ] ]
Anupam0401/Photo-to-cartoon-convertor
[ "fbd2a38037534778d8caadce1e96c40a4a4edb4d" ]
[ "test_onnx.py" ]
[ "import os\r\nimport cv2\r\nimport numpy as np\r\nimport onnxruntime\r\nimport argparse\r\nfrom utils import Preprocess\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--photo_path', type=str, help='input photo path')\r\nparser.add_argument('--save_path', type=str, help='cartoon save path')\r\nargs = parser.parse_args()\r\n\r\nos.makedirs(os.path.dirname(args.save_path), exist_ok=True)\r\n\r\nclass Photo2Cartoon:\r\n def __init__(self):\r\n self.pre = Preprocess()\r\n \r\n assert os.path.exists('./models/photo2cartoon_weights.onnx'), \"[Step1: load weights] Can not find 'photo2cartoon_weights.onnx' in folder 'models!!!'\"\r\n self.session = onnxruntime.InferenceSession('./models/photo2cartoon_weights.onnx')\r\n print('[Step1: load weights] success!')\r\n\r\n def inference(self, img):\r\n # face alignment and segmentation\r\n face_rgba = self.pre.process(img)\r\n if face_rgba is None:\r\n print('[Step2: face detect] can not detect face!!!')\r\n return None\r\n \r\n print('[Step2: face detect] success!')\r\n face_rgba = cv2.resize(face_rgba, (256, 256), interpolation=cv2.INTER_AREA)\r\n face = face_rgba[:, :, :3].copy()\r\n mask = face_rgba[:, :, 3][:, :, np.newaxis].copy() / 255.\r\n face = (face*mask + (1-mask)*255) / 127.5 - 1\r\n\r\n face = np.transpose(face[np.newaxis, :, :, :], (0, 3, 1, 2)).astype(np.float32)\r\n\r\n # inference\r\n cartoon = self.session.run(['output'], input_feed={'input':face})\r\n\r\n # post-process\r\n cartoon = np.transpose(cartoon[0][0], (1, 2, 0))\r\n cartoon = (cartoon + 1) * 127.5\r\n cartoon = (cartoon * mask + 255 * (1 - mask)).astype(np.uint8)\r\n cartoon = cv2.cvtColor(cartoon, cv2.COLOR_RGB2BGR)\r\n print('[Step3: photo to cartoon] success!')\r\n return cartoon\r\n\r\n\r\nif __name__ == '__main__':\r\n img = cv2.cvtColor(cv2.imread(args.photo_path), cv2.COLOR_BGR2RGB)\r\n c2p = Photo2Cartoon()\r\n cartoon = c2p.inference(img)\r\n if cartoon is not None:\r\n cv2.imwrite(args.save_path, cartoon)\r\n print('Cartoon portrait has been saved successfully!')\r\n" ]
[ [ "numpy.transpose" ] ]
WeixinYang/PSFDataset
[ "f29b37489c580ad3c677bb9385a721cc57da60e4" ]
[ "psfdataset/transforms/spatial/crop.py" ]
[ "# -----------------------------------------------------------\n# Class to crop a sequence of keypoints to their common spatial bounding box.\n#\n# (C) 2020 Kevin Schlegel, Oxford, United Kingdom\n# Released under Apache License, Version 2.0\n# email [email protected]\n# -----------------------------------------------------------\nimport numpy as np\n\nfrom ...types import DescriptionDict\n\n\nclass Crop:\n \"\"\"\n Crop the keypoints to their (spatial) bounding box.\n\n Crop takes an input of the form [frame,landmark,coords] and translates\n the spatial coordinates so that the top, leftmost landmark is at (0,0).\n Thus np.amax will return the exact bounding box.\n\n If the landmarks have a confidence score as their last dimension, a\n confidence of 0 usually indicates missing data (i.e. points which could\n not be detected). The confidence value can be used to ignore those\n missing data points (which usually are equal to 0).\n\n Methods\n -------\n get_description()\n Return a dictionary describing the properties of the transformation.\n \"\"\"\n def __init__(self, ignore_missing: bool = False) -> None:\n self._ignore_missing = ignore_missing\n\n def __call__(self, sample: np.ndarray) -> np.ndarray:\n if self._ignore_missing:\n mins = np.full(sample.shape[2] - 1, np.inf)\n for frame in range(sample.shape[0]):\n for landmark in range(sample.shape[1]):\n if sample[frame][landmark][-1] != 0:\n mins = np.minimum(mins, sample[frame][landmark][0:-1])\n transformed = np.zeros(sample.shape, sample.dtype)\n for frame in range(sample.shape[0]):\n for landmark in range(sample.shape[1]):\n if sample[frame][landmark][-1] != 0:\n transformed[frame][landmark][0:-1] = \\\n sample[frame][landmark][0:-1] - mins\n transformed[frame][landmark][-1] = \\\n sample[frame][landmark][-1]\n else:\n mins = np.amin(sample, axis=(0, 1))\n transformed = sample - mins\n return transformed\n\n def get_description(self) -> DescriptionDict:\n \"\"\"\n Returns a dictionary describing all properties of the transformation.\n\n Returns\n -------\n dict\n Description of the transformation\n \"\"\"\n desc: DescriptionDict = {\"(s)Crop\": True}\n # this is only attached when True because for datasets without\n # confidence score this setting should always be False\n if self._ignore_missing:\n desc[\"(s)crop/ignore_missing\"] = True\n return desc\n" ]
[ [ "numpy.minimum", "numpy.full", "numpy.zeros", "numpy.amin" ] ]
skarakulak/hfsmx_for_LM
[ "8395c44057aa68e2889f4cd64d4dd511bd650ec8" ]
[ "examples/penn_treebank/run_language_modeling_hsfmx.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).\nGPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned\nusing a masked language modeling (MLM) loss.\n\"\"\"\n\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport pickle\nimport random\nimport re\nimport shutil\nfrom typing import Dict, List, Tuple, Optional\n\nimport numpy as np\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom transformers import (\n MODEL_WITH_LM_HEAD_MAPPING,\n WEIGHTS_NAME,\n AdamW,\n AutoConfig,\n AutoModelWithLMHead,\n AutoTokenizer,\n PreTrainedModel,\n PreTrainedTokenizer,\n get_linear_schedule_with_warmup,\n)\n\nfrom BertForMaskedLM_hsfmx import BertForMaskedLM_hsfmx\n\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\n\nMODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\n\ndef read_pkl(path):\n assert os.path.isfile(path), f'file does not exist: {path}'\n with open(path, 'rb') as handle:\n obj = pickle.load(handle)\n return obj\n\n\ndef map2new_idx(labels, new_idx_mapping):\n return torch.Tensor([\n [i if i == -100 else new_idx_mapping[i] if i in new_idx_mapping else 0 for i in row]\n for row in labels.tolist()]).long().to(labels.device)\n\nclass TextDataset(Dataset):\n def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512):\n assert os.path.isfile(file_path)\n\n block_size = block_size - (tokenizer.max_len - tokenizer.max_len_single_sentence)\n\n directory, filename = os.path.split(file_path)\n cached_features_file = os.path.join(\n directory, args.model_type + \"_cached_lm_\" + str(block_size) + \"_\" + filename\n )\n\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n with open(cached_features_file, \"rb\") as handle:\n self.examples = pickle.load(handle)\n else:\n logger.info(\"Creating features from dataset file at %s\", directory)\n\n self.examples = []\n try:\n with open(file_path, encoding=\"utf-8\") as f:\n text = f.read()\n except:\n with open(file_path, encoding=\"ISO-8859-1\") as f:\n text = f.read()\n\n tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))\n\n for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size\n 
self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size]))\n # Note that we are losing the last truncated example here for the sake of simplicity (no padding)\n # If your dataset is small, first you should look for a bigger one :-) and second you\n # can change this behavior by adding (model specific) padding.\n\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n with open(cached_features_file, \"wb\") as handle:\n pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, item):\n return torch.tensor(self.examples[item], dtype=torch.long)\n\n\nclass LineByLineTextDataset(Dataset):\n def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512):\n assert os.path.isfile(file_path)\n # Here, we do not cache the features, operating under the assumption\n # that we will soon use fast multithreaded tokenizers from the\n # `tokenizers` repo everywhere =)\n logger.info(\"Creating features from dataset file at %s\", file_path)\n\n with open(file_path, encoding=\"utf-8\") as f:\n lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]\n\n self.examples = tokenizer.batch_encode_plus(lines, add_special_tokens=True, max_length=block_size)[\"input_ids\"]\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i):\n return torch.tensor(self.examples[i], dtype=torch.long)\n\n\ndef load_and_cache_examples(args, tokenizer, evaluate=False):\n file_path = args.eval_data_file if evaluate else args.train_data_file\n if args.line_by_line:\n return LineByLineTextDataset(tokenizer, args, file_path=file_path, block_size=args.block_size)\n else:\n return TextDataset(tokenizer, args, file_path=file_path, block_size=args.block_size)\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef _sorted_checkpoints(args, checkpoint_prefix=\"checkpoint\", use_mtime=False) -> List[str]:\n ordering_and_checkpoint_path = []\n\n glob_checkpoints = glob.glob(os.path.join(args.output_dir, \"{}-*\".format(checkpoint_prefix)))\n\n for path in glob_checkpoints:\n if use_mtime:\n ordering_and_checkpoint_path.append((os.path.getmtime(path), path))\n else:\n regex_match = re.match(\".*{}-([0-9]+)\".format(checkpoint_prefix), path)\n if regex_match and regex_match.groups():\n ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))\n\n checkpoints_sorted = sorted(ordering_and_checkpoint_path)\n checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]\n return checkpoints_sorted\n\n\ndef _rotate_checkpoints(args, checkpoint_prefix=\"checkpoint\", use_mtime=False) -> None:\n if not args.save_total_limit:\n return\n if args.save_total_limit <= 0:\n return\n\n # Check if we should delete older checkpoint(s)\n checkpoints_sorted = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)\n if len(checkpoints_sorted) <= args.save_total_limit:\n return\n\n number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit)\n checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]\n for checkpoint in checkpoints_to_be_deleted:\n logger.info(\"Deleting older checkpoint [{}] due to args.save_total_limit\".format(checkpoint))\n shutil.rmtree(checkpoint)\n\n\ndef mask_tokens(inputs: torch.Tensor, tokenizer: 
PreTrainedTokenizer, args, new_idx_mapping: Optional[dict]=None\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. \"\"\"\n\n if tokenizer.mask_token is None:\n raise ValueError(\n \"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.\"\n )\n\n labels = inputs.clone()\n # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)\n probability_matrix = torch.full(labels.shape, args.mlm_probability)\n special_tokens_mask = [\n tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()\n ]\n probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)\n if tokenizer._pad_token is not None:\n padding_mask = labels.eq(tokenizer.pad_token_id)\n probability_matrix.masked_fill_(padding_mask, value=0.0)\n masked_indices = torch.bernoulli(probability_matrix).bool()\n labels[~masked_indices] = -100 # We only compute loss on masked tokens\n\n # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices\n inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced\n random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)\n inputs[indices_random] = random_words[indices_random]\n if args.idx_mapping_path is not None:\n labels = map2new_idx(labels, new_idx_mapping)\n\n # The rest of the time (10% of the time) we keep the masked input tokens unchanged\n return inputs, labels\n\n\ndef train(args, train_dataset, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, fix_transformer=False) -> Tuple[int, float]:\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n new_idx_mapping = None if args.idx_mapping_path is None else read_pkl(args.idx_mapping_path)\n\n def collate(examples: List[torch.Tensor]):\n if tokenizer._pad_token is None:\n return pad_sequence(examples, batch_first=True)\n return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)\n\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(\n train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate\n )\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n model = model.module if hasattr(model, \"module\") else model # Take care of distributed/parallel training\n model.resize_token_embeddings(len(tokenizer))\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n 
{\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ] if not fix_transformer else model.hsfmx.parameters()\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if (\n args.model_name_or_path\n and os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\"))\n and os.path.isfile(os.path.join(args.model_name_or_path, \"scheduler.pt\"))\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if args.model_name_or_path and os.path.exists(args.model_name_or_path):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n tr_loss, logging_loss = 0.0, 0.0\n\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs) if not fix_transformer else args.epochs_train_hsfmx,\n desc=\"Epoch\", disable=args.local_rank not in [-1, 0]\n )\n set_seed(args) # Added here for reproducibility\n for epoch in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n\n if args.local_rank != -1:\n train_sampler.set_epoch(epoch)\n\n for step, batch in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n inputs, labels = mask_tokens(batch, tokenizer, args, new_idx_mapping) if args.mlm else (batch, batch)\n inputs = inputs.to(args.device)\n labels = labels.to(args.device)\n model.train()\n # TODO: using `calc_log_prob` gives better perplexity, but doubles the training time.\n # optimize the implementation\n outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels)\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n if (\n args.local_rank == -1 and args.evaluate_during_training\n ): # Only evaluate when single GPU otherwise metrics may not average well\n results = evaluate(args, model, tokenizer)\n for key, value in results.items():\n 
tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n logging_loss = tr_loss\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n checkpoint_prefix = \"checkpoint\"\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"{}-{}\".format(checkpoint_prefix, global_step))\n os.makedirs(output_dir, exist_ok=True)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n _rotate_checkpoints(args, checkpoint_prefix)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prefix=\"\") -> Dict:\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_output_dir = args.output_dir\n\n eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)\n new_idx_mapping = None if args.idx_mapping_path is None else read_pkl(args.idx_mapping_path)\n\n if args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir, exist_ok=True)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n\n def collate(examples: List[torch.Tensor]):\n if tokenizer._pad_token is None:\n return pad_sequence(examples, batch_first=True)\n return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(\n eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate\n )\n\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n model.eval()\n\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n inputs, labels = mask_tokens(batch, tokenizer, args, new_idx_mapping) if args.mlm else (batch, batch)\n inputs = inputs.to(args.device)\n labels = labels.to(args.device)\n\n with torch.no_grad():\n if args.hsfmx:\n outputs = model(inputs, masked_lm_labels=labels, calc_log_prob=True)\n elif args.mlm:\n outputs = model(inputs, masked_lm_labels=labels, )\n else:\n outputs = model(inputs, labels=labels)\n lm_loss = outputs[0]\n eval_loss += lm_loss.mean().item()\n nb_eval_steps += 1\n\n eval_loss = eval_loss / nb_eval_steps\n perplexity = torch.exp(torch.tensor(eval_loss))\n\n result = {\"perplexity\": perplexity}\n\n output_eval_file = os.path.join(eval_output_dir, prefix, \"eval_results.txt\")\n with 
open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(prefix))\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n return result\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--train_data_file\", default=None, type=str, required=True, help=\"The input training data file (a text file).\"\n )\n parser.add_argument(\n \"--output_dir\",\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n parser.add_argument(\n \"--model_type\", type=str, required=True, help=\"The model architecture to be trained or fine-tuned.\",\n )\n\n # Other parameters\n parser.add_argument(\n \"--eval_data_file\",\n default=None,\n type=str,\n help=\"An optional input evaluation data file to evaluate the perplexity on (a text file).\",\n )\n parser.add_argument(\n \"--line_by_line\",\n action=\"store_true\",\n help=\"Whether distinct lines of text in the dataset are to be handled as distinct sequences.\",\n )\n parser.add_argument(\n \"--should_continue\", action=\"store_true\", help=\"Whether to continue from latest checkpoint in output_dir\"\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n help=\"The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.\",\n )\n parser.add_argument(\n \"--hsfmx\",\n action=\"store_true\",\n help=\"whether or not to run with hsfmx\"\n )\n parser.add_argument(\n \"--trees_path\",\n type=str,\n help=\"path of the trees to construct hsfmx\"\n )\n parser.add_argument(\n \"--idx_mapping_path\",\n type=str,\n default=None,\n help=\"given a path, reads the mapping to original tokens to the selected (most frequent) tokens, and applies it\"\n )\n parser.add_argument(\n \"--mlm\", action=\"store_true\", help=\"Train with masked-language modeling loss instead of language modeling.\"\n )\n parser.add_argument(\n \"--mlm_probability\", type=float, default=0.15, help=\"Ratio of tokens to mask for masked language modeling loss\"\n )\n\n parser.add_argument(\n \"--config_name\",\n default=None,\n type=str,\n help=\"Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=None,\n type=str,\n help=\"Optional pretrained tokenizer name or path if not the same as model_name_or_path. 
If both are None, initialize a new tokenizer.\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=None,\n type=str,\n help=\"Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)\",\n )\n parser.add_argument(\n \"--block_size\",\n default=-1,\n type=int,\n help=\"Optional input sequence length after tokenization.\"\n \"The training dataset will be truncated in block of this size for training.\"\n \"Default to the model max input length for single sentence inputs (take into account special tokens).\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\n \"--evaluate_during_training\", action=\"store_true\", help=\"Run evaluation during training at each logging step.\"\n )\n parser.add_argument(\"--epochs_train_hsfmx\", default=10, type=int, help=\"How many epochs to train the hsfmx for before fine-tuning the whole model.\")\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=4, type=int, help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=4, type=int, help=\"Batch size per GPU/CPU for evaluation.\"\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=1.0, type=float, help=\"Total number of training epochs to perform.\"\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=500, help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--save_total_limit\",\n type=int,\n default=None,\n help=\"Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default\",\n )\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\"\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n args = parser.parse_args()\n\n if args.model_type in [\"bert\", \"roberta\", \"distilbert\", \"camembert\"] and not args.mlm:\n raise ValueError(\n \"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the --mlm \"\n \"flag (masked language modeling).\"\n )\n if args.eval_data_file is None and args.do_eval:\n raise ValueError(\n \"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file \"\n \"or remove the --do_eval argument.\"\n )\n if args.should_continue:\n sorted_checkpoints = _sorted_checkpoints(args)\n if len(sorted_checkpoints) == 0:\n raise ValueError(\"Used --should_continue but no checkpoint was found in --output_dir.\")\n else:\n args.model_name_or_path = sorted_checkpoints[-1]\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n and not args.should_continue\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n set_seed(args)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab\n\n if args.config_name:\n config = AutoConfig.from_pretrained(args.config_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n # When we release a pip version exposing CONFIG_MAPPING,\n # we can do `config = CONFIG_MAPPING[args.model_type]()`.\n raise ValueError(\n \"You are instantiating a new config instance from scratch. This is not supported, but you can do it from another script, save it,\"\n \"and load it from here, using --config_name\"\n )\n\n if args.tokenizer_name:\n tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n raise ValueError(\n \"You are instantiating a new tokenizer from scratch. 
This is not supported, but you can do it from another script, save it,\"\n \"and load it from here, using --tokenizer_name\"\n )\n\n if args.block_size <= 0:\n args.block_size = tokenizer.max_len\n # Our input block size will be the max possible for the model\n else:\n args.block_size = min(args.block_size, tokenizer.max_len)\n\n if args.hsfmx:\n model = BertForMaskedLM_hsfmx(\n config,\n trees_path=args.trees_path,\n device=args.device\n )\n elif args.model_name_or_path:\n model = AutoModelWithLMHead.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir,\n )\n else:\n logger.info(\"Training new model from scratch\")\n model = AutoModelWithLMHead.from_config(config)\n\n model.to(args.device)\n\n if args.local_rank == 0:\n torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Training\n if args.do_train:\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)\n\n if args.local_rank == 0:\n torch.distributed.barrier()\n\n if args.epochs_train_hsfmx:\n global_step, tr_loss = train(args, train_dataset, model, tokenizer, fix_transformer=True)\n global_step, tr_loss = train(args, train_dataset, model, tokenizer)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n torch.save(model, os.path.join(args.output_dir, \"model.pkl\"))\n\n # Evaluation\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n result = evaluate(args, model, tokenizer, prefix='')\n result = dict((k + \"_{}\".format(global_step), v) for k, v in result.items())\n results.update(result)\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.distributed.get_world_size", "torch.utils.data.RandomSampler", "torch.cuda.is_available", "torch.nn.DataParallel", "torch.distributed.init_process_group", "torch.manual_seed", "torch.tensor", "torch.utils.data.DataLoader", "torch.device", "torch.cuda.manual_seed_all", "torch.nn.utils.rnn.pad_sequence", "torch.utils.data.SequentialSampler", "torch.nn.parallel.DistributedDataParallel", "torch.cuda.device_count", "torch.cuda.set_device", "torch.full", "torch.distributed.barrier", "numpy.random.seed", "torch.no_grad", "torch.bernoulli", "torch.utils.data.distributed.DistributedSampler" ] ]
arthur-x/SimplySAC
[ "f02ca729b073759915400450cb2b905600805180" ]
[ "plot.py" ]
[ "import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n\ndef plot_curve(env):\n min_l = 2001\n ret_list = []\n for s in range(5):\n df = pd.read_csv('saves/' + str(env+1) + '/log' + str(s+1) + '.csv')\n ret = df[['return']].to_numpy().transpose(1, 0)[0]\n if len(ret) < min_l:\n min_l = len(ret)\n for i in range(len(ret) - 1):\n ret[i + 1] = ret[i] * 0.9 + ret[i + 1] * 0.1\n ret_list.append(ret)\n data = np.zeros((5, min_l))\n for s in range(5):\n data[s, :] = ret_list[s][:min_l]\n mean = np.mean(data, axis=0)\n mini = np.min(data, axis=0)\n maxi = np.max(data, axis=0)\n stamps = np.array([i * 1e-3 for i in range(min_l)])\n plt.plot(stamps, mean, label='SAC', lw=1.0)\n plt.fill_between(stamps, mini, maxi, alpha=0.2)\n plt.title(env_list[env])\n plt.xlabel('number of environment steps (x $\\mathregular{10^6}$)')\n plt.ylabel('return')\n plt.xlim(0, 2)\n plt.legend()\n plt.show()\n\n\nif __name__ == '__main__':\n env_list = ['Hopper-v2', 'Walker2d-v2', 'HalfCheetah-v2', 'Ant-v2', 'Humanoid-v2',\n 'HopperBulletEnv-v0', 'Walker2DBulletEnv-v0', 'HalfCheetahBulletEnv-v0',\n 'AntBulletEnv-v0', 'HumanoidBulletEnv-v0']\n mpl.style.use('seaborn')\n for env in range(10):\n plot_curve(env)\n" ]
[ [ "numpy.max", "matplotlib.style.use", "matplotlib.pyplot.xlim", "numpy.zeros", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "numpy.min", "numpy.mean", "matplotlib.pyplot.legend", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
0xADD1E/DepSolve
[ "3ef11b66f3f71496dc5370ced8241ffdd2c57d31" ]
[ "Circular.py" ]
[ "# In[]\n# Imports and Stuff\nimport z3\nimport csv\nimport sys\nimport pprint\nimport numpy as np\nfrom collections import defaultdict\nfrom collections import namedtuple\npp = pprint.PrettyPrinter(indent=4)\nDependency = namedtuple('Dependency', 'me dependsOn')\nsystemFile = 'RunFile.smt2'\nresultsFile = 'Results'\n\n# In[]\n# Create our deps list\ndeps = []\ny = {}\nwith open(sys.argv[1]) as myFile:\n myFile.readline()\n r = csv.reader(myFile)\n for line in r:\n a = line[0]\n for b in line[1].split(','):\n me = b\n dep = a\n deps.append(Dependency(me,dep))\n if me not in y: y[me] = z3.Int(me)\n if dep not in y: y[dep] = z3.Int(dep)\ndeps\n\n# In[]\n# Construct Transitive Closure... This... Could get hairy\n\n#setup\nsymbols_list = list(y)\nn_symbols = len(symbols_list)\nsymbols_indexes = {s:i for i,s in enumerate(symbols_list)}\ntransitive = np.full((n_symbols,n_symbols), False)\n#fill\nfor me,dep in deps:\n i,j = symbols_indexes[me],symbols_indexes[dep]\n transitive[i][j] = True\n#transitive\nfor i in range(n_symbols):\n for j in range(n_symbols):\n for k in range(n_symbols):\n transitive[i][j] |= transitive[i][k] and transitive[k][j]\npp.pprint(symbols_indexes)\npp.pprint(transitive)\n\n# In[]\n# Create the system\ns = z3.Solver()\nfor me,dep in deps: \n if transitive[symbols_indexes[dep]][symbols_indexes[me]]:\n s.add(y[dep]<=y[me])\n else:\n s.add(y[dep]<y[me])\n\nfor k in y:\n s.add(y[k]>0)\n\nwith open(systemFile, 'w') as myFile:\n myFile.write(s.to_smt2())\n pp.pprint('SMT System saved as {}'.format(systemFile))\n\n# In[]\n# Run the solver\nif str(s.check()) == 'sat':\n m = s.model()\n result = {}\n for a in y:\n stage = str(m.eval(y[a]))\n if stage not in result: result[stage] = []\n result[stage].append(a)\nelse:\n result = 'unsat'\n\nwith open(resultsFile, 'w') as myFile:\n myFile.write(str(result))\n pp.pprint('Results saved as {}'.format(resultsFile))\npp.pprint(result)\n\n" ]
[ [ "numpy.full" ] ]
stijnana/druktemeter
[ "cdb9030e54985028ef75677c477c11caf7989a12" ]
[ "geluidleves.py" ]
[ "from Adafruit_IO import *\nimport RPi.GPIO as GPIO\nimport time as yotimma\nimport numpy as np\nimport sounddevice as sd\n\n#Connectie met de adafruit api\naio = Client('Nizari' , '')\n\n#setten van de pins\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nPIR_PIN = 3\nGPIO.setup(PIR_PIN, GPIO.IN)\n\n#print dat de code ready is\nprint('Starting up the PIR Module (click on STOP to exit)')\nprint('Ready')\n\ntotalDb = []\n\n\n#over hoeveel tijd wil je het gemidelde pakken van hoe druk het is\nduration = 3 #in seconds\n\n\n#functie die ophaalt wat de geluids levels zijn\ndef audio_callback(indata, frames, time, status):\n volume_norm = np.linalg.norm(indata) * 10\n volume_norm = int(volume_norm)\n totalDb.append(volume_norm)\n print(volume_norm)\n \n \n#send via de adafuit api data naar het dashboard\ndef send_data(dbArray):\n length = len(dbArray)\n total = sum(dbArray)\n \n average = total / length\n \n averageRound = int(average)\n aio.send(\"sound-levels\", int(averageRound))\n\n totalDb.clear()\n \n \n#de check of er beweging is te zien voor de sensoren als er beweging is erkent start hij de opnamen van een gemidlde geluids levels \nwhile True:\n if GPIO.input(PIR_PIN):\n print('Motion Detected')\n stream = sd.InputStream(callback=audio_callback)\n with stream:\n sd.sleep(duration * 1000)\n send_data(totalDb)\n else:\n print('No Motion Detected')\n aio.send(\"sound-levels\", 0)\n \n yotimma.sleep(3)\n yotimma.sleep(1)\n" ]
[ [ "numpy.linalg.norm" ] ]
hemanth-s17/Deep-Fish-Tracker-Network
[ "95af21ae662c1f2dee008f41abc6d400a2970ed8" ]
[ "networks/siamese/convolutional_siamese.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import print_function\nimport numpy as np\n\nimport random\nimport os\nfrom cv2 import imread\nfrom keras.layers import Input,Conv2D,MaxPooling2D,Flatten,Dense,Dropout,Lambda,LSTM,BatchNormalization,LeakyReLU,PReLU\nfrom keras import Sequential\nfrom keras.datasets import mnist\nfrom keras.models import Model\nfrom keras.layers import Input, Flatten, Dense, Dropout, Lambda\nfrom keras.optimizers import RMSprop,Adam\nfrom keras import initializers, regularizers, optimizers\nfrom keras import backend as K\nfrom keras.regularizers import l2\nfrom keras.initializers import VarianceScaling\nfrom keras.callbacks import ModelCheckpoint\nimport matplotlib.pyplot as plt\nimport numpy.random as rng\n\n\n\n\ndef contrastive_loss(y_true, y_pred):\n margin = 0.6\n square_pred = K.square(y_pred)\n margin_square = K.square(K.maximum(margin - y_pred, 0))\n return K.mean(y_true * square_pred + (1 - y_true) * margin_square)\n\n\ndef W_init(shape,name=None):\n values = rng.normal(loc=0,scale=1e-2,size=shape)\n return K.variable(values,name=name)\n\n\ndef b_init(shape,name=None):\n values=rng.normal(loc=0.5,scale=1e-2,size=shape)\n return K.variable(values,name=name)\n\n\ndef SiameseNetwork(input_shape):\n top_input = Input(input_shape)\n\n bottom_input = Input(input_shape)\n\n # Network\n\n model = Sequential()\n\n model.add(Conv2D(96,(7,7),activation='relu'))\n \n model.add(MaxPooling2D())\n \n model.add(BatchNormalization())\n \n model.add(Conv2D(64,(5,5),activation='relu'))\n \n model.add(MaxPooling2D())\n \n model.add(BatchNormalization())\n\n model.add(Conv2D(64,(5,5),activation='relu'))\n \n model.add(MaxPooling2D())\n \n model.add(BatchNormalization())\n\n model.add(Flatten())\n\n model.add(Dense(4096,activation='relu'))\n\n model.add(BatchNormalization())\n \n model.add(Dense(1024,activation='relu'))\n \n model.add(BatchNormalization())\n\n model.add(Dense(512,activation='relu'))\n \n model.add(BatchNormalization())\n\n encoded_top = model(top_input)\n\n encoded_bottom = model(bottom_input)\n\n L1_layer = Lambda(lambda tensors:K.abs(tensors[0] - tensors[1]))\n \n L1_distance = L1_layer([encoded_top, encoded_bottom])\n\n prediction = Dense(1,activation='sigmoid',bias_initializer=b_init)(L1_distance)\n\n siamesenet = Model(inputs=[top_input,bottom_input],outputs=prediction)\n\n return siamesenet\n\n\ndef loadimgs(path,n = 0):\n X=[]\n y = []\n curr_y = n\n \n for alphabet in os.listdir(path):\n print(\"loading alphabet: \" + alphabet)\n alphabet_path = os.path.join(path,alphabet)\n \n category_images=[]\n\n for filename in os.listdir(alphabet_path):\n image_path = os.path.join(alphabet_path, filename)\n image = imread(image_path).astype('float32')/255\n category_images.append(image)\n y.append(curr_y)\n try:\n X.append(np.stack(category_images))\n except ValueError as e:\n print(e)\n print(\"error - category_images:\", category_images)\n curr_y += 1\n y = np.vstack(y)\n X = np.stack(X)\n return X,y\n\ndef create_pairs(x, digit_indices):\n '''Positive and negative pair creation.\n Alternates between positive and negative pairs.\n '''\n num_classes = 23\n pairs = []\n labels = []\n n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1\n for d in range(num_classes):\n for i in range(n):\n z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]\n # each folder should have same number of image ex 1447 here\n f21 = z1//1447\n l31 = z1 % 1447\n f22 = z2//1447\n l32 = z2 % 1447\n pairs += [[x[f21][l31], x[f22][l32]]]\n inc = 
random.randrange(1, num_classes)\n dn = (d + inc) % num_classes\n z1, z2 = digit_indices[d][i], digit_indices[dn][i]\n f21 = z1//1447\n l31 = z1 % 1447\n f22 = z2//1447\n l32 = z2 % 1447\n pairs += [[x[f21][l31], x[f22][l32]]]\n labels += [1, 0]\n return np.array(pairs), np.array(labels)\n\n\nX,y = loadimgs('Training_Folder')\ndigit_indices = [np.where(y == i)[0] for i in range(23)]\ntr_pairs,tr_y = create_pairs(X,digit_indices)\nprint(tr_y.dtype)\nprint(tr_y.shape)\nprint(tr_y)\nprint(tr_pairs[:,0][0])\n\n\ninput_shape = (53,121,3)\nmodel = SiameseNetwork(input_shape)\nfilepath = \"/home/hemanth12/Paper/Networks/Siamese/Models/simaese-{epoch:02d}-{val_acc:.2f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=False, mode='max')\nrms = RMSprop()\nprint(model.summary())\nmodel.compile(loss='mse', optimizer=rms, metrics=['accuracy'])\nhistory = model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y.astype('float32'),\n batch_size=32,\n epochs=30,\n validation_split = 0.1,callbacks = [checkpoint])\n\n\n# Plot training & validation accuracy values\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n\n# Plot training & validation loss values\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n\n\n\n\n\n\n\n" ]
[ [ "numpy.random.normal", "numpy.array", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "numpy.where", "numpy.stack", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "numpy.vstack" ] ]
Fuchai/mayoehr
[ "ec79d2157bedf4f4f0fc783d86523df8a758e27c" ]
[ "death/trashcan/babi/babitrainnotmine.py" ]
[ "# I decide to run my model on babi again to see if the convergen ce problem is with my model or dataset\n\nfrom dnc import DNC\nimport torch\nimport numpy\nfrom pathlib import Path\nimport pickle\nimport os\nfrom os.path import join, abspath\nfrom death.DNC.trashcan.babi.babigen import PreGenData\nfrom torch.autograd import Variable\n\n\n# task 10 of babi\n\nclass dummy_context_mgr():\n def __enter__(self):\n return None\n\n def __exit__(self, exc_type, exc_value, traceback):\n return False\n\n\ndef save_model(net, optim, epoch):\n epoch = int(epoch)\n task_dir = os.path.dirname(abspath(__file__))\n pickle_file = Path(task_dir).joinpath(\"saves/DNCfull_\" + str(epoch) + \".pkl\")\n pickle_file = pickle_file.open('wb')\n torch.save((net, optim, epoch), pickle_file)\n\n\ndef load_model(computer):\n task_dir = os.path.dirname(abspath(__file__))\n save_dir = Path(task_dir) / \"saves\"\n highestepoch = -1\n for child in save_dir.iterdir():\n epoch = str(child).split(\"_\")[2].split('.')[0]\n epoch = int(epoch)\n # some files are open but not written to yet.\n if epoch > highestepoch and child.stat().st_size > 2048:\n highestepoch = epoch\n if highestepoch == -1:\n return computer, None, -1\n pickle_file = Path(task_dir).joinpath(\"saves/DNCfull_\" + str(highestepoch) + \".pkl\")\n print(\"loading model at \", pickle_file)\n pickle_file = pickle_file.open('rb')\n model, optim, epoch = torch.load(pickle_file)\n\n print('Loaded model at epoch ', highestepoch)\n\n for child in save_dir.iterdir():\n epoch = str(child).split(\"_\")[2].split('.')[0]\n if int(epoch) != highestepoch:\n os.remove(child)\n print('Removed incomplete save file and all else.')\n\n return model, optim, epoch\n\n\ndef save_model_old(net, optim, epoch):\n state_dict = net.state_dict()\n for key in state_dict.keys():\n state_dict[key] = state_dict[key].cpu()\n task_dir = os.path.dirname(abspath(__file__))\n print(task_dir)\n pickle_file = Path(\"../saves/DNC_\" + str(epoch) + \".pkl\")\n pickle_file = pickle_file.open('wb')\n\n torch.save({\n 'epoch': epoch,\n 'state_dict': state_dict,\n 'optimizer': optim},\n pickle_file)\n\n\ndef load_model_old(net):\n task_dir = os.path.dirname(abspath(__file__))\n save_dir = Path(task_dir).parent / \"saves\"\n highestepoch = -1\n for child in save_dir.iterdir():\n epoch = str(child).split(\"_\")[1].split('.')[0]\n # some files are open but not written to yet.\n if int(epoch) > highestepoch and child.stat().st_size > 2048:\n highestepoch = int(epoch)\n pickle_file = Path(\"../saves/DNC_\" + str(highestepoch) + \".pkl\")\n pickle_file = pickle_file.open('rb')\n ret = torch.load(pickle_file)\n\n net.load_state_dict(ret['state_dict'])\n print('Loaded model at epoch ', highestepoch)\n\n for child in save_dir.iterdir():\n epoch = str(child).split(\"_\")[1].split('.')[0]\n if int(epoch) != highestepoch:\n os.remove(child)\n print('Removed incomplete save file and all else.')\n\n return ret['epoch'], ret['optimizer']\n\n\ndef run_one_story(computer, optimizer, story_length, batch_size, pgd, input_dim, mhx, validate=False):\n # to promote code reuse\n if not validate:\n input_data, target_output, critical_index = pgd.get_train()\n else:\n input_data, target_output, critical_index = pgd.get_validate()\n\n input_data = Variable(torch.Tensor(input_data).cuda())\n target_output = Variable(torch.Tensor(target_output).cuda())\n stairs = torch.Tensor(numpy.arange(0, batch_size * story_length, story_length))\n critical_index = critical_index + stairs.unsqueeze(1)\n critical_index = 
critical_index.view(-1)\n critical_index = critical_index.long().cuda()\n\n criterion = torch.nn.CrossEntropyLoss()\n\n # torch.no_grad() must be instantiated to act as a context manager\n with torch.no_grad() if validate else dummy_context_mgr():\n\n # a single story\n story_output, (_, mhx, _) = computer(input_data, (None, mhx, None), reset_experience=True, pass_through_memory=True)\n if (story_output!=story_output).any():\n raise ValueError(\"nan is found in the batch output.\")\n\n target_output = target_output.view(-1)\n story_output = story_output.view(-1, input_dim)\n story_output = story_output[critical_index, :]\n target_output = target_output[critical_index].long()\n\n story_loss = criterion(story_output, target_output)\n if not validate:\n # I chose to backward a derivative only after a whole story has been taken in\n # This should lead to a more stable, but initially slower convergence.\n # clear gradients accumulated from the previous story before backpropagating\n optimizer.zero_grad()\n story_loss.backward()\n optimizer.step()\n\n return story_loss, mhx\n\n\ndef train(computer, optimizer, story_length, batch_size, pgd, input_dim, starting_epoch, epochs_count, epoch_batches_count):\n mhx=None\n\n for epoch in range(starting_epoch, epochs_count):\n\n for batch in range(epoch_batches_count):\n\n train_story_loss, mhx = run_one_story(computer, optimizer, story_length, batch_size, pgd, input_dim,mhx)\n print(\"learning. epoch: %4d, batch number: %4d, training loss: %.4f\" %\n (epoch, batch, train_story_loss[0]))\n # keeping the running loss causes GPU memory leak.\n # reassignment of variables retain graph\n # reassignment with 0 changes the internal value and does not seem to reinitiate the object?\n # do not keep running loss. Not necessary anyway.\n val_freq = 16\n if batch % val_freq == val_freq - 1:\n # also test the model: run without gradient updates and discard the returned memory state\n val_loss, _ = run_one_story(computer, optimizer, story_length, batch_size, pgd, input_dim, mhx, validate=True)\n print('validate. epoch: %4d, batch number: %4d, validation loss: %.4f' %\n (epoch, batch, val_loss[0]))\n\n save_model(computer, optimizer, epoch)\n print(\"model saved for epoch \", epoch)\n\n\ndef main():\n story_limit = 150\n epoch_batches_count = 64\n epochs_count = 1024\n lr = 1e-11\n optim = 1\n starting_epoch = -1\n bs=32\n pgd = PreGenData(bs)\n\n task_dir = os.path.dirname(abspath(__file__))\n processed_data_dir = join(task_dir, 'data',\"processed\")\n lexicon_dictionary=pickle.load( open(join(processed_data_dir, 'lexicon-dict.pkl'), 'rb'))\n x=len(lexicon_dictionary)\n\n\n computer = DNC(x,x,num_layers=4,num_hidden_layers=4,cell_size=4,nr_cells=4,read_heads=4,gpu_id=0).cuda()\n\n # if load model\n # computer, optim, starting_epoch = load_model(computer)\n\n computer = computer.cuda()\n if optim is None:\n optimizer = torch.optim.Adam(computer.parameters(), lr=lr)\n else:\n print('use Adadelta optimizer with learning rate ', lr)\n optimizer = torch.optim.Adadelta(computer.parameters(), lr=lr)\n\n # starting with the epoch after the loaded one\n train(computer, optimizer, story_limit, bs, pgd, x, int(starting_epoch) + 1, epochs_count, epoch_batches_count)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.save", "numpy.arange", "torch.load", "torch.Tensor", "torch.nn.CrossEntropyLoss" ] ]
CVIU-CSU/mediapipe_turtlebot3
[ "ef4171d114af44d14c1525934e6c57a12f318690" ]
[ "model/point_history_classifier/point_history_classifier.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport tensorflow as tf\n\n\nclass PointHistoryClassifier(object):\n def __init__(\n self,\n model_path='model/point_history_classifier/point_history_classifier.tflite',\n score_th=0.5,\n invalid_value=0,\n num_threads=1,\n ):\n self.interpreter = tf.lite.Interpreter(model_path=model_path,\n num_threads=num_threads)\n\n self.interpreter.allocate_tensors()\n self.input_details = self.interpreter.get_input_details()\n self.output_details = self.interpreter.get_output_details()\n\n self.score_th = score_th\n self.invalid_value = invalid_value\n\n def __call__(\n self,\n point_history,\n ):\n input_details_tensor_index = self.input_details[0]['index']\n self.interpreter.set_tensor(\n input_details_tensor_index,\n np.array([point_history], dtype=np.float32))\n self.interpreter.invoke()\n\n output_details_tensor_index = self.output_details[0]['index']\n\n result = self.interpreter.get_tensor(output_details_tensor_index)\n\n result_index = np.argmax(np.squeeze(result))\n\n if np.squeeze(result)[result_index] < self.score_th:\n result_index = self.invalid_value\n\n return result_index\n" ]
[ [ "numpy.array", "tensorflow.lite.Interpreter", "numpy.squeeze" ] ]
sophiaalthammer/dossier_coliee
[ "997168ab076caf04beecd5834f3ba25f399c3f84" ]
[ "eval/eval_bm25_coliee2021.py" ]
[ "import os\r\nimport jsonlines\r\nimport json\r\nimport ast\r\nimport argparse\r\nimport pytrec_eval\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nimport seaborn as sns\r\nimport numpy as np\r\nsns.set(color_codes=True, font_scale=1.2)\r\nfrom collections import Counter\r\n\r\n\r\ndef read_run_whole_doc(bm25_folder: str, scores='ranks'):\r\n # geh in den bm25 folder, lies in dokument und query: dann dict {query: {top 1000}}\r\n run = {}\r\n for root, dirs, files in os.walk(bm25_folder):\r\n for file in files:\r\n with open(os.path.join(bm25_folder, file), 'r') as f:\r\n lines = f.readlines()\r\n lines_dict = {}\r\n for i in range(len(lines)):\r\n if scores == 'scores':\r\n lines_dict.update({lines[i].split(' ')[0].strip().strip('_0'): float(lines[i].split(' ')[-1].strip())})\r\n else:\r\n lines_dict.update({lines[i].split(' ')[0].strip().strip('_0'): len(lines) - i})\r\n run.update({file.split('_')[2]: lines_dict})\r\n return run\r\n\r\n\r\ndef ranking_eval(qrels, run, output_dir, output_file= 'eval_bm25_aggregate_overlap.txt'):\r\n # trec eval\r\n evaluator = pytrec_eval.RelevanceEvaluator(qrels, pytrec_eval.supported_measures)\r\n\r\n results = evaluator.evaluate(run)\r\n\r\n def print_line(measure, scope, value):\r\n print('{:25s}{:8s}{:.4f}'.format(measure, scope, value))\r\n\r\n def write_line(measure, scope, value):\r\n return '{:25s}{:8s}{:.4f}'.format(measure, scope, value)\r\n\r\n for query_id, query_measures in sorted(results.items()):\r\n for measure, value in sorted(query_measures.items()):\r\n print_line(measure, query_id, value)\r\n\r\n #for measure in sorted(query_measures.keys()):\r\n # print_line(\r\n # measure,\r\n # 'all',\r\n # pytrec_eval.compute_aggregated_measure(\r\n # measure,\r\n # [query_measures[measure]\r\n # for query_measures in results.values()]))\r\n\r\n with open(os.path.join(output_dir, output_file), 'w') as output:\r\n for measure in sorted(query_measures.keys()):\r\n output.write(write_line(\r\n measure,\r\n 'all',\r\n pytrec_eval.compute_aggregated_measure(\r\n measure,\r\n [query_measures[measure]\r\n for query_measures in results.values()])) + '\\n')\r\n\r\n\r\ndef read_label_file(label_file: str):\r\n with open(label_file, 'rb') as f:\r\n labels = json.load(f)\r\n\r\n # other format of labels:\r\n qrels = {}\r\n for key, values in labels.items():\r\n val_format = {}\r\n for value in values:\r\n val_format.update({'{}'.format(value.split('.')[0]): 1})\r\n qrels.update({key.split('.')[0]: val_format})\r\n return qrels\r\n\r\n\r\ndef read_run_separate(bm25_folder: str, scores='ranks'):\r\n run = {}\r\n for root, dirs, files in os.walk(bm25_folder):\r\n for file in files:\r\n with open(os.path.join(bm25_folder, file), 'r') as f:\r\n lines = f.readlines()\r\n lines_dict = {}\r\n for i in range(len(lines)):\r\n if scores == 'scores':\r\n lines_dict.update({lines[i].split(' ')[0]: float(lines[i].split(' ')[-1].strip())})\r\n else:\r\n lines_dict.update({lines[i].split(' ')[0]: len(lines) - i})\r\n if run.get(file.split('_')[2]):\r\n run.get(file.split('_')[2]).update({file.split('_')[3]: lines_dict})\r\n else:\r\n run.update({file.split('_')[2]: {}})\r\n run.get(file.split('_')[2]).update({file.split('_')[3]: lines_dict})\r\n return run\r\n\r\n\r\n\r\ndef read_run_separate_aggregate(bm25_folder: str, aggregation='interleave', scores='ranks'):\r\n # geh in den bm25 folder, lies in dokument und query: dann dict {query: {top 1000}}\r\n if aggregation == 'overlap_scores' or aggregation == 'mean_scores':\r\n scores = 'scores'\r\n\r\n run = 
read_run_separate(bm25_folder, scores)\r\n\r\n # now i need an aggregation function here, different choices\r\n if aggregation == 'overlap_docs':\r\n # now aggregate according to the overlap of the docs in the paragraphs!\r\n run_aggregated = aggregate_run_overlap(run)\r\n elif aggregation == 'interleave':\r\n run_aggregated = aggregate_run_interleave(run)\r\n elif aggregation == 'overlap_ranks':\r\n # now aggregate according to the overlap of the docs in the paragraphs!\r\n run_aggregated = aggregate_run_ranks_overlap(run)\r\n elif aggregation == 'overlap_scores':\r\n run_aggregated = aggregate_run_ranks_overlap(run)\r\n elif aggregation == 'mean_scores':\r\n run_aggregated = aggregate_run_mean_score(run)\r\n if run_aggregated:\r\n return run_aggregated\r\n else:\r\n return run\r\n\r\n\r\ndef read_run_whole_doc(bm25_folder: str, scores='ranks'):\r\n # geh in den bm25 folder, lies in dokument und query: dann dict {query: {top 1000}}\r\n run = {}\r\n for root, dirs, files in os.walk(bm25_folder):\r\n for file in files:\r\n with open(os.path.join(bm25_folder, file), 'r') as f:\r\n lines = f.readlines()\r\n lines_dict = {}\r\n for i in range(len(lines)):\r\n if scores == 'scores':\r\n lines_dict.update({lines[i].split(' ')[0].split('_')[0]: float(lines[i].split(' ')[-1].strip())})\r\n else:\r\n lines_dict.update({lines[i].split(' ')[0].split('_')[0]: len(lines) - i})\r\n run.update({file.split('_')[2]: lines_dict})\r\n return run\r\n\r\n\r\ndef aggregate_run_overlap(run):\r\n for doc in run.keys():\r\n for para in run.get(doc).keys():\r\n for para_rel in run.get(doc).get(para).keys():\r\n run.get(doc).get(para)[para_rel] = 1\r\n run_aggregated = {}\r\n for doc in run.keys():\r\n for para in run.get(doc).keys():\r\n for para_rel, value in run.get(doc).get(para).items():\r\n if run_aggregated.get(doc):\r\n if run_aggregated.get(doc).get('_'.join(para_rel.split('_')[:1])):\r\n run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:1]): run_aggregated.get(\r\n doc).get('_'.join(para_rel.split('_')[:1])) + 1})\r\n else:\r\n run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:1]): 1})\r\n else:\r\n run_aggregated.update({doc: {}})\r\n if run_aggregated.get(doc).get('_'.join(para_rel.split('_')[:1])):\r\n run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:1]): run_aggregated.get(\r\n doc).get('_'.join(para_rel.split('_')[:1])) + 1})\r\n else:\r\n run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:1]): 1})\r\n return run_aggregated\r\n\r\n\r\ndef aggregate_run_ranks_overlap(run):\r\n run_aggregated = {}\r\n for doc in run.keys():\r\n for para in run.get(doc).keys():\r\n for para_rel, value in run.get(doc).get(para).items():\r\n if run_aggregated.get(doc):\r\n if run_aggregated.get(doc).get('_'.join(para_rel.split('_')[:1])):\r\n run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:1]): run_aggregated.get(\r\n doc).get('_'.join(para_rel.split('_')[:1])) + value})\r\n else:\r\n run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:1]): value})\r\n else:\r\n run_aggregated.update({doc: {}})\r\n if run_aggregated.get(doc).get('_'.join(para_rel.split('_')[:1])):\r\n run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:1]): run_aggregated.get(\r\n doc).get('_'.join(para_rel.split('_')[:1])) + value})\r\n else:\r\n run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:1]): value})\r\n return run_aggregated\r\n\r\n\r\ndef aggregate_run_interleave(run):\r\n run_aggregated = {}\r\n for doc in run.keys():\r\n for para in 
run.get(doc).keys():\r\n for para_rel, value in run.get(doc).get(para).items():\r\n if run_aggregated.get(doc):\r\n run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:1]): value})\r\n else:\r\n run_aggregated.update({doc: {}})\r\n run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:1]): value})\r\n return run_aggregated\r\n\r\n\r\ndef aggregate_run_mean_score(run):\r\n run_aggregated = {}\r\n for doc in run.keys():\r\n for para in run.get(doc).keys():\r\n for para_rel, value in run.get(doc).get(para).items():\r\n if run_aggregated.get(doc):\r\n if run_aggregated.get(doc).get('_'.join(para_rel.split('_')[:1])):\r\n run_aggregated.get(doc).get('_'.join(para_rel.split('_')[:1])).append(value)\r\n #run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:2]): })\r\n else:\r\n run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:1]): [value]})\r\n else:\r\n run_aggregated.update({doc: {}})\r\n if run_aggregated.get(doc).get('_'.join(para_rel.split('_')[:1])):\r\n run_aggregated.get(doc).get('_'.join(para_rel.split('_')[:1])).append(value)\r\n #print(list)\r\n #run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:2]): list})\r\n else:\r\n run_aggregated.get(doc).update({'_'.join(para_rel.split('_')[:1]): [value]})\r\n for doc in run_aggregated.keys():\r\n for key, item in run_aggregated.get(doc).items():\r\n run_aggregated.get(doc).update({key: np.mean(item)})\r\n return run_aggregated\r\n\r\n\r\ndef eval_ranking_bm25(label_file, bm25_folder, output_dir, output_file: str, aggregation='interleave', scores='ranks'):\r\n\r\n if aggregation == 'overlap_scores':\r\n scores = 'scores'\r\n\r\n qrels = read_label_file(label_file)\r\n if 'separate' in bm25_folder:\r\n run = read_run_separate_aggregate(bm25_folder, aggregation, scores)\r\n else:\r\n run = read_run_whole_doc(bm25_folder, scores)\r\n\r\n ranking_eval(qrels, run, output_dir, output_file)\r\n\r\n\r\ndef ranking_eval(qrels, run, output_dir, output_file= 'eval_bm25_aggregate_overlap.txt'):\r\n # trec eval\r\n evaluator = pytrec_eval.RelevanceEvaluator(qrels, {'recall_100', 'recall_200', 'recall_300', 'recall_500', 'recall_1000'})\r\n\r\n results = evaluator.evaluate(run)\r\n\r\n def print_line(measure, scope, value):\r\n print('{:25s}{:8s}{:.4f}'.format(measure, scope, value))\r\n\r\n def write_line(measure, scope, value):\r\n return '{:25s}{:8s}{:.4f}'.format(measure, scope, value)\r\n\r\n for query_id, query_measures in sorted(results.items()):\r\n for measure, value in sorted(query_measures.items()):\r\n print_line(measure, query_id, value)\r\n\r\n #for measure in sorted(query_measures.keys()):\r\n # print_line(\r\n # measure,\r\n # 'all',\r\n # pytrec_eval.compute_aggregated_measure(\r\n # measure,\r\n # [query_measures[measure]\r\n # for query_measures in results.values()]))\r\n\r\n with open(os.path.join(output_dir, output_file), 'w') as output:\r\n for measure in sorted(query_measures.keys()):\r\n output.write(write_line(\r\n measure,\r\n 'all',\r\n pytrec_eval.compute_aggregated_measure(\r\n measure,\r\n [query_measures[measure]\r\n for query_measures in results.values()])) + '\\n')\r\n\r\n\r\ndef eval_ranking_overall_recall(label_file, bm25_folder, output_dir, mode, aggregation='interleave'):\r\n qrels = read_label_file(label_file)\r\n if 'separate' in bm25_folder:\r\n run = read_run_separate_aggregate(bm25_folder, aggregation)\r\n else:\r\n run = read_run_whole_doc(bm25_folder)\r\n\r\n # overall recall\r\n rec_per_topic = []\r\n for key, value in qrels.items():\r\n rel_run = run.get(key)\r\n 
value_list = [val for val in value.keys()]\r\n rel_run_list = [rel for rel in rel_run.keys()]\r\n rec_per_topic.append(len(list(set(value_list) & set(rel_run_list))) / len(value_list))\r\n\r\n print('Overall recall of {} is {}'.format(''.join([mode[0], mode[1]]), np.mean(rec_per_topic)))\r\n\r\n with open(os.path.join(output_dir, 'eval_recall_overall.txt'), 'w') as output:\r\n output.write('Overall recall of {} is {}'.format(''.join([mode[0], mode[1]]), np.mean(rec_per_topic)))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n #\r\n # config\r\n #\r\n\r\n #parser = argparse.ArgumentParser()\r\n\r\n #parser.add_argument('--label-file', action='store', dest='label_file',\r\n # help='org file with the guid and the labels', required=True)\r\n #parser.add_argument('--pred-file', action='store', dest='pred_file',\r\n # help='file with the binary prediction per guid', required=False)\r\n #parser.add_argument('--bm25-folder', action='store', dest='bm25_folder',\r\n # help='folder with the BM25 retrieval per guid which the result is compared to', required=False)\r\n #args = parser.parse_args()\r\n\r\n label_file = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/train/train_wo_val_labels.json'\r\n label_file_val = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/val/val_labels.json'\r\n\r\n\r\n def eval_mode(mode):\r\n bm25_folder = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/bm25/search/{}/{}'.format(mode[0], mode[1])\r\n output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/bm25/eval/{}/{}'.format(mode[0], mode[1])\r\n\r\n if bm25_folder:\r\n if mode[0] == 'val':\r\n eval_ranking_bm25(label_file_val, bm25_folder, output_dir, 'eval_bm25_r300_aggregate_{}.txt'.format(mode[2]),aggregation=mode[2])\r\n elif mode[0] == 'train':\r\n eval_ranking_bm25(label_file, bm25_folder, output_dir, 'eval_bm25_r300_aggregate_{}.txt'.format(mode[2]), aggregation=mode[2])\r\n\r\n\r\n def eval_all_bm25():\r\n ## train ##\r\n # whole doc evaluation\r\n eval_mode(['train', 'whole_doc_para_only', 'overlap_docs'])\r\n eval_mode(['train', 'whole_doc_w_summ_intro', 'overlap_docs'])\r\n\r\n # sep para: interleave\r\n eval_mode(['train', 'separately_para_only', 'interleave'])\r\n eval_mode(['train', 'separately_para_w_summ_intro', 'interleave'])\r\n\r\n # sep para: overlap docs\r\n eval_mode(['train', 'separately_para_only', 'overlap_docs'])\r\n eval_mode(['train', 'separately_para_w_summ_intro', 'overlap_docs'])\r\n\r\n # sep para: overlap ranks\r\n eval_mode(['train', 'separately_para_only', 'overlap_ranks'])\r\n eval_mode(['train', 'separately_para_w_summ_intro', 'overlap_ranks'])\r\n\r\n # sep para: overlap scores\r\n eval_mode(['train', 'separately_para_only', 'overlap_scores'])\r\n eval_mode(['train', 'separately_para_w_summ_intro', 'overlap_scores'])\r\n\r\n # sep para: mean scores\r\n eval_mode(['train', 'separately_para_only', 'mean_scores'])\r\n eval_mode(['train', 'separately_para_w_summ_intro', 'mean_scores'])\r\n\r\n\r\n ## val ##\r\n # whole doc evaluation\r\n eval_mode(['val', 'whole_doc_para_only', 'overlap_docs'])\r\n eval_mode(['val', 'whole_doc_w_summ_intro', 'overlap_docs'])\r\n\r\n # sep para: interleave\r\n eval_mode(['val', 'separately_para_only', 'interleave'])\r\n eval_mode(['val', 'separately_para_w_summ_intro', 'interleave'])\r\n\r\n # sep para: overlap docs\r\n eval_mode(['val', 'separately_para_only', 'overlap_docs'])\r\n eval_mode(['val', 'separately_para_w_summ_intro', 'overlap_docs'])\r\n\r\n # sep para: overlap ranks\r\n eval_mode(['val', 
'separately_para_only', 'overlap_ranks'])\r\n eval_mode(['val', 'separately_para_w_summ_intro', 'overlap_ranks'])\r\n\r\n # sep para: overlap scores\r\n eval_mode(['val', 'separately_para_only', 'overlap_scores'])\r\n eval_mode(['val', 'separately_para_w_summ_intro', 'overlap_scores'])\r\n\r\n # sep para: mean scores\r\n eval_mode(['val', 'separately_para_only', 'mean_scores'])\r\n eval_mode(['val', 'separately_para_w_summ_intro', 'mean_scores'])\r\n\r\n\r\n eval_all_bm25()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" ]
[ [ "numpy.mean" ] ]
skachuck/oggm
[ "b391e6923fb0c5269e10ea260f5199a26d5e1082" ]
[ "oggm/shop/its_live.py" ]
[ "import logging\n\nimport numpy as np\n\ntry:\n import salem\nexcept ImportError:\n pass\ntry:\n import rasterio\nexcept ImportError:\n pass\n\nfrom oggm import utils\nfrom oggm.exceptions import InvalidWorkflowError\n\n# Module logger\nlog = logging.getLogger(__name__)\n\nbase_url = ('http://its-live-data.jpl.nasa.gov.s3.amazonaws.com/'\n 'velocity_mosaic/landsat/v00.0/static/cog/')\nregions = ['HMA', 'ANT', 'PAT', 'ALA', 'CAN', 'GRE', 'ICE', 'SRA']\n\nregion_files = {}\nfor reg in regions:\n d = {}\n for var in ['vx', 'vy', 'vy_err', 'vx_err']:\n d[var] = base_url + '{}_G0120_0000_{}.tif'.format(reg, var)\n region_files[reg] = d\n\nregion_grids = {}\n\nrgi_region_links = {'01': 'ALA', '02': 'ALA',\n '03': 'CAN', '04': 'CAN',\n '05': 'GRE',\n '06': 'ICE',\n '07': 'SRA', '09': 'SRA',\n '13': 'HMA', '14': 'HMA', '15': 'HMA',\n '17': 'PAT',\n '19': 'ANT',\n }\n\n\ndef region_grid(reg):\n\n global region_grids\n\n if reg not in region_grids:\n with utils.get_lock():\n fp = utils.file_downloader(region_files[reg]['vx'])\n ds = salem.GeoTiff(fp)\n region_grids[reg] = ds.grid\n\n return region_grids[reg]\n\n\ndef _in_grid(grid, lon, lat):\n\n i, j = grid.transform([lon], [lat], maskout=True)\n return np.all(~ (i.mask | j.mask))\n\n\ndef find_region(gdir):\n\n reg = rgi_region_links.get(gdir.rgi_region, None)\n\n if reg is None:\n return None\n\n grid = region_grid(reg)\n\n if _in_grid(grid, gdir.cenlon, gdir.cenlat):\n return reg\n else:\n return None\n\n\ndef _reproject_and_scale(gdir, do_error=False):\n \"\"\"Reproject and scale itslive data, avoid code duplication for error\"\"\"\n\n\n reg = find_region(gdir)\n if reg is None:\n raise InvalidWorkflowError('There does not seem to be its_live data '\n 'available for this glacier')\n\n vnx = 'vx'\n vny = 'vy'\n if do_error:\n vnx += '_err'\n vny += '_err'\n\n with utils.get_lock():\n fx = utils.file_downloader(region_files[reg][vnx])\n fy = utils.file_downloader(region_files[reg][vny])\n\n # Open the files\n dsx = salem.GeoTiff(fx)\n dsy = salem.GeoTiff(fy)\n # subset them to our map\n grid_gla = gdir.grid.center_grid\n proj_vel = dsx.grid.proj\n x0, x1, y0, y1 = grid_gla.extent_in_crs(proj_vel)\n dsx.set_subset(corners=((x0, y0), (x1, y1)), crs=proj_vel, margin=4)\n dsy.set_subset(corners=((x0, y0), (x1, y1)), crs=proj_vel, margin=4)\n grid_vel = dsx.grid.center_grid\n\n # TODO: this should be taken care of by salem\n # https://github.com/fmaussion/salem/issues/171\n with rasterio.Env():\n with rasterio.open(fx) as src:\n nodata = getattr(src, 'nodata', -32767.0)\n\n # Error files are wrong\n if nodata == 0:\n nodata = -32767.0\n\n # Get the coords at t0\n xx0, yy0 = grid_vel.center_grid.xy_coordinates\n\n # Compute coords at t1\n xx1 = dsx.get_vardata()\n yy1 = dsy.get_vardata()\n non_valid = (xx1 == nodata) | (yy1 == nodata)\n xx1[non_valid] = np.NaN\n yy1[non_valid] = np.NaN\n orig_vel = np.sqrt(xx1**2 + yy1**2)\n xx1 += xx0\n yy1 += yy0\n\n # Transform both to glacier proj\n xx0, yy0 = salem.transform_proj(proj_vel, grid_gla.proj, xx0, yy0)\n xx1, yy1 = salem.transform_proj(proj_vel, grid_gla.proj, xx1, yy1)\n\n # Correct no data after proj as well (inf)\n xx1[non_valid] = np.NaN\n yy1[non_valid] = np.NaN\n\n # Compute velocities from there\n vx = xx1 - xx0\n vy = yy1 - yy0\n\n # Scale back velocities - https://github.com/OGGM/oggm/issues/1014\n new_vel = np.sqrt(vx**2 + vy**2)\n p_ok = new_vel > 1e-5 # avoid div by zero\n vx[p_ok] = vx[p_ok] * orig_vel[p_ok] / new_vel[p_ok]\n vy[p_ok] = vy[p_ok] * orig_vel[p_ok] / new_vel[p_ok]\n\n # And 
transform to local map\n vx = grid_gla.map_gridded_data(vx, grid=grid_vel, interp='linear')\n vy = grid_gla.map_gridded_data(vy, grid=grid_vel, interp='linear')\n\n # Write\n with utils.ncDataset(gdir.get_filepath('gridded_data'), 'a') as nc:\n vn = 'obs_icevel_x'\n if do_error:\n vn = vn.replace('obs', 'err')\n if vn in nc.variables:\n v = nc.variables[vn]\n else:\n v = nc.createVariable(vn, 'f4', ('y', 'x', ), zlib=True)\n v.units = 'm yr-1'\n ln = 'ITS LIVE velocity data in x map direction'\n if do_error:\n ln = 'Uncertainty of ' + ln\n v.long_name = ln\n v[:] = vx.filled(np.nan)\n\n vn = 'obs_icevel_y'\n if do_error:\n vn = vn.replace('obs', 'err')\n if vn in nc.variables:\n v = nc.variables[vn]\n else:\n v = nc.createVariable(vn, 'f4', ('y', 'x', ), zlib=True)\n v.units = 'm yr-1'\n ln = 'ITS LIVE velocity data in y map direction'\n if do_error:\n ln = 'Uncertainty of ' + ln\n v.long_name = ln\n v[:] = vy.filled(np.nan)\n\n\[email protected]_task(log, writes=['gridded_data'])\ndef velocity_to_gdir(gdir, add_error=False):\n \"\"\"Reproject the its_live files to the given glacier directory.\n\n Variables are added to the gridded_data nc file.\n\n Reprojecting velocities from one map proj to another is done\n reprojecting the vector distances. In this process, absolute velocities\n might change as well because map projections do not always preserve\n distances -> we scale them back to the original velocities as per the\n ITS_LIVE documentation that states that velocities are given in\n ground units, i.e. absolute velocities.\n\n We use bilinear interpolation to reproject the velocities to the local\n glacier map.\n\n Parameters\n ----------\n gdir : :py:class:`oggm.GlacierDirectory`\n where to write the data\n add_error : bool\n also reproject and scale the error data\n \"\"\"\n\n if not gdir.has_file('gridded_data'):\n raise InvalidWorkflowError('Please run `glacier_masks` before running '\n 'this task')\n\n _reproject_and_scale(gdir, do_error=False)\n if add_error:\n _reproject_and_scale(gdir, do_error=True)\n" ]
[ [ "numpy.all", "numpy.sqrt" ] ]
awai54st/LUTNet
[ "81b044f31d1131bee1a7fae41fc4d2fb102ea73a" ]
[ "tiled-lutnet/lutnet/h5py-2-hls/CIFAR_10/h52header_51lut_tm_spase.py" ]
[ "import h5py\nimport numpy as np\n\ndef SignNumpy(x):\n return np.greater(x,0)\n\n# convert a fully connected binarized layer plus batch normalization into \n# the simplified form (binary weight and positive threshold)\n# note that the neurons are assumed to be in the columns of the weight\n# matrix\ndef makeBNComplex(after_bn_thres, fanin, beta, gamma, mean, invstd, use_rowmajor=False, usePopCount=True):\n outs = fanin.shape[0]\n print (\"Extracting FCBN complex, outs = %d\" % (outs))\n # we'll fill in the binarized weights and thresholds iteratively\n# w_bin = range(ins*outs)\n thresholds = range(outs)\n for neuron in range(outs):\n # compute a preliminary threshold from the batchnorm parameters\n thres = mean[neuron] + ((after_bn_thres - beta[neuron]) / (abs(gamma[neuron]*invstd[neuron])+1e-4))\n need_flip = 0\n # ensure all neurons activate on the \"positive\" side, so we can use\n # greater-than-threshold activation\n# if gamma[neuron]*invstd[neuron] < 0:\n# need_flip = 1\n# thres = -thres\n# if thres > 32767:\n# thres = 32767\n# if thres < -32768:\n# thres = -32768\n # turn threshold into \"number of 1s\" (popcount) instead of signed sum\n if usePopCount:\n #thresholds[neuron] = int((fanin[neuron] + thres) / 2)\n thresholds[neuron] = (fanin[neuron] + thres) / 2\n else:\n thresholds[neuron] = thres\n# # binarize the synapses\n# for synapse in range(ins):\n# # note how we change from col major to row major if requested\n# dest_ind = neuron*ins+synapse if use_rowmajor else synapse*outs+neuron\n# if need_flip:\n# w_bin[dest_ind] = binarize(-weights[synapse][neuron])\n# else:\n# w_bin[dest_ind] = binarize(weights[synapse][neuron])\n# # reshape the output as desired\n# if use_rowmajor:\n# w_bin = np.asarray(w_bin).reshape((outs, ins))\n# else:\n# w_bin = np.asarray(w_bin).reshape((ins, outs))\n \n#return (w_bin, thresholds)\n return thresholds\n\n\n# binarize and pack convolutional layer weights into a matrix and compute\n# thresholds from the conv bias and batchnorm parameters\ndef makeConvBNComplex(fanin, beta, gamma, mean, invstd, interleaveChannels=False, usePopCount=True):\n numOut = fanin.shape[0]\n print (\"Extracting conv-BN complex, OFM=%d\" % (numOut))\n # the fanin is used to ensure positive-only threshold\n# w_bin = range(numOut * numIn * k * k)\n # one threshold per output channel\n thresholds = range(numOut)\n# dest_ind = 0\n # we'll fill in the binarized weights and thresholds iteratively\n for neuron in range(numOut):\n # compute a preliminary threshold from the batchnorm parameters,\n # subtracting the conv bias from the batchnorm mean\n thres = mean[neuron] - (beta[neuron] / (gamma[neuron]*invstd[neuron]))\n# need_flip = 0\n # ensure all neurons activate on the \"positive\" side, so we can use\n # greater-than-threshold activation\n if gamma[neuron]*invstd[neuron] < 0:\n# need_flip = 1\n thres = -thres\n # turn threshold into \"number of 1s\" (popcount) instead of signed sum\n if usePopCount:\n thresholds[neuron] = int((fanin[neuron] + thres) / 2)\n else:\n thresholds[neuron] = thres\n# # go through each weight of each convolutional kernel\n# if interleaveChannels:\n# for ky in range(k):\n# for kx in range(k):\n# for ifm in range(numIn):\n# f = -1 if need_flip else +1\n# w_bin[dest_ind] = binarize(f*weights[neuron][ifm][ky][kx])\n# dest_ind += 1\n# else:\n# for ifm in range(numIn):\n# for ky in range(k):\n# for kx in range(k):\n# f = -1 if need_flip else +1\n# w_bin[dest_ind] = binarize(f*weights[neuron][ifm][ky][kx])\n# dest_ind += 1\n# \n# # reshape the output as 
desired\n# w_bin = np.asarray(w_bin).reshape((numOut, fanin))\n# return (w_bin, thresholds)\n return thresholds\n\nif __name__ == \"__main__\":\n\n print(\"Loading the pretrained parameters...\")\n\n bl = h5py.File(\"pretrained_network_51lut_tm.h5\", 'r')\n #bl = h5py.File(\"dummy.h5\", 'r')\n \n # init model parameter lists\n\n batch_norm_eps=1e-4\n weights_w = []\n weights_c = []\n gammas = []\n means = []\n pruning_masks = []\n rand_maps = []\n bn_betas = []\n bn_gammas = []\n bn_means = []\n bn_inv_stds = []\n TN = [3,8,8,8,8,8,8,8,8] # hand-coded tiling factors for all layers\n TM = [8,8,8,8,8,8,8,8,10]\n \n # conv layer 1\n \n bl_w1 = np.array(bl[\"model_weights\"][\"binary_conv_1\"][\"binary_conv_1\"][\"Variable_1:0\"])\n bl_rand_map_0 = np.array(bl[\"model_weights\"][\"binary_conv_1\"][\"binary_conv_1\"][\"rand_map_0:0\"])\n# bl_pruning_mask = np.array(bl[\"model_weights\"][\"binary_conv_1\"][\"binary_conv_1\"][\"pruning_mask:0\"]).reshape(bl_w1.shape)\n bl_gamma = np.array(bl[\"model_weights\"][\"binary_conv_1\"][\"binary_conv_1\"][\"Variable:0\"])\n \n bl_bn_beta = np.array(bl[\"model_weights\"][\"batch_normalization_1\"][\"batch_normalization_1\"][\"beta:0\"])\n bl_bn_gamma = np.array(bl[\"model_weights\"][\"batch_normalization_1\"][\"batch_normalization_1\"][\"gamma:0\"])\n bl_bn_mean = np.array(bl[\"model_weights\"][\"batch_normalization_1\"][\"batch_normalization_1\"][\"moving_mean:0\"])\n bl_bn_inv_std = 1/np.sqrt(np.array(bl[\"model_weights\"][\"batch_normalization_1\"][\"batch_normalization_1\"][\"moving_variance:0\"])+batch_norm_eps)\n \n bl_means = bl[\"model_weights\"][\"residual_sign_1\"][\"residual_sign_1\"][\"means:0\"]\n\n ##Pruning\n #bl_w1 = bl_w1 * np.reshape(bl_pruning_mask, (bl_w1.shape))\n \n w_bram = [bl_w1]\n weights_w.extend([w_bram])\n c_lut = [np.ones([3,3,1,8])]\n weights_c.extend([c_lut])\n\n #pruning_masks = [pruning_masks, bl_pruning_mask]\n pruning_masks.extend([np.ones([3,3,1,8])]) \n #gammas = [gammas, bl_gamma]\n gammas=[bl_gamma]\n #pruning_masks = [pruning_masks, bl_pruning_mask]\n# pruning_masks=[bl_pruning_mask]\n #rand_maps = [rand_maps, bl_rand_map]\n rand_maps=[bl_rand_map_0]\n #means = [means, bl_means]\n means=[bl_means]\n #bn_betas = [bn_betas, bl_bn_beta]\n bn_betas=[bl_bn_beta]\n #bn_gammas = [bn_gammas, bl_bn_gamma]\n bn_gammas=[bl_bn_gamma]\n #bn_means = [bn_means, bl_bn_mean]\n bn_means=[bl_bn_mean]\n #bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]\n bn_inv_stds=[bl_bn_inv_std]\n \n # conv layer 2\n\n bl_c1 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_1:0\"])\n bl_c2 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_2:0\"])\n bl_c3 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_3:0\"])\n bl_c4 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_4:0\"])\n bl_c5 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_5:0\"])\n bl_c6 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_6:0\"])\n bl_c7 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_7:0\"])\n bl_c8 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_8:0\"]) \n bl_c9 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_9:0\"])\n bl_c10 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_10:0\"])\n bl_c11 = 
np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_11:0\"])\n bl_c12 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_12:0\"])\n bl_c13 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_13:0\"])\n bl_c14 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_14:0\"])\n bl_c15 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_15:0\"])\n bl_c16 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_16:0\"])\n bl_c17 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_17:0\"])\n bl_c18 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_18:0\"])\n bl_c19 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_19:0\"])\n bl_c20 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_20:0\"])\n bl_c21 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_21:0\"])\n bl_c22 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_22:0\"])\n bl_c23 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_23:0\"])\n bl_c24 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_24:0\"]) \n bl_c25 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_25:0\"])\n bl_c26 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_26:0\"])\n bl_c27 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_27:0\"])\n bl_c28 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_28:0\"])\n bl_c29 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_29:0\"])\n bl_c30 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_30:0\"])\n bl_c31 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_31:0\"])\n bl_c32 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_32:0\"])\n bl_w1 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable_33:0\"])\n bl_rand_map_0 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"rand_map_0:0\"])\n bl_rand_map_1 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"rand_map_1:0\"])\n bl_rand_map_2 = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"rand_map_2:0\"])\n bl_pruning_mask = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"pruning_mask:0\"]).reshape(bl_c1.shape)\n bl_gamma = np.array(bl[\"model_weights\"][\"binary_conv_2\"][\"binary_conv_2\"][\"Variable:0\"])\n \n bl_bn_beta = np.array(bl[\"model_weights\"][\"batch_normalization_2\"][\"batch_normalization_2\"][\"beta:0\"])\n bl_bn_gamma = np.array(bl[\"model_weights\"][\"batch_normalization_2\"][\"batch_normalization_2\"][\"gamma:0\"])\n bl_bn_mean = np.array(bl[\"model_weights\"][\"batch_normalization_2\"][\"batch_normalization_2\"][\"moving_mean:0\"])\n bl_bn_inv_std = 1/np.sqrt(np.array(bl[\"model_weights\"][\"batch_normalization_2\"][\"batch_normalization_2\"][\"moving_variance:0\"])+batch_norm_eps)\n \n bl_means = bl[\"model_weights\"][\"residual_sign_2\"][\"residual_sign_2\"][\"means:0\"]\n \n w_bram = [bl_w1]\n c_lut = [bl_c1*bl_pruning_mask, bl_c2*bl_pruning_mask, bl_c3*bl_pruning_mask, 
bl_c4*bl_pruning_mask, bl_c5*bl_pruning_mask, bl_c6*bl_pruning_mask, bl_c7*bl_pruning_mask, bl_c8*bl_pruning_mask, bl_c9*bl_pruning_mask, bl_c10*bl_pruning_mask, bl_c11*bl_pruning_mask, bl_c12*bl_pruning_mask, bl_c13*bl_pruning_mask, bl_c14*bl_pruning_mask, bl_c15*bl_pruning_mask, bl_c16*bl_pruning_mask, bl_c17*bl_pruning_mask, bl_c18*bl_pruning_mask, bl_c19*bl_pruning_mask, bl_c20*bl_pruning_mask, bl_c21*bl_pruning_mask, bl_c22*bl_pruning_mask, bl_c23*bl_pruning_mask, bl_c24*bl_pruning_mask, bl_c25*bl_pruning_mask, bl_c26*bl_pruning_mask, bl_c27*bl_pruning_mask, bl_c28*bl_pruning_mask, bl_c29*bl_pruning_mask, bl_c30*bl_pruning_mask, bl_c31*bl_pruning_mask, bl_c32*bl_pruning_mask]\n r_map = [bl_rand_map_0, bl_rand_map_1, bl_rand_map_2]\n #weights = [weights, w_lut]\n weights_c.extend([c_lut])\n weights_w.extend([w_bram])\n #gammas = [gammas, bl_gamma]\n gammas.extend([bl_gamma])\n #pruning_masks = [pruning_masks, bl_pruning_mask]\n pruning_masks.extend([bl_pruning_mask])\n #rand_maps = [rand_maps, bl_rand_map]\n rand_maps.extend([r_map])\n #means = [means, bl_means]\n means.extend([bl_means])\n #bn_betas = [bn_betas, bl_bn_beta]\n bn_betas.extend([bl_bn_beta])\n #bn_gammas = [bn_gammas, bl_bn_gamma]\n bn_gammas.extend([bl_bn_gamma])\n #bn_means = [bn_means, bl_bn_mean]\n bn_means.extend([bl_bn_mean])\n #bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]\n bn_inv_stds.extend([bl_bn_inv_std])\n\n \n # conv layer 3\n\n\n bl_c1 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_1:0\"])\n bl_c2 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_2:0\"])\n bl_c3 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_3:0\"])\n bl_c4 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_4:0\"])\n bl_c5 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_5:0\"])\n bl_c6 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_6:0\"])\n bl_c7 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_7:0\"])\n bl_c8 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_8:0\"]) \n bl_c9 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_9:0\"])\n bl_c10 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_10:0\"])\n bl_c11 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_11:0\"])\n bl_c12 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_12:0\"])\n bl_c13 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_13:0\"])\n bl_c14 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_14:0\"])\n bl_c15 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_15:0\"])\n bl_c16 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_16:0\"])\n bl_c17 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_17:0\"])\n bl_c18 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_18:0\"])\n bl_c19 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_19:0\"])\n bl_c20 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_20:0\"])\n bl_c21 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_21:0\"])\n bl_c22 = 
np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_22:0\"])\n bl_c23 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_23:0\"])\n bl_c24 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_24:0\"]) \n bl_c25 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_25:0\"])\n bl_c26 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_26:0\"])\n bl_c27 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_27:0\"])\n bl_c28 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_28:0\"])\n bl_c29 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_29:0\"])\n bl_c30 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_30:0\"])\n bl_c31 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_31:0\"])\n bl_c32 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_32:0\"])\n bl_w1 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable_33:0\"])\n bl_rand_map_0 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"rand_map_0:0\"])\n bl_rand_map_1 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"rand_map_1:0\"])\n bl_rand_map_2 = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"rand_map_2:0\"])\n bl_pruning_mask = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"pruning_mask:0\"]).reshape(bl_c1.shape)\n bl_gamma = np.array(bl[\"model_weights\"][\"binary_conv_3\"][\"binary_conv_3\"][\"Variable:0\"])\n \n bl_bn_beta = np.array(bl[\"model_weights\"][\"batch_normalization_3\"][\"batch_normalization_3\"][\"beta:0\"])\n bl_bn_gamma = np.array(bl[\"model_weights\"][\"batch_normalization_3\"][\"batch_normalization_3\"][\"gamma:0\"])\n bl_bn_mean = np.array(bl[\"model_weights\"][\"batch_normalization_3\"][\"batch_normalization_3\"][\"moving_mean:0\"])\n bl_bn_inv_std = 1/np.sqrt(np.array(bl[\"model_weights\"][\"batch_normalization_3\"][\"batch_normalization_3\"][\"moving_variance:0\"])+batch_norm_eps)\n \n bl_means = bl[\"model_weights\"][\"residual_sign_3\"][\"residual_sign_3\"][\"means:0\"]\n \n w_bram = [bl_w1]\n c_lut = [bl_c1*bl_pruning_mask, bl_c2*bl_pruning_mask, bl_c3*bl_pruning_mask, bl_c4*bl_pruning_mask, bl_c5*bl_pruning_mask, bl_c6*bl_pruning_mask, bl_c7*bl_pruning_mask, bl_c8*bl_pruning_mask, bl_c9*bl_pruning_mask, bl_c10*bl_pruning_mask, bl_c11*bl_pruning_mask, bl_c12*bl_pruning_mask, bl_c13*bl_pruning_mask, bl_c14*bl_pruning_mask, bl_c15*bl_pruning_mask, bl_c16*bl_pruning_mask, bl_c17*bl_pruning_mask, bl_c18*bl_pruning_mask, bl_c19*bl_pruning_mask, bl_c20*bl_pruning_mask, bl_c21*bl_pruning_mask, bl_c22*bl_pruning_mask, bl_c23*bl_pruning_mask, bl_c24*bl_pruning_mask, bl_c25*bl_pruning_mask, bl_c26*bl_pruning_mask, bl_c27*bl_pruning_mask, bl_c28*bl_pruning_mask, bl_c29*bl_pruning_mask, bl_c30*bl_pruning_mask, bl_c31*bl_pruning_mask, bl_c32*bl_pruning_mask]\n r_map = [bl_rand_map_0, bl_rand_map_1, bl_rand_map_2]\n #weights = [weights, w_lut]\n weights_c.extend([c_lut])\n weights_w.extend([w_bram])\n #gammas = [gammas, bl_gamma]\n gammas.extend([bl_gamma])\n #pruning_masks = [pruning_masks, bl_pruning_mask]\n pruning_masks.extend([bl_pruning_mask])\n #rand_maps = [rand_maps, bl_rand_map]\n rand_maps.extend([r_map])\n #means = [means, bl_means]\n means.extend([bl_means])\n 
#bn_betas = [bn_betas, bl_bn_beta]\n bn_betas.extend([bl_bn_beta])\n #bn_gammas = [bn_gammas, bl_bn_gamma]\n bn_gammas.extend([bl_bn_gamma])\n #bn_means = [bn_means, bl_bn_mean]\n bn_means.extend([bl_bn_mean])\n #bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]\n bn_inv_stds.extend([bl_bn_inv_std])\n\n\n \n # conv layer 4\n\n bl_c1 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_1:0\"])\n bl_c2 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_2:0\"])\n bl_c3 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_3:0\"])\n bl_c4 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_4:0\"])\n bl_c5 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_5:0\"])\n bl_c6 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_6:0\"])\n bl_c7 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_7:0\"])\n bl_c8 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_8:0\"]) \n bl_c9 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_9:0\"])\n bl_c10 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_10:0\"])\n bl_c11 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_11:0\"])\n bl_c12 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_12:0\"])\n bl_c13 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_13:0\"])\n bl_c14 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_14:0\"])\n bl_c15 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_15:0\"])\n bl_c16 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_16:0\"])\n bl_c17 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_17:0\"])\n bl_c18 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_18:0\"])\n bl_c19 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_19:0\"])\n bl_c20 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_20:0\"])\n bl_c21 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_21:0\"])\n bl_c22 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_22:0\"])\n bl_c23 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_23:0\"])\n bl_c24 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_24:0\"]) \n bl_c25 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_25:0\"])\n bl_c26 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_26:0\"])\n bl_c27 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_27:0\"])\n bl_c28 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_28:0\"])\n bl_c29 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_29:0\"])\n bl_c30 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_30:0\"])\n bl_c31 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_31:0\"])\n bl_c32 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_32:0\"])\n bl_w1 = 
np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable_33:0\"])\n bl_rand_map_0 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"rand_map_0:0\"])\n bl_rand_map_1 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"rand_map_1:0\"])\n bl_rand_map_2 = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"rand_map_2:0\"])\n bl_pruning_mask = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"pruning_mask:0\"]).reshape(bl_c1.shape)\n bl_gamma = np.array(bl[\"model_weights\"][\"binary_conv_4\"][\"binary_conv_4\"][\"Variable:0\"])\n \n bl_bn_beta = np.array(bl[\"model_weights\"][\"batch_normalization_4\"][\"batch_normalization_4\"][\"beta:0\"])\n bl_bn_gamma = np.array(bl[\"model_weights\"][\"batch_normalization_4\"][\"batch_normalization_4\"][\"gamma:0\"])\n bl_bn_mean = np.array(bl[\"model_weights\"][\"batch_normalization_4\"][\"batch_normalization_4\"][\"moving_mean:0\"])\n bl_bn_inv_std = 1/np.sqrt(np.array(bl[\"model_weights\"][\"batch_normalization_4\"][\"batch_normalization_4\"][\"moving_variance:0\"])+batch_norm_eps)\n \n bl_means = bl[\"model_weights\"][\"residual_sign_4\"][\"residual_sign_4\"][\"means:0\"]\n \n w_bram = [bl_w1]\n c_lut = [bl_c1*bl_pruning_mask, bl_c2*bl_pruning_mask, bl_c3*bl_pruning_mask, bl_c4*bl_pruning_mask, bl_c5*bl_pruning_mask, bl_c6*bl_pruning_mask, bl_c7*bl_pruning_mask, bl_c8*bl_pruning_mask, bl_c9*bl_pruning_mask, bl_c10*bl_pruning_mask, bl_c11*bl_pruning_mask, bl_c12*bl_pruning_mask, bl_c13*bl_pruning_mask, bl_c14*bl_pruning_mask, bl_c15*bl_pruning_mask, bl_c16*bl_pruning_mask, bl_c17*bl_pruning_mask, bl_c18*bl_pruning_mask, bl_c19*bl_pruning_mask, bl_c20*bl_pruning_mask, bl_c21*bl_pruning_mask, bl_c22*bl_pruning_mask, bl_c23*bl_pruning_mask, bl_c24*bl_pruning_mask, bl_c25*bl_pruning_mask, bl_c26*bl_pruning_mask, bl_c27*bl_pruning_mask, bl_c28*bl_pruning_mask, bl_c29*bl_pruning_mask, bl_c30*bl_pruning_mask, bl_c31*bl_pruning_mask, bl_c32*bl_pruning_mask]\n r_map = [bl_rand_map_0, bl_rand_map_1, bl_rand_map_2]\n #weights = [weights, w_lut]\n weights_c.extend([c_lut])\n weights_w.extend([w_bram])\n #gammas = [gammas, bl_gamma]\n gammas.extend([bl_gamma])\n #pruning_masks = [pruning_masks, bl_pruning_mask]\n pruning_masks.extend([bl_pruning_mask])\n #rand_maps = [rand_maps, bl_rand_map]\n rand_maps.extend([r_map])\n #means = [means, bl_means]\n means.extend([bl_means])\n #bn_betas = [bn_betas, bl_bn_beta]\n bn_betas.extend([bl_bn_beta])\n #bn_gammas = [bn_gammas, bl_bn_gamma]\n bn_gammas.extend([bl_bn_gamma])\n #bn_means = [bn_means, bl_bn_mean]\n bn_means.extend([bl_bn_mean])\n #bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]\n bn_inv_stds.extend([bl_bn_inv_std])\n\n \n \n # conv layer 5\n\n bl_c1 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_1:0\"])\n bl_c2 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_2:0\"])\n bl_c3 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_3:0\"])\n bl_c4 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_4:0\"])\n bl_c5 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_5:0\"])\n bl_c6 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_6:0\"])\n bl_c7 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_7:0\"])\n bl_c8 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_8:0\"]) 
\n bl_c9 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_9:0\"])\n bl_c10 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_10:0\"])\n bl_c11 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_11:0\"])\n bl_c12 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_12:0\"])\n bl_c13 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_13:0\"])\n bl_c14 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_14:0\"])\n bl_c15 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_15:0\"])\n bl_c16 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_16:0\"])\n bl_c17 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_17:0\"])\n bl_c18 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_18:0\"])\n bl_c19 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_19:0\"])\n bl_c20 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_20:0\"])\n bl_c21 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_21:0\"])\n bl_c22 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_22:0\"])\n bl_c23 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_23:0\"])\n bl_c24 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_24:0\"]) \n bl_c25 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_25:0\"])\n bl_c26 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_26:0\"])\n bl_c27 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_27:0\"])\n bl_c28 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_28:0\"])\n bl_c29 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_29:0\"])\n bl_c30 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_30:0\"])\n bl_c31 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_31:0\"])\n bl_c32 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_32:0\"])\n bl_w1 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable_33:0\"])\n bl_rand_map_0 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"rand_map_0:0\"])\n bl_rand_map_1 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"rand_map_1:0\"])\n bl_rand_map_2 = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"rand_map_2:0\"])\n bl_pruning_mask = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"pruning_mask:0\"]).reshape(bl_c1.shape)\n bl_gamma = np.array(bl[\"model_weights\"][\"binary_conv_5\"][\"binary_conv_5\"][\"Variable:0\"])\n \n bl_bn_beta = np.array(bl[\"model_weights\"][\"batch_normalization_5\"][\"batch_normalization_5\"][\"beta:0\"])\n bl_bn_gamma = np.array(bl[\"model_weights\"][\"batch_normalization_5\"][\"batch_normalization_5\"][\"gamma:0\"])\n bl_bn_mean = np.array(bl[\"model_weights\"][\"batch_normalization_5\"][\"batch_normalization_5\"][\"moving_mean:0\"])\n bl_bn_inv_std = 1/np.sqrt(np.array(bl[\"model_weights\"][\"batch_normalization_5\"][\"batch_normalization_5\"][\"moving_variance:0\"])+batch_norm_eps)\n 
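# --------------------------------------------------------------------------
# The 1/sqrt(moving_variance + eps) factor computed above folds batch norm
# into a single affine threshold test at inference time. makeBNComplex,
# which consumes these arrays near the end of this script, has to realise
# the standard BNN batch-norm-to-threshold algebra; that algebra is sketched
# below for reference (the helper is illustrative, not the repo's exact
# makeBNComplex implementation):
def bn_threshold_sketch(level, fanin, beta, gamma, mean, inv_std,
                        use_popcount=True):
    # gamma * (y - mean) * inv_std + beta crosses `level` at
    # y0 = mean + (level - beta) / (gamma * inv_std)
    y0 = mean + (level - beta) / (gamma * inv_std)
    if use_popcount:
        # with +/-1 activations, the pre-BN sum is y = 2*popcount - fanin,
        # so the equivalent popcount threshold is (y0 + fanin) / 2
        return (y0 + fanin) / 2.0
    return y0
# (Where gamma is negative the comparison direction flips; the real
# conversion has to track that sign as well.)
# --------------------------------------------------------------------------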
\n bl_means = bl[\"model_weights\"][\"residual_sign_5\"][\"residual_sign_5\"][\"means:0\"]\n \n w_bram = [bl_w1]\n c_lut = [bl_c1*bl_pruning_mask, bl_c2*bl_pruning_mask, bl_c3*bl_pruning_mask, bl_c4*bl_pruning_mask, bl_c5*bl_pruning_mask, bl_c6*bl_pruning_mask, bl_c7*bl_pruning_mask, bl_c8*bl_pruning_mask, bl_c9*bl_pruning_mask, bl_c10*bl_pruning_mask, bl_c11*bl_pruning_mask, bl_c12*bl_pruning_mask, bl_c13*bl_pruning_mask, bl_c14*bl_pruning_mask, bl_c15*bl_pruning_mask, bl_c16*bl_pruning_mask, bl_c17*bl_pruning_mask, bl_c18*bl_pruning_mask, bl_c19*bl_pruning_mask, bl_c20*bl_pruning_mask, bl_c21*bl_pruning_mask, bl_c22*bl_pruning_mask, bl_c23*bl_pruning_mask, bl_c24*bl_pruning_mask, bl_c25*bl_pruning_mask, bl_c26*bl_pruning_mask, bl_c27*bl_pruning_mask, bl_c28*bl_pruning_mask, bl_c29*bl_pruning_mask, bl_c30*bl_pruning_mask, bl_c31*bl_pruning_mask, bl_c32*bl_pruning_mask]\n r_map = [bl_rand_map_0, bl_rand_map_1, bl_rand_map_2]\n #weights = [weights, w_lut]\n weights_c.extend([c_lut])\n weights_w.extend([w_bram])\n #gammas = [gammas, bl_gamma]\n gammas.extend([bl_gamma])\n #pruning_masks = [pruning_masks, bl_pruning_mask]\n pruning_masks.extend([bl_pruning_mask])\n #rand_maps = [rand_maps, bl_rand_map]\n rand_maps.extend([r_map])\n #means = [means, bl_means]\n means.extend([bl_means])\n #bn_betas = [bn_betas, bl_bn_beta]\n bn_betas.extend([bl_bn_beta])\n #bn_gammas = [bn_gammas, bl_bn_gamma]\n bn_gammas.extend([bl_bn_gamma])\n #bn_means = [bn_means, bl_bn_mean]\n bn_means.extend([bl_bn_mean])\n #bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]\n bn_inv_stds.extend([bl_bn_inv_std])\n\n \n # conv layer 6\n\n\n bl_c1 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_1:0\"])\n bl_c2 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_2:0\"])\n bl_c3 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_3:0\"])\n bl_c4 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_4:0\"])\n bl_c5 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_5:0\"])\n bl_c6 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_6:0\"])\n bl_c7 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_7:0\"])\n bl_c8 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_8:0\"]) \n bl_c9 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_9:0\"])\n bl_c10 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_10:0\"])\n bl_c11 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_11:0\"])\n bl_c12 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_12:0\"])\n bl_c13 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_13:0\"])\n bl_c14 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_14:0\"])\n bl_c15 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_15:0\"])\n bl_c16 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_16:0\"])\n bl_c17 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_17:0\"])\n bl_c18 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_18:0\"])\n bl_c19 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_19:0\"])\n bl_c20 = 
np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_20:0\"])\n bl_c21 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_21:0\"])\n bl_c22 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_22:0\"])\n bl_c23 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_23:0\"])\n bl_c24 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_24:0\"]) \n bl_c25 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_25:0\"])\n bl_c26 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_26:0\"])\n bl_c27 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_27:0\"])\n bl_c28 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_28:0\"])\n bl_c29 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_29:0\"])\n bl_c30 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_30:0\"])\n bl_c31 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_31:0\"])\n bl_c32 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_32:0\"])\n bl_w1 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable_33:0\"])\n bl_rand_map_0 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"rand_map_0:0\"])\n bl_rand_map_1 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"rand_map_1:0\"])\n bl_rand_map_2 = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"rand_map_2:0\"])\n bl_pruning_mask = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"pruning_mask:0\"]).reshape(bl_c1.shape)\n bl_gamma = np.array(bl[\"model_weights\"][\"binary_conv_6\"][\"binary_conv_6\"][\"Variable:0\"])\n \n bl_bn_beta = np.array(bl[\"model_weights\"][\"batch_normalization_6\"][\"batch_normalization_6\"][\"beta:0\"])\n bl_bn_gamma = np.array(bl[\"model_weights\"][\"batch_normalization_6\"][\"batch_normalization_6\"][\"gamma:0\"])\n bl_bn_mean = np.array(bl[\"model_weights\"][\"batch_normalization_6\"][\"batch_normalization_6\"][\"moving_mean:0\"])\n bl_bn_inv_std = 1/np.sqrt(np.array(bl[\"model_weights\"][\"batch_normalization_6\"][\"batch_normalization_6\"][\"moving_variance:0\"])+batch_norm_eps)\n \n bl_means = bl[\"model_weights\"][\"residual_sign_6\"][\"residual_sign_6\"][\"means:0\"]\n \n w_bram = [bl_w1]\n c_lut = [bl_c1*bl_pruning_mask, bl_c2*bl_pruning_mask, bl_c3*bl_pruning_mask, bl_c4*bl_pruning_mask, bl_c5*bl_pruning_mask, bl_c6*bl_pruning_mask, bl_c7*bl_pruning_mask, bl_c8*bl_pruning_mask, bl_c9*bl_pruning_mask, bl_c10*bl_pruning_mask, bl_c11*bl_pruning_mask, bl_c12*bl_pruning_mask, bl_c13*bl_pruning_mask, bl_c14*bl_pruning_mask, bl_c15*bl_pruning_mask, bl_c16*bl_pruning_mask, bl_c17*bl_pruning_mask, bl_c18*bl_pruning_mask, bl_c19*bl_pruning_mask, bl_c20*bl_pruning_mask, bl_c21*bl_pruning_mask, bl_c22*bl_pruning_mask, bl_c23*bl_pruning_mask, bl_c24*bl_pruning_mask, bl_c25*bl_pruning_mask, bl_c26*bl_pruning_mask, bl_c27*bl_pruning_mask, bl_c28*bl_pruning_mask, bl_c29*bl_pruning_mask, bl_c30*bl_pruning_mask, bl_c31*bl_pruning_mask, bl_c32*bl_pruning_mask]\n r_map = [bl_rand_map_0, bl_rand_map_1, bl_rand_map_2]\n #weights = [weights, w_lut]\n weights_c.extend([c_lut])\n weights_w.extend([w_bram])\n #gammas = [gammas, bl_gamma]\n gammas.extend([bl_gamma])\n #pruning_masks = 
[pruning_masks, bl_pruning_mask]\n pruning_masks.extend([bl_pruning_mask])\n #rand_maps = [rand_maps, bl_rand_map]\n rand_maps.extend([r_map])\n #means = [means, bl_means]\n means.extend([bl_means])\n #bn_betas = [bn_betas, bl_bn_beta]\n bn_betas.extend([bl_bn_beta])\n #bn_gammas = [bn_gammas, bl_bn_gamma]\n bn_gammas.extend([bl_bn_gamma])\n #bn_means = [bn_means, bl_bn_mean]\n bn_means.extend([bl_bn_mean])\n #bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]\n bn_inv_stds.extend([bl_bn_inv_std])\n\n \n # dense layer 1\n \n bl_c1 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_1:0\"])\n bl_c2 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_2:0\"])\n bl_c3 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_3:0\"])\n bl_c4 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_4:0\"])\n bl_c5 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_5:0\"])\n bl_c6 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_6:0\"])\n bl_c7 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_7:0\"])\n bl_c8 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_8:0\"]) \n bl_c9 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_9:0\"])\n bl_c10 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_10:0\"])\n bl_c11 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_11:0\"])\n bl_c12 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_12:0\"])\n bl_c13 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_13:0\"])\n bl_c14 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_14:0\"])\n bl_c15 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_15:0\"])\n bl_c16 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_16:0\"])\n bl_c17 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_17:0\"])\n bl_c18 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_18:0\"])\n bl_c19 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_19:0\"])\n bl_c20 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_20:0\"])\n bl_c21 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_21:0\"])\n bl_c22 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_22:0\"])\n bl_c23 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_23:0\"])\n bl_c24 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_24:0\"]) \n bl_c25 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_25:0\"])\n bl_c26 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_26:0\"])\n bl_c27 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_27:0\"])\n bl_c28 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_28:0\"])\n bl_c29 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_29:0\"])\n bl_c30 = 
np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_30:0\"])\n bl_c31 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_31:0\"])\n bl_c32 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_32:0\"])\n bl_w1 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable_33:0\"])\n bl_rand_map_0 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"rand_map_0:0\"])\n bl_rand_map_1 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"rand_map_1:0\"])\n bl_rand_map_2 = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"rand_map_2:0\"])\n bl_pruning_mask = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"pruning_mask:0\"]).reshape(bl_c1.shape)\n bl_gamma = np.array(bl[\"model_weights\"][\"binary_dense_1\"][\"binary_dense_1\"][\"Variable:0\"])\n \n bl_bn_beta = np.array(bl[\"model_weights\"][\"batch_normalization_7\"][\"batch_normalization_7\"][\"beta:0\"])\n bl_bn_gamma = np.array(bl[\"model_weights\"][\"batch_normalization_7\"][\"batch_normalization_7\"][\"gamma:0\"])\n bl_bn_mean = np.array(bl[\"model_weights\"][\"batch_normalization_7\"][\"batch_normalization_7\"][\"moving_mean:0\"])\n bl_bn_inv_std = 1/np.sqrt(np.array(bl[\"model_weights\"][\"batch_normalization_7\"][\"batch_normalization_7\"][\"moving_variance:0\"])+batch_norm_eps)\n \n bl_means = bl[\"model_weights\"][\"residual_sign_7\"][\"residual_sign_7\"][\"means:0\"]\n \n w_bram = [bl_w1]\n c_lut = [bl_c1*bl_pruning_mask, bl_c2*bl_pruning_mask, bl_c3*bl_pruning_mask, bl_c4*bl_pruning_mask, bl_c5*bl_pruning_mask, bl_c6*bl_pruning_mask, bl_c7*bl_pruning_mask, bl_c8*bl_pruning_mask, bl_c9*bl_pruning_mask, bl_c10*bl_pruning_mask, bl_c11*bl_pruning_mask, bl_c12*bl_pruning_mask, bl_c13*bl_pruning_mask, bl_c14*bl_pruning_mask, bl_c15*bl_pruning_mask, bl_c16*bl_pruning_mask, bl_c17*bl_pruning_mask, bl_c18*bl_pruning_mask, bl_c19*bl_pruning_mask, bl_c20*bl_pruning_mask, bl_c21*bl_pruning_mask, bl_c22*bl_pruning_mask, bl_c23*bl_pruning_mask, bl_c24*bl_pruning_mask, bl_c25*bl_pruning_mask, bl_c26*bl_pruning_mask, bl_c27*bl_pruning_mask, bl_c28*bl_pruning_mask, bl_c29*bl_pruning_mask, bl_c30*bl_pruning_mask, bl_c31*bl_pruning_mask, bl_c32*bl_pruning_mask]\n r_map = [bl_rand_map_0, bl_rand_map_1, bl_rand_map_2]\n #weights = [weights, w_lut]\n weights_c.extend([c_lut])\n weights_w.extend([w_bram])\n #gammas = [gammas, bl_gamma]\n gammas.extend([bl_gamma])\n #pruning_masks = [pruning_masks, bl_pruning_mask]\n pruning_masks.extend([bl_pruning_mask])\n #rand_maps = [rand_maps, bl_rand_map]\n rand_maps.extend([r_map])\n #means = [means, bl_means]\n means.extend([bl_means])\n #bn_betas = [bn_betas, bl_bn_beta]\n bn_betas.extend([bl_bn_beta])\n #bn_gammas = [bn_gammas, bl_bn_gamma]\n bn_gammas.extend([bl_bn_gamma])\n #bn_means = [bn_means, bl_bn_mean]\n bn_means.extend([bl_bn_mean])\n #bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]\n bn_inv_stds.extend([bl_bn_inv_std])\n\n \n \n # dense layer 2\n\n bl_c1 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_1:0\"])\n bl_c2 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_2:0\"])\n bl_c3 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_3:0\"])\n bl_c4 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_4:0\"])\n bl_c5 = 
np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_5:0\"])\n bl_c6 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_6:0\"])\n bl_c7 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_7:0\"])\n bl_c8 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_8:0\"]) \n bl_c9 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_9:0\"])\n bl_c10 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_10:0\"])\n bl_c11 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_11:0\"])\n bl_c12 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_12:0\"])\n bl_c13 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_13:0\"])\n bl_c14 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_14:0\"])\n bl_c15 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_15:0\"])\n bl_c16 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_16:0\"])\n bl_c17 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_17:0\"])\n bl_c18 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_18:0\"])\n bl_c19 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_19:0\"])\n bl_c20 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_20:0\"])\n bl_c21 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_21:0\"])\n bl_c22 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_22:0\"])\n bl_c23 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_23:0\"])\n bl_c24 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_24:0\"]) \n bl_c25 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_25:0\"])\n bl_c26 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_26:0\"])\n bl_c27 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_27:0\"])\n bl_c28 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_28:0\"])\n bl_c29 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_29:0\"])\n bl_c30 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_30:0\"])\n bl_c31 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_31:0\"])\n bl_c32 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_32:0\"])\n bl_w1 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable_33:0\"])\n bl_rand_map_0 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"rand_map_0:0\"])\n bl_rand_map_1 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"rand_map_1:0\"])\n bl_rand_map_2 = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"rand_map_2:0\"])\n bl_pruning_mask = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"pruning_mask:0\"]).reshape(bl_c1.shape)\n bl_gamma = np.array(bl[\"model_weights\"][\"binary_dense_2\"][\"binary_dense_2\"][\"Variable:0\"])\n \n bl_bn_beta = 
np.array(bl[\"model_weights\"][\"batch_normalization_8\"][\"batch_normalization_8\"][\"beta:0\"])\n bl_bn_gamma = np.array(bl[\"model_weights\"][\"batch_normalization_8\"][\"batch_normalization_8\"][\"gamma:0\"])\n bl_bn_mean = np.array(bl[\"model_weights\"][\"batch_normalization_8\"][\"batch_normalization_8\"][\"moving_mean:0\"])\n bl_bn_inv_std = 1/np.sqrt(np.array(bl[\"model_weights\"][\"batch_normalization_8\"][\"batch_normalization_8\"][\"moving_variance:0\"])+batch_norm_eps)\n \n bl_means = bl[\"model_weights\"][\"residual_sign_8\"][\"residual_sign_8\"][\"means:0\"]\n \n w_bram = [bl_w1]\n c_lut = [bl_c1*bl_pruning_mask, bl_c2*bl_pruning_mask, bl_c3*bl_pruning_mask, bl_c4*bl_pruning_mask, bl_c5*bl_pruning_mask, bl_c6*bl_pruning_mask, bl_c7*bl_pruning_mask, bl_c8*bl_pruning_mask, bl_c9*bl_pruning_mask, bl_c10*bl_pruning_mask, bl_c11*bl_pruning_mask, bl_c12*bl_pruning_mask, bl_c13*bl_pruning_mask, bl_c14*bl_pruning_mask, bl_c15*bl_pruning_mask, bl_c16*bl_pruning_mask, bl_c17*bl_pruning_mask, bl_c18*bl_pruning_mask, bl_c19*bl_pruning_mask, bl_c20*bl_pruning_mask, bl_c21*bl_pruning_mask, bl_c22*bl_pruning_mask, bl_c23*bl_pruning_mask, bl_c24*bl_pruning_mask, bl_c25*bl_pruning_mask, bl_c26*bl_pruning_mask, bl_c27*bl_pruning_mask, bl_c28*bl_pruning_mask, bl_c29*bl_pruning_mask, bl_c30*bl_pruning_mask, bl_c31*bl_pruning_mask, bl_c32*bl_pruning_mask]\n r_map = [bl_rand_map_0, bl_rand_map_1, bl_rand_map_2]\n #weights = [weights, w_lut]\n weights_c.extend([c_lut])\n weights_w.extend([w_bram])\n #gammas = [gammas, bl_gamma]\n gammas.extend([bl_gamma])\n #pruning_masks = [pruning_masks, bl_pruning_mask]\n pruning_masks.extend([bl_pruning_mask])\n #rand_maps = [rand_maps, bl_rand_map]\n rand_maps.extend([r_map])\n #means = [means, bl_means]\n means.extend([bl_means])\n #bn_betas = [bn_betas, bl_bn_beta]\n bn_betas.extend([bl_bn_beta])\n #bn_gammas = [bn_gammas, bl_bn_gamma]\n bn_gammas.extend([bl_bn_gamma])\n #bn_means = [bn_means, bl_bn_mean]\n bn_means.extend([bl_bn_mean])\n #bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]\n bn_inv_stds.extend([bl_bn_inv_std])\n\n\n # dense layer 3\n\n bl_c1 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_1:0\"])\n bl_c2 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_2:0\"])\n bl_c3 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_3:0\"])\n bl_c4 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_4:0\"])\n bl_c5 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_5:0\"])\n bl_c6 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_6:0\"])\n bl_c7 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_7:0\"])\n bl_c8 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_8:0\"]) \n bl_c9 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_9:0\"])\n bl_c10 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_10:0\"])\n bl_c11 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_11:0\"])\n bl_c12 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_12:0\"])\n bl_c13 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_13:0\"])\n bl_c14 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_14:0\"])\n bl_c15 = 
np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_15:0\"])\n    bl_c16 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_16:0\"])\n    bl_c17 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_17:0\"])\n    bl_c18 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_18:0\"])\n    bl_c19 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_19:0\"])\n    bl_c20 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_20:0\"])\n    bl_c21 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_21:0\"])\n    bl_c22 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_22:0\"])\n    bl_c23 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_23:0\"])\n    bl_c24 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_24:0\"])\n    bl_c25 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_25:0\"])\n    bl_c26 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_26:0\"])\n    bl_c27 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_27:0\"])\n    bl_c28 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_28:0\"])\n    bl_c29 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_29:0\"])\n    bl_c30 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_30:0\"])\n    bl_c31 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_31:0\"])\n    bl_c32 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_32:0\"])\n    bl_w1 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable_33:0\"])\n    bl_rand_map_0 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"rand_map_0:0\"])\n    bl_rand_map_1 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"rand_map_1:0\"])\n    bl_rand_map_2 = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"rand_map_2:0\"])\n    bl_pruning_mask = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"pruning_mask:0\"]).reshape(bl_c1.shape)\n    bl_gamma = np.array(bl[\"model_weights\"][\"binary_dense_3\"][\"binary_dense_3\"][\"Variable:0\"])\n\n    # the final dense layer pairs with batch_normalization_9\n    bl_bn_beta = np.array(bl[\"model_weights\"][\"batch_normalization_9\"][\"batch_normalization_9\"][\"beta:0\"])\n    bl_bn_gamma = np.array(bl[\"model_weights\"][\"batch_normalization_9\"][\"batch_normalization_9\"][\"gamma:0\"])\n    bl_bn_mean = np.array(bl[\"model_weights\"][\"batch_normalization_9\"][\"batch_normalization_9\"][\"moving_mean:0\"])\n    bl_bn_inv_std = 1/np.sqrt(np.array(bl[\"model_weights\"][\"batch_normalization_9\"][\"batch_normalization_9\"][\"moving_variance:0\"])+batch_norm_eps)\n\n    # the last layer reads no residual_sign means: bl_means still holds the previous\n    # layer's values and is re-appended below only to keep the per-layer lists aligned\n    w_bram = [bl_w1]\n    c_lut = [bl_c1*bl_pruning_mask, bl_c2*bl_pruning_mask, bl_c3*bl_pruning_mask, bl_c4*bl_pruning_mask, bl_c5*bl_pruning_mask, bl_c6*bl_pruning_mask, bl_c7*bl_pruning_mask, bl_c8*bl_pruning_mask, bl_c9*bl_pruning_mask, bl_c10*bl_pruning_mask, bl_c11*bl_pruning_mask, bl_c12*bl_pruning_mask, bl_c13*bl_pruning_mask, bl_c14*bl_pruning_mask, bl_c15*bl_pruning_mask, bl_c16*bl_pruning_mask, bl_c17*bl_pruning_mask, bl_c18*bl_pruning_mask, bl_c19*bl_pruning_mask, bl_c20*bl_pruning_mask, bl_c21*bl_pruning_mask, bl_c22*bl_pruning_mask, bl_c23*bl_pruning_mask, bl_c24*bl_pruning_mask, bl_c25*bl_pruning_mask, bl_c26*bl_pruning_mask, bl_c27*bl_pruning_mask, bl_c28*bl_pruning_mask, bl_c29*bl_pruning_mask, bl_c30*bl_pruning_mask, bl_c31*bl_pruning_mask, bl_c32*bl_pruning_mask]\n    r_map = [bl_rand_map_0, bl_rand_map_1, bl_rand_map_2]\n    weights_c.extend([c_lut])\n    weights_w.extend([w_bram])\n    gammas.extend([bl_gamma])\n    pruning_masks.extend([bl_pruning_mask])\n    rand_maps.extend([r_map])\n    means.extend([bl_means])\n    bn_betas.extend([bl_bn_beta])\n    bn_gammas.extend([bl_bn_gamma])\n    bn_means.extend([bl_bn_mean])\n    bn_inv_stds.extend([bl_bn_inv_std])\n\n\n    print(\"Binarizing the pretrained parameters...\")\n\n    # Binarize the weights\n    for i in range(1,9): # the first layer has no c parameters\n        for j in range(32):\n            weights_c[i][j] = SignNumpy(weights_c[i][j])\n\n    for i in range(0,9):\n        for j in range(1):\n            weights_w[i][j] = SignNumpy(weights_w[i][j])\n\n    # write header file\n    with open('../codegen_output/weights.h', 'w') as f:\n        f.write('#pragma once\\n')\n        f.write('//Generated weights for CIFAR-10\\n')\n\n    for layer_id in range(9):\n        # generate weights\n        if layer_id==0: # first layer: fxp inputs and binary weights\n            weights_c_per_act = 0\n            weights_w_per_act = 1\n            extra_activations = 0\n        else:\n            weights_c_per_act = 32 # weights_per_act = #_of_bits_per_act x 2 ^ #_of_lut_inputs\n            weights_w_per_act = 1\n            extra_activations = 3 # no. of extra_activations = no. of activations per LUT - 1
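# ------------------------------------------------------------------
# For reference: the weights.h emission loops below pack each row of
# word_length binary values into fold = ceil(word_length/32) 32-bit
# words using a "right-first bit-push" (bit i of the row lands at
# position i % 32 of word i // 32) and emit the least-significant
# word first. A self-contained sketch of that packing, with a
# hypothetical helper name; entries are assumed to already be 0/1,
# as the int(ele) << i pushes below require:
def pack_row_le_sketch(row_bits, word_length):
    fold = (word_length - 1) // 32 + 1           # number of 32-bit words
    acc = 0
    for i, b in enumerate(row_bits):
        acc |= int(b) << i                       # right-first bit-push
    # little-endian word order: lowest 32 bits are emitted first
    return ['0x%X' % ((acc >> (32 * k)) & 0xFFFFFFFF) for k in range(fold)]
# e.g. pack_row_le_sketch([1, 0, 1, 1], 4) -> ['0xD']
# ------------------------------------------------------------------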
        dims_c = np.shape(pruning_masks[layer_id])\n        dims_w = np.shape(weights_w[layer_id][0])\n        if len(dims_w)==2:\n            layer_type = \"fc\"\n            word_length_w = dims_w[0]\n            word_length_c = dims_c[0]\n            nfilters_w = dims_w[1]\n            nfilters_c = dims_c[1]\n            ninch_w = dims_w[0]\n            ninch_c = dims_c[0]\n        elif len(dims_w)==4:\n            layer_type = \"conv\"\n            word_length_w = dims_w[0]*dims_w[1]*dims_w[2]\n            word_length_c = dims_c[0]*dims_c[1]*dims_c[2]\n            nfilters_w = dims_w[3]\n            nfilters_c = dims_c[3]\n            ninch_w = dims_w[2]\n            ninch_c = dims_c[2]\n\n        # write weights to header file\n        for weight_id in range(weights_w_per_act):\n            mat = weights_w[layer_id][weight_id]\n            if layer_type==\"conv\":\n                mat = np.stack(np.split(mat.reshape(-1,ninch_w,nfilters_w), ninch_w/ninch_c, axis=1), axis=3)\n                mat = np.stack(np.split(mat.reshape(-1,nfilters_w,ninch_w/ninch_c), nfilters_w/nfilters_c, axis=1), axis=3).transpose(1,2,3,0) # mat[M/TM][TN][TM][K*K*N/TN]\n                if layer_id!=0:\n                    pruning_mask = pruning_masks[layer_id].transpose(3,0,1,2).reshape(nfilters_c,-1) # pruning_mask[M/TM][K*K*N/TN]\n            elif layer_type==\"fc\":\n                mat = np.stack(np.split(mat, ninch_w/ninch_c, axis=0), axis=2)\n                mat = np.stack(np.split(mat, nfilters_w/nfilters_c, axis=1), axis=3).transpose(1,2,3,0) # mat[M/TM][TN][TM][K*K*N/TN]\n                pruning_mask = pruning_masks[layer_id].transpose(1,0) # pruning_mask[M/TM][N/TN]\n            else:\n                print(\"unknown weight format!\")\n\n            with open('../codegen_output/weights.h', 'a') as f:\n                fold = (word_length_c-1)/32 + 1\n                f.write('//Array shape: {}\\n'.format([nfilters_c,ninch_w/ninch_c,nfilters_w/nfilters_c,fold]))\n                f.write(\"static ap_uint<32> \" + \"weights_w_\" + layer_type + str(layer_id+1) + \"_\" + str(weight_id+1) + \"[\"+str(nfilters_c) + \"][\"+str(ninch_w/ninch_c) + \"][\"+str(nfilters_w/nfilters_c) + \"][\"+str(fold) + \"] = {\")\n                for t1 in range(nfilters_c):\n                    if t1!=0:\n                        f.write(\",\")\n                    f.write(\"{\")\n                    for t2 in range(ninch_w/ninch_c):\n                        if t2!=0:\n                            f.write(\",\")\n                        f.write(\"{\")\n                        for t3 in range(nfilters_w/nfilters_c):\n                            if t3!=0:\n                                f.write(\",\")\n                            f.write(\"{\")\n                            bin_append = 0\n                            for i, ele in enumerate(mat[t1][t2][t3]):\n                                #bin_append = (bin_append << 1) | (int(ele) # left-first bit-push\n                                bin_append = bin_append | 
(int(ele) << (i % word_length_c)) # right-first bit-push\n if (i == word_length_c-1):\n mask = 0xFFFFFFFF\n for i_32b in range(fold):\n #word = (bin_append>>(32*(fold-i_32b-1))) & mask # Big-endian: left-first word-push\n word = (bin_append>>(32*i_32b)) & mask # Little-endian: right-first word-push\n hex_word = '%X' % word\n if i_32b!=0:\n f.write(', ')\n f.write('0x' + hex_word)\n bin_append = 0\n f.write(\"}\")\n f.write(\"}\")\n f.write(\"}\")\n\n\n f.write('};\\n')\n\n if layer_id != 0: \n # write lut parameters to header file\n for weight_id in range(weights_c_per_act):\n mat = weights_c[layer_id][weight_id]\n if layer_type==\"fc\":\n mat_flat = mat.transpose(1,0).flatten()\n elif layer_type==\"conv\":\n mat_flat = mat.transpose(3,0,1,2).flatten()\n else:\n print(\"unknown weight format!\")\n\n with open('../codegen_output/weights.h', 'a') as f:\n f.write('//Array shape: {}\\n'.format(dims_c))\n fold = (word_length_c-1)/32 + 1\n f.write(\"const ap_uint<32> \" + \"weights_c_\" + layer_type + str(layer_id+1) + \"_\" + str(weight_id+1) + \"[\"+str(nfilters_c*fold) + \"] = {\")\n bin_append = 0\n for i, ele in enumerate(mat_flat):\n #bin_append = (bin_append << 1) | (int(ele) # left-first bit-push\n bin_append = bin_append | (int(ele) << (i % word_length_c)) # right-first bit-push\n if (i % word_length_c == (word_length_c - 1)):\n mask = 0xFFFFFFFF\n for i_32b in range(fold):\n #word = (bin_append>>(32*(fold-i_32b-1))) & mask # Big-endian: left-first word-push\n word = (bin_append>>(32*i_32b)) & mask # Little-endian: right-first word-push\n hex_word = '%X' % word\n if i_32b!=0:\n f.write(', ')\n f.write('0x' + hex_word)\n bin_append = 0\n if i != nfilters_c*word_length_c-1:\n f.write(', ')\n f.write('};\\n')\n\n if layer_id != 0:\n\n # generate verilog source file for LUTARRAY: Vivado HLS will take forever\n if layer_id==1:\n modname = 'LUTARRAY_1'\n elif layer_id==2:\n modname = 'LUTARRAY'\n elif layer_id==3:\n modname = 'LUTARRAY_2'\n elif layer_id==4:\n modname = 'LUTARRAY_5'\n elif layer_id==5:\n modname = 'LUTARRAY_6'\n elif layer_id==6:\n modname = 'LUTARRAY_3'\n elif layer_id==7:\n modname = 'LUTARRAY_4'\n elif layer_id==8:\n modname = 'LUTARRAY_7'\n\n if layer_id != 8: # the 8th layer has different variable names\n\n mat_flat = []\n for weight_id in range(weights_c_per_act):\n mat = weights_c[layer_id][weight_id]\n pm = pruning_masks[layer_id]#.transpose(3,0,1,2).flatten()\n if layer_type==\"fc\":\n mat = mat.transpose(1,0)\n pm_flat = pm.transpose(1,0)\n elif layer_type==\"conv\":\n mat = mat.transpose(3,0,1,2).reshape((nfilters_c, -1))\n pm_flat = pm.transpose(3,0,1,2).reshape((nfilters_c, -1))\n else:\n print(\"unknown weight format!\")\n mat_flat.extend([mat])\n \n with open('../codegen_output/'+modname+'.v', 'w') as v0:\n v0.write('`timescale 1 ns / 1 ps\\n\\n')\n v0.write('module '+modname+' (\\n in_V,\\n in_1_V,\\n in_2_V,\\n in_3_V')\n for tm in range(nfilters_c):\n v0.write(',\\n weight_0_' + str(tm) + '_V_read')\n for tm in range(nfilters_c):\n v0.write(',\\n ap_return_' + str(tm))\n v0.write(');\\n\\n')\n \n with open('../codegen_output/'+modname+'.v', 'a') as v0:\n v0.write('\\n\\n')\n v0.write('input [' + str(word_length_c-1) + ':0] in_V;\\n')\n v0.write('input [' + str(word_length_c-1) + ':0] in_1_V;\\n')\n v0.write('input [' + str(word_length_c-1) + ':0] in_2_V;\\n')\n v0.write('input [' + str(word_length_c-1) + ':0] in_3_V;\\n')\n for tm in range(nfilters_c):\n v0.write('input [' + str(((word_length_c-1)/32+1)*32-1) + ':0] weight_0_' + str(tm) + '_V_read;\\n')\n 
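# ------------------------------------------------------------------
# The assign statements generated below enumerate all 32 rows of a
# 5-input truth table per output bit: 4 activation inputs (in_V,
# in_1_V, in_2_V, in_3_V) plus one weight bit, with
# mat_flat[k][tm][ti] holding the stored output for row k. A software
# model of one generated LUT bit, for reference (hypothetical helper
# name; inputs are 0/1):
def lut5_eval_sketch(tt, in0, in1, in2, in3, w):
    # tt is [int(mat_flat[k][tm][ti]) for k in range(32)]; row order
    # matches the generated terms (w toggles fastest, in0 slowest,
    # row 0 = all inputs asserted).
    k = (((1 - in0) << 4) | ((1 - in1) << 3) | ((1 - in2) << 2)
         | ((1 - in3) << 1) | (1 - w))
    return tt[k]
# ------------------------------------------------------------------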
for tm in range(nfilters_c):\n v0.write('output [' + str(word_length_c-1) + ':0] ap_return_' + str(tm) + ';\\n')\n for tm in range(nfilters_c):\n for ti, ele in enumerate(pm_flat[tm]):\n if ele==1:\n v0.write('wire tmp_' + str(tm) + '_' + str(ti) + ';\\n')\n v0.write('assign tmp_' + str(tm) + '_' + str(ti) + ' = ')\n v0.write('(' + str(int(mat_flat[0][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[1][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[2][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[3][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[4][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[5][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[6][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[7][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[8][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[9][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[10][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[11][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[12][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[13][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[14][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[15][tm][ti])) + ' & 
in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[16][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[17][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[18][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[19][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[20][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[21][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[22][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[23][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[24][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[25][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[26][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[27][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[28][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[29][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[30][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_0_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[31][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_0_' + str(tm) + 
'_V_read[' + str(ti) + ']);\\n')\n \n v0.write('assign ap_return_' + str(tm) + ' = {')\n for ti, ele in enumerate(pm_flat[tm]):\n if ele == 0:\n v0.write(\"1'b0\")\n elif ele == 1:\n v0.write('tmp_' + str(tm) + '_' + str(ti))\n else:\n print(\"pruning mask elements must be binary!\")\n if ti != word_length_c-1:\n v0.write(',')\n else:\n v0.write('};\\n')\n v0.write('endmodule')\n\n\n else: # the 8th layer has different variable names\n\n mat_flat = []\n for weight_id in range(weights_c_per_act):\n mat = weights_c[layer_id][weight_id]\n pm = pruning_masks[layer_id]#.transpose(3,0,1,2).flatten()\n if layer_type==\"fc\":\n mat = mat.transpose(1,0)\n pm_flat = pm.transpose(1,0)\n elif layer_type==\"conv\":\n mat = mat.transpose(3,0,1,2).reshape((nfilters_c, -1))\n pm_flat = pm.transpose(3,0,1,2).reshape((nfilters_c, -1))\n else:\n print(\"unknown weight format!\")\n mat_flat.extend([mat])\n \n with open('../codegen_output/'+modname+'.v', 'w') as v0:\n v0.write('`timescale 1 ns / 1 ps\\n\\n')\n v0.write('module '+modname+' (\\n in_V,\\n in_1_V,\\n in_2_V,\\n in_3_V')\n for tm in range(nfilters_c):\n v0.write(',\\n weight_' + str(tm) + '_V_read')\n v0.write(',\\n ap_return);\\n\\n')\n \n with open('../codegen_output/'+modname+'.v', 'a') as v0:\n v0.write('\\n\\n')\n v0.write('input [' + str(word_length_c-1) + ':0] in_V;\\n')\n v0.write('input [' + str(word_length_c-1) + ':0] in_1_V;\\n')\n v0.write('input [' + str(word_length_c-1) + ':0] in_2_V;\\n')\n v0.write('input [' + str(word_length_c-1) + ':0] in_3_V;\\n')\n for tm in range(nfilters_c):\n v0.write('input [' + str(((word_length_c-1)/32+1)*32-1) + ':0] weight_' + str(tm) + '_V_read;\\n')\n for tm in range(nfilters_c):\n v0.write('output [' + str(word_length_c-1) + ':0] ap_return;\\n')\n for tm in range(nfilters_c):\n for ti, ele in enumerate(pm_flat[tm]):\n if ele==1:\n v0.write('wire tmp_' + str(tm) + '_' + str(ti) + ';\\n')\n v0.write('assign tmp_' + str(tm) + '_' + str(ti) + ' = ')\n v0.write('(' + str(int(mat_flat[0][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[1][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[2][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[3][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[4][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[5][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[6][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[7][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + 
str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[8][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[9][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[10][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[11][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[12][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[13][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[14][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[15][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[16][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[17][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[18][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[19][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[20][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[21][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[22][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[23][tm][ti])) + ' & ~in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[24][tm][ti])) + ' & ~in_V[' + str(ti) + '] 
& ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[25][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[26][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[27][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[28][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[29][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[30][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & weight_' + str(tm) + '_V_read[' + str(ti) + ']) | ')\n v0.write('(' + str(int(mat_flat[31][tm][ti])) + ' & ~in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~weight_' + str(tm) + '_V_read[' + str(ti) + ']);\\n')\n \n v0.write('assign ap_return = {')\n for ti, ele in enumerate(pm_flat[tm]):\n if ele == 0:\n v0.write(\"1'b0\")\n elif ele == 1:\n v0.write('tmp_' + str(tm) + '_' + str(ti))\n else:\n print(\"pruning mask elements must be binary!\")\n if ti != word_length_c-1:\n v0.write(',')\n else:\n v0.write('};\\n')\n v0.write('endmodule')\n\n\n # generate threshold\n if layer_id!=8: # the last layer does not need threshold\n use_popcount = not(layer_id==0)\n next_means_b0 = abs(means[layer_id][0])\n print(next_means_b0)\n next_means_b1 = abs(means[layer_id][1])\n print(next_means_b1)\n if layer_type==\"conv\":\n if layer_id != 0: \n fanin = np.sum(np.tile(pruning_masks[layer_id], [dims_w[0]/dims_c[0],dims_w[1]/dims_c[1],dims_w[2]/dims_c[2],dims_w[3]/dims_c[3]]).reshape(-1,dims_w[3]),axis=0)\n else:\n fanin = np.sum(np.ones((dims_w[0]*dims_w[1]*dims_w[2],dims_w[3])),axis=0)\n elif layer_type==\"fc\":\n fanin = np.sum(pruning_masks[layer_id],axis=0) * (dims_w[0]*dims_w[1]/dims_c[0]/dims_c[1])\n if layer_id!=0:\n fanin = fanin * abs(gammas[layer_id] * means[layer_id-1][0]) + fanin * abs(gammas[layer_id] * means[layer_id-1][1])\n thresholds = np.array(makeBNComplex(0, fanin, bn_betas[layer_id], bn_gammas[layer_id], bn_means[layer_id], bn_inv_stds[layer_id], usePopCount=use_popcount))\n next_means_bn_b0 = np.array(makeBNComplex(next_means_b0, fanin, bn_betas[layer_id], bn_gammas[layer_id], bn_means[layer_id], bn_inv_stds[layer_id], usePopCount=use_popcount)) - thresholds\n\n with open('../codegen_output/weights.h', 'a') as f:\n f.write(\"const ap_fixed<24, 16> \" + \"thresh_\" + layer_type + str(layer_id+1) + \"[\"+str(len(thresholds))+\"] = {\")\n for i, ele in enumerate(thresholds):\n if i == 0:\n f.write(str(ele))\n else:\n f.write(','+ str(ele))\n f.write('};\\n')\n f.write(\"const ap_fixed<24, 16> \" + \"next_layer_means_\" + layer_type + str(layer_id+1) + \"[\"+str(len(next_means_bn_b0))+\"] = {\")\n for 
i, ele in enumerate(next_means_bn_b0):\n if i == 0:\n f.write(str(ele))\n else:\n f.write(','+ str(ele))\n f.write('};\\n')\n# # generate next layer mean\n# if layer_id!=8:\n# with open('../codegen_output/weights.h', 'a') as f:\n# next_means_b0 = abs(means[layer_id][0])\n# next_means_b1 = abs(means[layer_id][1])\n# f.write(\"const ap_fixed<24, 16> \" + \"next_layer_means_\" + layer_type + str(layer_id+1) + \"[2] = {\")\n# f.write(str(next_means_b0))\n# f.write(','+ str(next_means_b1))\n# f.write('};\\n')\n\n # generate pruning mask\n if layer_id!=0:\n with open('../codegen_output/weights.h', 'a') as f:\n\n fold = (word_length_c-1)/32 + 1\n #f.write('//Array shape: {}\\n'.format([nfilters_c,ninch_w/ninch_c,nfilters_w/nfilters_c,fold]))\n f.write(\"static ap_uint<32> \" + \"pruning_mask_\" + layer_type + str(layer_id+1) + \"_1[\"+str(nfilters_c) + \"][\"+str(fold) + \"] = {\")\n for t1 in range(nfilters_c):\n if t1!=0:\n f.write(\",\")\n f.write(\"{\")\n bin_append = 0\n for i, ele in enumerate(pruning_mask[t1]):\n #bin_append = (bin_append << 1) | (int(ele) # left-first bit-push\n bin_append = bin_append | (int(ele) << (i % word_length_c)) # right-first bit-push\n if (i == word_length_c-1):\n mask = 0xFFFFFFFF\n for i_32b in range(fold):\n #word = (bin_append>>(32*(fold-i_32b-1))) & mask # Big-endian: left-first word-push\n word = (bin_append>>(32*i_32b)) & mask # Little-endian: right-first word-push\n hex_word = '%X' % word\n if i_32b!=0:\n f.write(', ')\n f.write('0x' + hex_word)\n bin_append = 0\n f.write(\"}\")\n\n\n f.write('};\\n')\n\n # generate random map\n with open('../codegen_output/weights.h', 'a') as f:\n for rand_map_id in range(extra_activations):\n rand_map = rand_maps[layer_id][rand_map_id].flatten().astype(np.uint32)\n f.write(\"const unsigned int \" + \"rand_map_\" + layer_type + str(layer_id+1) + \"_\" + str(rand_map_id+1) + \"[\"+str(len(rand_map))+\"] = {\")\n for i, ele in enumerate(rand_map):\n if i == 0:\n f.write(str(ele))\n else:\n f.write(','+ str(ele))\n f.write('};\\n')\n # generate alpha\n with open('../codegen_output/weights.h', 'a') as f:\n if layer_id!=0:\n alpha_b0 = abs(gammas[layer_id] * means[layer_id-1][0])\n alpha_b1 = abs(gammas[layer_id] * means[layer_id-1][1])\n f.write(\"const ap_fixed<24, 16> \" + \"alpha_\" + layer_type + str(layer_id+1) + \"[2] = {\")\n f.write(str(alpha_b0))\n f.write(','+ str(alpha_b1))\n f.write('};\\n')\n\n else:\n alpha_b0 = abs(gammas[layer_id])\n f.write(\"const ap_fixed<24, 16> \" + \"alpha_\" + layer_type + str(layer_id+1) + \"[1] = {\")\n f.write(str(alpha_b0))\n f.write('};\\n')\n\n\n\n" ]
[ [ "numpy.array", "numpy.sum", "numpy.ones", "numpy.tile", "numpy.split", "numpy.shape", "numpy.greater" ] ]
LishenQ-1995/cpsc2021-python-entry
[ "f3592dae18cd23434e52a62f3d78ff9fb6a230ed" ]
[ "entry_2021_Q.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 4 20:50:10 2021\r\n\r\n@author: Lishen Qiu\r\n\"\"\"\r\n\r\n#!/usr/bin/env python3\r\n\r\nimport numpy as np\r\nimport os\r\nimport sys\r\n\r\nimport wfdb\r\n# from utils import qrs_detect, comp_cosEn, save_dict\r\n# import matplotlib # ๆณจๆ„่ฟ™ไธชไนŸ่ฆimportไธ€ๆฌก\r\n# import matplotlib.pyplot as plt\r\n# from score_2021 import RefInfo \r\nimport glob\r\nfrom tqdm import tqdm\r\n# import numpy as np\r\n# import os\r\n# import sys\r\nimport json\r\n# import wfdb\r\n# import pandas as pd\r\n# from scipy.io import loadmat,savemat\r\n# import random\r\nimport torch\r\nimport resunet_CPSC2021\r\n# from loss_CPSC2021 import dice_loss,dice_coef\r\n# from torch.utils.data import Dataset, DataLoader\r\n# import time\r\n\"\"\"\r\nWritten by: Xingyao Wang, Chengyu Liu\r\n School of Instrument Science and Engineering\r\n Southeast University, China\r\n [email protected]\r\n\r\nSave answers to '.json' files, the format is as {โ€˜predict_endpointsโ€™: [[s0, e0], [s1, e1], โ€ฆ, [sm-1, em-2]]}.\r\n\"\"\"\r\n\r\ndef save_dict(filename, dic):\r\n '''save dict into json file'''\r\n with open(filename,'w') as json_file:\r\n json.dump(dic, json_file, ensure_ascii=False)\r\n \r\ndef load_data(sample_path):\r\n sig, fields = wfdb.rdsamp(sample_path)\r\n length = len(sig)\r\n fs = fields['fs']\r\n\r\n return sig, length, fs\r\n\r\ndef ngrams_rr(data, length):\r\n grams = []\r\n for i in range(0, length-12, 12):\r\n grams.append(data[i: i+12])\r\n return grams\r\n\r\ndef challenge_entry(sample_path):\r\n\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # PyTorch v0.4.0\r\n model = resunet_CPSC2021.resunet().to(device)\r\n model.load_state_dict(torch.load('./CPSC2021_0.99398.pth'))#่ฏปๆจกๅž‹\r\n \r\n target_path=sample_path\r\n data_interval = 20\r\n data_length = 1600\r\n # flag=1\r\n data_files = glob.glob(target_path+'*.dat')\r\n data_files=sorted(data_files)\r\n \r\n for file_name in tqdm(data_files):\r\n file_name=file_name[0:-4]\r\n index_=file_name.index('d')\r\n name_=file_name[index_:]\r\n [sig, length, fs]=load_data(file_name)\r\n label_signal=np.zeros((length,))#่ฟ™ไธชlabel้•ฟๅบฆไธŽไฟกๅท้•ฟๅบฆไธ€ๆ ท๏ผŒAFๆ ‡่ฎฐ็ปด1๏ผŒๅ…ถไป–ๅœฐๆ–นๆ ‡่ฎฐไธบ0\r\n # print(\"ll\",length)\r\n # length2=length\r\n if length<data_length:\r\n sig_temp=np.zeros((data_length,2))\r\n sig_temp[0:length,:]=sig\r\n sig=sig_temp\r\n length=data_length\r\n cut_num =int((len(sig)-data_length)/data_interval+1)\r\n batch_size=64\r\n batch_data=np.zeros((batch_size,data_length,2))\r\n start_temp=np.zeros((batch_size,1))\r\n n=0\r\n # pre_AFPosition_start=[]\r\n # pre_AFPosition_end=[]\r\n \r\n label_start=[]#่ฎฐๅฝ•label_signal็š„่ตทๆญข็‚น\r\n label_end=[]\r\n for k in range(cut_num+1):\r\n \r\n \r\n start=data_interval*k\r\n end_=start+data_length\r\n if start+data_length>length:\r\n start=length-data_length\r\n end_=length\r\n label_start.append(start)\r\n label_end.append(end_)\r\n # print(start,end_)\r\n temp=sig[start:end_,:]\r\n batch_data[n,:,:]=temp\r\n start_temp[n]=start\r\n n=n+1\r\n if n==batch_size or start+data_length>length:\r\n batch_data2=np.expand_dims(batch_data,axis=2)\r\n batch_data2=torch.Tensor(batch_data2)\r\n batch_data2=batch_data2.permute(0,3,1,2)\r\n batch_data2=batch_data2.to(device)\r\n \r\n pre_label1=model(batch_data2)\r\n pre_label1=pre_label1.data.cpu().numpy()\r\n pre_label1[pre_label1>=0.5]=1\r\n pre_label1[pre_label1<0.5]=0#64,1\r\n \r\n \r\n \r\n \r\n for j in range(n):\r\n if pre_label1[j,0,0]==1:\r\n 
label_signal[label_start[j]:label_end[j],]=1\r\n \r\n n=0\r\n label_start=[]#่ฎฐๅฝ•label_signal็š„่ตทๆญข็‚น\r\n label_end=[] \r\n \r\n label_signal[0,]=0\r\n label_signal[-1,]=0 \r\n \r\n pre_label_diff=np.diff(label_signal)\r\n \r\n AF_start_batch=np.where(pre_label_diff==1)[0]\r\n AF_end_batch=np.where(pre_label_diff==-1)[0]\r\n \r\n valid_index=[]\r\n for m in range(len(AF_start_batch)):\r\n if AF_end_batch[m]-AF_start_batch[m]>=385 :\r\n valid_index.extend([int(m)])\r\n AF_start_batch=AF_start_batch[valid_index]\r\n AF_end_batch=AF_end_batch[valid_index]\r\n \r\n \r\n AF_start_batch=np.array(sorted(AF_start_batch.copy()),dtype=\"float64\")\r\n AF_end_batch=np.array(sorted(AF_end_batch.copy()),dtype=\"float64\")\r\n pre_position=[] \r\n\r\n\r\n if len(AF_start_batch)>0:\r\n if np.sum(label_signal)/len(label_signal)>=0.90 or len(AF_start_batch)>=30:\r\n pre_position.append([0,len(label_signal)-1]) \r\n elif np.sum(label_signal)/len(label_signal)<0.10:\r\n pre_position=[]\r\n \r\n else:\r\n \r\n for m in range(len(AF_start_batch)):\r\n if (AF_end_batch[m]-800)-(AF_start_batch[m]+800)>=385:\r\n\r\n pre_position.append([AF_start_batch[m]+800,AF_end_batch[m]-800])\r\n else:\r\n pre_position=[]\r\n pred_dcit = {'predict_endpoints': pre_position}\r\n # save_dict(os.path.join(RESULT_PATH, sample+'.json'), pred_dict)\r\n save_dict(os.path.join(RESULT_PATH, name_+'.json'), pred_dcit)\r\n return pred_dcit\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n\r\n # DATA_PATH=\"/home/ps/cwq/2021็”Ÿ็†ๅ‚ๆ•ฐๆŒ‘ๆˆ˜่ต›/ไปฃ็ 4ๆˆฟ้ขคๅˆ†็ฑป/ๅˆ†ๆ•ฐ/้ชŒ่ฏ้›†ๆ•ฐๆฎ/้ชŒ่ฏ้›†ๆ•ฐๆฎ/\"\r\n # RESULT_PATH=\"/home/ps/cwq/2021็”Ÿ็†ๅ‚ๆ•ฐๆŒ‘ๆˆ˜่ต›/ไปฃ็ 4ๆˆฟ้ขคๅˆ†็ฑป/ๅˆ†ๆ•ฐ/้ชŒ่ฏ้›†ๆ•ฐๆฎ/้ชŒ่ฏ้›†็ป“ๆžœ/\"\r\n \r\n DATA_PATH = sys.argv[1]\r\n RESULT_PATH = sys.argv[2]\r\n if not os.path.exists(RESULT_PATH):\r\n os.makedirs(RESULT_PATH)\r\n \r\n # test_set = open(os.path.join(DATA_PATH, 'RECORDS'), 'r').read().splitlines()\r\n # for i, sample in enumerate(test_set):\r\n # print(sample)\r\n # sample_path = os.path.join(DATA_PATH, sample)\r\n pred_dict = challenge_entry(DATA_PATH)\r\n\r\n \r\n\r\n" ]
[ [ "numpy.zeros", "numpy.sum", "numpy.diff", "numpy.where", "torch.cuda.is_available", "torch.load", "torch.Tensor", "numpy.expand_dims" ] ]
pha-nguyen/DEFT
[ "7e437262317d2484ebb79add67254b2cb014eeb0" ]
[ "src/lib/model/model.py" ]
[ "# ------------------------------------------------------------------------------\n# Licensed under the MIT License.\n# Written by Xingyi Zhou ([email protected])\n# Source: https://github.com/xingyizhou/CenterTrack/\n# Modified by Mohamed Chaabane\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torchvision.models as models\nimport torch\nimport torch.nn as nn\nimport os\n\nfrom .networks.dla import DLASeg\nfrom .networks.resdcn import PoseResDCN\nfrom .networks.resnet import PoseResNet\nfrom .networks.dlav0 import DLASegv0\nfrom .networks.generic_network import GenericNetwork\n\n_network_factory = {\n \"resdcn\": PoseResDCN,\n \"dla\": DLASeg,\n \"res\": PoseResNet,\n \"dlav0\": DLASegv0,\n \"generic\": GenericNetwork,\n}\n\n\ndef create_model(arch, head, head_conv, opt=None):\n num_layers = int(arch[arch.find(\"_\") + 1 :]) if \"_\" in arch else 0\n arch = arch[: arch.find(\"_\")] if \"_\" in arch else arch\n model_class = _network_factory[arch]\n model = model_class(num_layers, heads=head, head_convs=head_conv, opt=opt)\n return model\n\n\ndef load_model(model, model_path, opt, optimizer=None):\n start_epoch = 0\n checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)\n print(\"loaded {}, epoch {}\".format(model_path, checkpoint[\"epoch\"]))\n state_dict_ = checkpoint[\"state_dict\"]\n state_dict = {}\n\n # convert data_parallal to model\n for k in state_dict_:\n if k.startswith(\"module\") and not k.startswith(\"module_list\"):\n state_dict[k[7:]] = state_dict_[k]\n else:\n state_dict[k] = state_dict_[k]\n model_state_dict = model.state_dict()\n\n # check loaded parameters and created model parameters\n for k in state_dict:\n if k in model_state_dict:\n if (state_dict[k].shape != model_state_dict[k].shape) or (\n opt.reset_hm\n and k.startswith(\"hm\")\n and (state_dict[k].shape[0] in [80, 1])\n ):\n if opt.reuse_hm:\n print(\n \"Reusing parameter {}, required shape{}, \"\n \"loaded shape{}.\".format(\n k, model_state_dict[k].shape, state_dict[k].shape\n )\n )\n if state_dict[k].shape[0] < state_dict[k].shape[0]:\n model_state_dict[k][: state_dict[k].shape[0]] = state_dict[k]\n else:\n model_state_dict[k] = state_dict[k][\n : model_state_dict[k].shape[0]\n ]\n state_dict[k] = model_state_dict[k]\n else:\n print(\n \"Skip loading parameter {}, required shape{}, \"\n \"loaded shape{}.\".format(\n k, model_state_dict[k].shape, state_dict[k].shape\n )\n )\n state_dict[k] = model_state_dict[k]\n else:\n print(\"Drop parameter {}.\".format(k))\n for k in model_state_dict:\n if not (k in state_dict):\n print(\"No param {}.\".format(k))\n state_dict[k] = model_state_dict[k]\n model.load_state_dict(state_dict, strict=False)\n\n # resume optimizer parameters\n if optimizer is not None and opt.resume:\n if \"optimizer\" in checkpoint:\n # optimizer.load_state_dict(checkpoint['optimizer'])\n start_epoch = checkpoint[\"epoch\"]\n start_lr = opt.lr\n for step in opt.lr_step:\n if start_epoch >= step:\n start_lr *= 0.1\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = start_lr\n print(\"Resumed optimizer with start lr\", start_lr)\n else:\n print(\"No optimizer parameters in checkpoint.\")\n if optimizer is not None:\n return model, optimizer, start_epoch\n else:\n return model\n\n\ndef save_model(path, epoch, model, optimizer=None):\n if isinstance(model, torch.nn.DataParallel):\n state_dict = 
model.module.state_dict()\n else:\n state_dict = model.state_dict()\n data = {\"epoch\": epoch, \"state_dict\": state_dict}\n if not (optimizer is None):\n data[\"optimizer\"] = optimizer.state_dict()\n torch.save(data, path)\n" ]
[ [ "torch.save", "torch.load" ] ]
Standard-Cognition/recursive-bayesian-filtering
[ "6b22947aa0a58ef7839a96c48bd799cfe5437a2d" ]
[ "stats_tools.py" ]
[ "'''\nStatistics tools for tracking.\n\nMIT License\n\nCopyright (c) 2018 Standard Cognition\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\n# Standard\nfrom sys import float_info # for float_info.epsilon\nfrom typing import Union, Tuple, List\n\n# Scientific\nimport numpy as np\nfrom scipy.stats import chi2\nfrom matplotlib import pyplot as plt\nfrom matplotlib import patches\n\n\n# ----- Misc. -----\n\n\ndef det(A: np.ndarray) -> float:\n '''\n Evaluate determinant of a matrix A. Use direct formula for speedup in 2x2\n special case. For speed, input validity is not checked.\n '''\n if A.shape[0] == 2:\n determinant = A[0, 0]*A[1, 1] - A[0, 1]*A[1, 0]\n else:\n determinant = np.linalg.det(A)\n return determinant\n\n\n\n\n# ----- Multivariate Normal Distributions -----\n\n\ndef assert_cov_validity(\n cov: Union[float, np.ndarray],\n eigenvalue_lbnd: float = 1000.0*float_info.epsilon,\n condition_number_ubnd: float = 1.0e6):\n '''\n Assert that covariance `cov` is\n symmetric,\n real,\n positive-definite,\n has eigenvalues not too close to zero, and\n is well-conditioned.\n\n ::WARNING:: Applying `enforce_cov_validity` with the same parameters\n does not guarantee that these assertions will pass. Consider either (1)\n using the functions mutally exclusively, or (2) making the parameters of\n `enforce_cov_validity` slightly stricter in order to compensate for\n possible small numerical errors in eigenreconstruction.\n\n Args:\n cov: an alleged variance (as `float`) or covariance matrix (as\n `np.ndarray`).\n eigenvalue_lbnd: eigenvalues should be at least this much greater than\n zero. Must be strictly positive.\n condition_number_ubnd: inclusive upper bound on matrix condition\n number. 
Must be greater or equal to 1.0.\n\n Returns:\n Whether cov is positive definite and has all real elements.\n '''\n assert eigenvalue_lbnd > 0.0, \\\n 'Covariance eigenvalue lower bound must be > 0.0!'\n assert condition_number_ubnd >= 1.0, \\\n 'Covariance condition number bound must be >= 1.0!'\n\n # Symmetry\n if not np.isscalar(cov):\n assert (cov.T == cov).all(), 'Covariance must be symmetric!'\n\n # Realness\n assert np.isrealobj(cov), 'Covariance must be a real object!'\n\n # Eigenvalue properties\n if np.isscalar(cov):\n assert cov > 0.0, \\\n 'Variance must be strictly positive!'\n assert cov >= eigenvalue_lbnd, \\\n 'Variance must be >= lower bound!'\n else:\n # Precompute eigenvalues for subsequent tests.\n ws = np.linalg.eigvalsh(cov) # The eigenvalues of cov\n w_min = min(ws)\n w_max = max(ws)\n\n # Strict positivity\n assert w_min > 0.0, 'Covariance must be strictly positive!'\n\n # Eigenvalue lower bound\n assert w_min >= eigenvalue_lbnd, \\\n 'Covariance eigenvalues must be >= lower bound!'\n\n # Condition number upper bound\n assert w_max/w_min <= condition_number_ubnd, \\\n 'Condition number must be <= upper bound!'\n\n\ndef enforce_cov_validity(\n cov: Union[float, np.ndarray],\n eigenvalue_lbnd: float = 1000.0*float_info.epsilon,\n condition_number_ubnd: float = 1.0e6) -> Union[float, np.ndarray]:\n '''\n Create and return a version of covariance `cov` which is modified to\n ensure it is\n symmetric,\n real,\n positive-definite,\n has eigenvalues not too close to zero, and\n is well-conditioned.\n\n ::WARNING:: Applying this function to a numpy array does not guarantee that\n calling `assert_cov_validity` with the same parameters will pass.\n Consider either (1) using the functions mutally exclusively, or (2) making\n the parameters of `assert_cov_validity` slightly more lenient in\n order to compensate for possible small numerical errors in\n eigenreconstruction.\n\n Args:\n cov: an alleged variance (as `float`) or covariance matrix (as\n `np.ndarray`).\n eigenvalue_lbnd: eigenvalues should be at least this much greater than\n zero.\n condition_number_ubnd: upper bound on matrix condition number. Should\n be greater or equal to 1.0. 
If it is necessary to modify `cov` to\n enforce this, the largest eigenvalue is held fixed and the smaller\n are increased.\n\n Returns:\n A version of cov modified to be valid.\n '''\n assert eigenvalue_lbnd > 0.0, \\\n 'Covariance eigenvalue lower bound must be > 0.0!'\n assert condition_number_ubnd >= 1.0, \\\n 'Covariance condition number bound must be >= 1.0!'\n\n if np.isscalar(cov):\n # Realness\n cov = float(cov.real)\n\n # Eigenvalue lower bound\n if cov < eigenvalue_lbnd:\n cov = eigenvalue_lbnd\n else:\n # Symmetry\n cov = 0.5*(cov + cov.T)\n\n # Realness\n if not np.isrealobj(cov):\n cov = cov.real\n\n # Precompute eigendecomposition for subsequent enforcements.\n ws, vr = np.linalg.eigh(cov) # Eigenvalues and right eigenvectors\n\n # Eigenvalue lower bound\n for i, w in enumerate(ws):\n if w < eigenvalue_lbnd:\n ws[i] = eigenvalue_lbnd\n\n # Condition number upper bound\n # condition number := max_eigval/min_eigval <= condition_number_ubnd\n # <=> max_eigval/condition_number_ubnd <= min_eigval\n eigenvalue_lbnd_for_conditioning = max(ws)/condition_number_ubnd\n for i, w in enumerate(ws):\n if w < eigenvalue_lbnd_for_conditioning:\n ws[i] = eigenvalue_lbnd_for_conditioning\n\n # Eigenreconstruction\n cov = vr.dot(np.diag(ws).dot(vr.T))\n\n return cov\n\n\ndef evaluate_normal_pdf(\n x: Union[float, np.ndarray],\n cov: Union[float, np.ndarray],\n mean: Union[float, np.ndarray] = None) -> float:\n '''\n Compute and return the value of a multivariate normal PDF (Probability\n Density Function) at a point x.\n\n Args:\n x: where to evaluate PDF.\n cov: covariance of distribution.\n mean: mean of distribution. None => assumed zeros.\n\n Returns:\n PDF value at x.\n '''\n # Get dimension of distribution\n if np.isscalar(x):\n dimension = 1\n else:\n dimension = len(x)\n\n if mean is None:\n delta = x # assume zero mean\n else:\n delta = x - mean\n\n if dimension > 1:\n k = (2.0*np.pi)**(-0.5*dimension)*det(cov)**(-0.5)\n quadratic = delta.dot(np.linalg.solve(cov, delta))\n p = k*np.exp(-0.5*quadratic)\n else:\n k = (2.0*np.pi*cov)**(-0.5)\n quadratic = delta*(1.0/cov)*delta\n p = k*np.exp(-0.5*quadratic)\n\n return float(p)\n\n\ndef sample_from_normal_distribution(\n cov: Union[float, np.ndarray],\n cov_cholesky: np.ndarray = None,\n mean: Union[float, np.ndarray] = None,\n num_samples: int = 1) -> np.ndarray:\n '''\n Generate random sample(s) from a normal distribution having mean `mean`\n and covariance `cov`.\n\n This function is used instead of `np.random.multivariate_normal` because\n the latter issues incorrect warnings (as of 2018:05:24) and is less\n flexible in input. It may also be less efficient if you already have a\n Cholesky factorization.\n\n Args:\n cov: covariance of the distribution.\n cov_cholesky: optionally precomputed cholesky factorization, as output\n from `np.linalg.cholesky(cov)`. If `cov_cholesky` is None, then the\n covariance is allowed to be rank deficient.\n mean: mean of the distribution. None => assume zeros.\n num_samples: number of desired samples.\n\n Returns:\n Array of samples. 
Each column is a sample and the rows run over\n        components of the vectors.\n    '''\n    if np.isscalar(cov):\n        if mean is None:\n            mean = 0.0  # assume zero mean, as documented above\n        sigma = np.sqrt(cov)\n        samples = sigma*np.random.normal(size=(1, num_samples)) + mean\n    else:\n        d = cov.shape[0]\n        if mean is None:\n            mean = np.zeros(d)\n        try:\n            if cov_cholesky is None:\n                cov_cholesky = np.linalg.cholesky(cov)\n            samples = np.dot(\n                cov_cholesky, np.random.normal(size=(d, num_samples)))\n            for i in range(d):\n                samples[i, :] += mean[i]\n        except np.linalg.linalg.LinAlgError:\n            # Fall back on `np.random.multivariate_normal` only for rank-\n            # deficient covariances.\n            samples = np.random.multivariate_normal(\n                mean=mean, cov=cov, size=num_samples)\n            samples = samples.T\n\n    return samples\n\n\n\n\n# ----- Error Ellipse Visualization -----\n\n\ndef generate_error_ellipse_points(\n        mean: np.ndarray,\n        cov: np.ndarray,\n        cov_cholesky: np.ndarray = None,\n        acceptance: float = 0.99,\n        num_points: int = 30,\n        format: str = 'matplotlib') -> np.ndarray:\n    '''\n    Generate points on a level set of a bivariate Gaussian PDF, usu. for\n    plotting error ellipses.\n\n    Args:\n        mean: the distribution's mean.\n        cov: 2x2 array, the distribution's covariance.\n        cov_cholesky: optionally precomputed cholesky factorization, as output\n            from `np.linalg.cholesky(cov)`.\n        acceptance: probability mass that ellipse should contain around mean.\n        num_points: number of points to sample on ellipse. This is a measure of\n            plotting resolution.\n        format: use 'matplotlib' for points output as a `float64` numpy array\n            with rows running over x and y physical dimensions, columns over\n            points. Use 'opencv' for points output as an `int32` numpy array with\n            rows running over points and columns running over x and y pixel\n            dimensions.\n\n    Returns:\n        Shape (2, num_points) array of points for plotting.\n    '''\n    assert mean.shape == (2,), 'Incorrect mean shape!'\n    assert cov.shape == (2, 2), 'Incorrect cov shape!'\n    assert acceptance >= 0.0 and acceptance < 1.0, \\\n        'acceptance rate must be in [0.0, 1.0)!'\n\n    # Sample points on unit circle.\n    dtheta = 2.0*np.pi/num_points\n    thetas = np.linspace(0, 2.0*np.pi - dtheta, num_points)\n    if cov_cholesky is None:\n        cov_cholesky = np.linalg.cholesky(cov)\n    acceptance_factor = np.sqrt(chi2.ppf(acceptance, df=2))\n    cov_cholesky = acceptance_factor*cov_cholesky\n    points = np.zeros((2, num_points))\n    points[0, :] = np.cos(thetas)\n    points[1, :] = np.sin(thetas)\n\n    # Warp circle points into ellipse.\n    for i in range(num_points):\n        points[:, i] = cov_cholesky.dot(points[:, i]) + mean\n\n    if format == 'matplotlib':\n        return points\n    elif format == 'opencv':\n        return points.T.astype(np.int32)\n    else:\n        assert False, 'Format not recognized!'\n\n\ndef plot_polygon(\n        boundary_points: np.ndarray,\n        edgecolor: str = 'k', facecolor: str = 'm', alpha: float = 0.5,\n        linewidth: float = 3.0, linestyle: str = 'solid', zorder: int = 0):\n    '''\n    Wrapper for `plt.fill` that has reasonable default arguments for\n    plotting acceptance regions, esp. error ellipses.\n\n    Args:\n        boundary_points: shape 2 x many, no repeat at wraparound. First row is\n            x values, second is y values.\n        edgecolor: edge color.\n        facecolor: face color, 'none' => transparent interior.\n        alpha: close to 0.0 => transparent, close to 1.0 => opaque.\n        linewidth: usu. 3.0 or greater for good visibility.\n        linestyle: e.g. 
'-', '--', or ':'.\n        zorder: higher => closer to foreground.\n    '''\n    plt.fill(\n        boundary_points[0, :], boundary_points[1, :],\n        edgecolor=edgecolor, facecolor=facecolor, alpha=alpha,\n        linewidth=linewidth, linestyle=linestyle, zorder=zorder)\n\n\ndef plot_error_ellipse(\n        mean: np.ndarray,\n        cov: np.ndarray,\n        cov_cholesky: np.ndarray = None,\n        acceptance: float = 0.99,\n        num_points: int = 30,\n        edgecolor: str = 'k', facecolor: str = 'm', alpha: float = 0.5,\n        linewidth: float = 3.0, linestyle: str = 'solid', zorder: int = 0) \\\n        -> patches.Polygon:\n    '''\n    Plot 2D error ellipse from mean and covariance.\n\n    Args:\n        mean: distribution's mean (length 2).\n        cov: distribution's covariance (2x2).\n        cov_cholesky: optionally precomputed cholesky factorization, as output\n            from `np.linalg.cholesky(cov)`.\n        acceptance: amount of probability mass ellipse should contain.\n        num_points: number of points to sample on ellipse. This is a measure of\n            plotting resolution.\n        edgecolor: edge color.\n        facecolor: face color, 'none' => transparent interior.\n        alpha: close to 0.0 => transparent, close to 1.0 => opaque.\n        linewidth: usu. 3.0 or greater for good visibility.\n        linestyle: e.g. '-', '--', or ':'.\n        zorder: higher => closer to foreground.\n\n    Returns:\n        the plotted ellipse polygon patch (the first patch from `plt.fill`).\n    '''\n    if cov_cholesky is None:\n        cov_cholesky = np.linalg.cholesky(cov)\n    boundary_points = generate_error_ellipse_points(\n        mean=mean, cov=cov, cov_cholesky=cov_cholesky,\n        acceptance=acceptance, num_points=num_points)\n\n    polygons = plt.fill(\n        boundary_points[0, :], boundary_points[1, :],\n        edgecolor=edgecolor, facecolor=facecolor, alpha=alpha,\n        linewidth=linewidth, linestyle=linestyle, zorder=zorder)[0]\n\n    return polygons\n" ]
[ [ "numpy.linalg.cholesky", "numpy.sin", "numpy.random.normal", "numpy.zeros", "matplotlib.pyplot.fill", "numpy.linalg.eigh", "numpy.linalg.det", "numpy.exp", "numpy.random.multivariate_normal", "numpy.isscalar", "numpy.linalg.eigvalsh", "numpy.isrealobj", "numpy.cos", "numpy.sqrt", "numpy.linalg.solve", "numpy.linspace", "scipy.stats.chi2.ppf", "numpy.diag" ] ]
SatyaSiddharthDash/transformers
[ "a39dfe4fb122c11be98a563fb8ca43b322e01036" ]
[ "src/transformers/trainer_tf.py" ]
[ "\"\"\"Tensorflow trainer class.\"\"\"\n\nimport datetime\nimport logging\nimport math\nimport os\nimport sys\nimport warnings\nfrom typing import Callable, Dict, Optional, Tuple\n\nimport numpy as np\nimport tensorflow as tf\nfrom packaging.version import parse\n\nfrom .modeling_tf_utils import TFPreTrainedModel\nfrom .optimization_tf import GradientAccumulator, create_optimizer\nfrom .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, is_wandb_available, set_seed\nfrom .training_args_tf import TFTrainingArguments\n\n\nif is_wandb_available():\n import wandb\n\n\nlogger = logging.getLogger(__name__)\n\n\nif parse(tf.__version__).release < (2, 2, 0):\n logger.info(\n \"You need to run the TensorFlow trainer with at least the version 2.2.0, your version is {}\".format(\n tf.__version__\n )\n )\n sys.exit(1)\n\n\nclass TFTrainer:\n \"\"\"\n TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,\n optimized for ๐Ÿค— Transformers.\n\n Args:\n model (:class:`~transformers.TFPreTrainedModel`):\n The model to train, evaluate or use for predictions.\n args (:class:`~transformers.TFTrainingArguments`):\n The arguments to tweak training.\n train_dataset (:class:`~tf.data.Dataset`, `optional`):\n The dataset to use for training.\n eval_dataset (:class:`~tf.data.Dataset`, `optional`):\n The dataset to use for evaluation.\n compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):\n The function that will be used to compute metrics at evaluation. Must take a\n :class:`~transformers.EvalPrediction` and return a dictionary string to metric values.\n prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`):\n When performing evaluation and predictions, only returns the loss.\n tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):\n Object to write to TensorBoard.\n optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):\n A tuple containing the optimizer and the scheduler to use. The optimizer default to an instance of\n :class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of\n :class:`~transformers.AdamWeightDecay`. 
The scheduler will default to an instance of\n :class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else\n an instance of :class:`~transformers.WarmUp`.\n \"\"\"\n\n def __init__(\n self,\n model: TFPreTrainedModel,\n args: TFTrainingArguments,\n train_dataset: Optional[tf.data.Dataset] = None,\n eval_dataset: Optional[tf.data.Dataset] = None,\n compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,\n prediction_loss_only=False,\n tb_writer: Optional[tf.summary.SummaryWriter] = None,\n optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (\n None,\n None,\n ),\n ):\n self.model = model\n self.args = args\n self.train_dataset = train_dataset\n self.eval_dataset = eval_dataset\n self.compute_metrics = compute_metrics\n self.prediction_loss_only = prediction_loss_only\n self.optimizer, self.lr_scheduler = optimizers\n self.gradient_accumulator = GradientAccumulator()\n self.global_step = 0\n self.epoch_logging = 0\n\n if tb_writer is not None:\n self.tb_writer = tb_writer\n else:\n self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)\n\n if is_wandb_available():\n self.setup_wandb()\n elif os.environ.get(\"WANDB_DISABLED\") != \"true\":\n logger.info(\n \"You are instantiating a Trainer but W&B is not installed. To use wandb logging, \"\n \"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface.\"\n )\n\n set_seed(self.args.seed)\n\n def get_train_tfdataset(self) -> tf.data.Dataset:\n \"\"\"\n Returns the training :class:`~tf.data.Dataset`.\n\n Subclass and override this method if you want to inject some custom behavior.\n \"\"\"\n if self.train_dataset is None:\n raise ValueError(\"Trainer: training requires a train_dataset.\")\n\n self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps\n self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()\n\n if self.num_train_examples < 0:\n raise ValueError(\"The training dataset must have an asserted cardinality\")\n\n ds = (\n self.train_dataset.repeat()\n .shuffle(self.num_train_examples, seed=self.args.seed)\n .batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)\n .prefetch(tf.data.experimental.AUTOTUNE)\n )\n\n return self.args.strategy.experimental_distribute_dataset(ds)\n\n def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:\n \"\"\"\n Returns the evaluation :class:`~tf.data.Dataset`.\n\n Args:\n eval_dataset (:class:`~tf.data.Dataset`, `optional`):\n If provided, will override `self.eval_dataset`.\n\n Subclass and override this method if you want to inject some custom behavior.\n \"\"\"\n if eval_dataset is None and self.eval_dataset is None:\n raise ValueError(\"Trainer: evaluation requires an eval_dataset.\")\n\n eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset\n num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()\n\n if num_examples < 0:\n raise ValueError(\"The training dataset must have an asserted cardinality\")\n\n approx = math.floor if self.args.dataloader_drop_last else math.ceil\n steps = approx(num_examples / self.args.eval_batch_size)\n ds = (\n eval_dataset.repeat()\n .batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)\n .prefetch(tf.data.experimental.AUTOTUNE)\n )\n\n return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples\n\n def 
get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:\n \"\"\"\n Returns a test :class:`~tf.data.Dataset`.\n\n Args:\n test_dataset (:class:`~tf.data.Dataset`): The dataset to use.\n\n Subclass and override this method if you want to inject some custom behavior.\n \"\"\"\n\n num_examples = tf.data.experimental.cardinality(test_dataset).numpy()\n\n if num_examples < 0:\n raise ValueError(\"The training dataset must have an asserted cardinality\")\n\n approx = math.floor if self.args.dataloader_drop_last else math.ceil\n steps = approx(num_examples / self.args.eval_batch_size)\n ds = (\n test_dataset.repeat()\n .batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)\n .prefetch(tf.data.experimental.AUTOTUNE)\n )\n\n return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples\n\n def create_optimizer_and_scheduler(self, num_training_steps: int):\n \"\"\"\n Setup the optimizer and the learning rate scheduler.\n\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n TFTrainer's init through :obj:`optimizers`, or subclass and override this method.\n \"\"\"\n if not self.optimizer and not self.lr_scheduler:\n self.optimizer, self.lr_scheduler = create_optimizer(\n self.args.learning_rate,\n num_training_steps,\n self.args.warmup_steps,\n adam_beta1=self.args.adam_beta1,\n adam_beta2=self.args.adam_beta2,\n adam_epsilon=self.args.adam_epsilon,\n weight_decay_rate=self.args.weight_decay,\n )\n\n def setup_wandb(self):\n \"\"\"\n Setup the optional Weights & Biases (`wandb`) integration.\n\n One can subclass and override this method to customize the setup if needed. Find more information\n `here <https://docs.wandb.com/huggingface>`__. 
You can also override the following environment variables:\n\n Environment:\n WANDB_PROJECT:\n (Optional): str - \"huggingface\" by default, set this to a custom string to store results in a different project\n WANDB_DISABLED:\n (Optional): boolean - defaults to false, set to \"true\" to disable wandb entirely\n \"\"\"\n if hasattr(self, \"_setup_wandb\"):\n warnings.warn(\n \"The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.\",\n FutureWarning,\n )\n return self._setup_wandb()\n\n logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ[\"WANDB_DISABLED\"] = \"true\"')\n wandb.init(project=os.getenv(\"WANDB_PROJECT\", \"huggingface\"), config=vars(self.args))\n\n def prediction_loop(\n self,\n dataset: tf.data.Dataset,\n steps: int,\n num_examples: int,\n description: str,\n prediction_loss_only: Optional[bool] = None,\n ) -> PredictionOutput:\n \"\"\"\n Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and\n :func:`~transformers.TFTrainer.predict`.\n\n Works both with or without labels.\n \"\"\"\n if hasattr(self, \"_prediction_loop\"):\n warnings.warn(\n \"The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.\",\n FutureWarning,\n )\n return self._prediction_loop(\n dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only\n )\n\n prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only\n\n logger.info(\"***** Running %s *****\", description)\n logger.info(\" Num examples = %d\", num_examples)\n logger.info(\" Batch size = %d\", self.args.eval_batch_size)\n\n label_ids: np.ndarray = None\n preds: np.ndarray = None\n self.eval_loss = tf.keras.metrics.Sum()\n\n # Reset the past mems state at the beginning of the evaluation if necessary.\n if self.args.past_index >= 0:\n self._past = None\n\n for step, batch in enumerate(dataset):\n logits = self.distributed_prediction_steps(batch)\n _, labels = batch\n\n if not prediction_loss_only:\n if isinstance(logits, tuple):\n logits = logits[0]\n\n if isinstance(labels, tuple):\n labels = labels[0]\n\n if self.args.n_replicas > 1:\n for val in logits.values:\n if preds is None:\n preds = val.numpy()\n else:\n preds = np.append(preds, val.numpy(), axis=0)\n\n for val in labels.values:\n if label_ids is None:\n label_ids = val.numpy()\n else:\n label_ids = np.append(label_ids, val.numpy(), axis=0)\n else:\n if preds is None:\n preds = logits.numpy()\n else:\n preds = np.append(preds, logits.numpy(), axis=0)\n\n if label_ids is None:\n label_ids = labels.numpy()\n else:\n label_ids = np.append(label_ids, labels.numpy(), axis=0)\n\n if step == steps:\n break\n\n if self.compute_metrics is not None and preds is not None and label_ids is not None:\n metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))\n else:\n metrics = {}\n\n metrics[\"eval_loss\"] = self.eval_loss.result().numpy() / (steps * self.args.eval_batch_size)\n\n for key in list(metrics.keys()):\n if not key.startswith(\"eval_\"):\n metrics[f\"eval_{key}\"] = metrics.pop(key)\n\n if self.args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of training\n delattr(self, \"_past\")\n\n return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)\n\n def log(self, logs: Dict[str, float]) -> None:\n \"\"\"\n Log :obj:`logs` on the various objects 
watching training.\n\n        Subclass and override this method to inject custom behavior.\n\n        Args:\n            logs (:obj:`Dict[str, float]`):\n                The values to log.\n        \"\"\"\n        if hasattr(self, \"_log\"):\n            warnings.warn(\n                \"The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.\",\n                FutureWarning,\n            )\n            return self._log(logs)\n        logs[\"epoch\"] = self.epoch_logging\n\n        if self.tb_writer:\n            with self.tb_writer.as_default():\n                for k, v in logs.items():\n                    tf.summary.scalar(k, v, step=self.global_step)\n            self.tb_writer.flush()\n\n        if is_wandb_available():\n            wandb.log(logs, step=self.global_step)\n\n        output = {**logs, **{\"step\": self.global_step}}\n\n        logger.info(output)\n\n    def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:\n        \"\"\"\n        Run evaluation and returns metrics.\n\n        The calling script will be responsible for providing a method to compute metrics, as they are\n        task-dependent (pass it to the init :obj:`compute_metrics` argument).\n\n        Args:\n            eval_dataset (:class:`~tf.data.Dataset`, `optional`):\n                Pass a dataset if you wish to override :obj:`self.eval_dataset`.\n\n        Returns:\n            A dictionary containing the evaluation loss and the potential metrics computed from the predictions.\n        \"\"\"\n        eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)\n\n        output = self.prediction_loop(eval_ds, steps, num_examples, description=\"Evaluation\")\n        logs = {**output.metrics}\n        logs[\"epoch\"] = self.epoch_logging\n\n        self.log(logs)\n\n        return output.metrics\n\n    def prediction_step(self, features: tf.Tensor, labels: tf.Tensor) -> tf.Tensor:\n        \"\"\"\n        Compute the prediction on features and update the loss with labels.\n\n        Subclass and override to inject some custom behavior.\n        \"\"\"\n        per_example_loss, logits = self.run_model(features, labels, False)\n\n        self.eval_loss.update_state(per_example_loss)\n\n        return logits\n\n    @tf.function\n    def distributed_prediction_steps(self, batch):\n        logits = self.args.strategy.run(self.prediction_step, batch)\n\n        return logits\n\n    def train(self) -> None:\n        \"\"\"\n        Train method to train the model.\n        \"\"\"\n        train_ds = self.get_train_tfdataset()\n\n        if self.args.debug:\n            tf.summary.trace_on(graph=True, profiler=True)\n\n        self.gradient_accumulator.reset()\n\n        if self.args.max_steps > 0:\n            t_total = self.args.max_steps\n            self.steps_per_epoch = self.args.max_steps\n        else:\n            approx = math.floor if self.args.dataloader_drop_last else math.ceil\n            self.steps_per_epoch = approx(self.num_train_examples / self.total_train_batch_size)\n            t_total = self.steps_per_epoch * self.args.num_train_epochs\n\n        with self.args.strategy.scope():\n            self.create_optimizer_and_scheduler(num_training_steps=t_total)\n            iterations = self.optimizer.iterations\n            self.global_step = iterations.numpy()\n            folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)\n            ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)\n            self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)\n\n            if self.model.ckpt_manager.latest_checkpoint:\n                epochs_trained = self.global_step // (self.num_train_examples // self.args.gradient_accumulation_steps)\n                steps_trained_in_current_epoch = self.global_step % (\n                    self.num_train_examples // self.args.gradient_accumulation_steps\n                )\n\n                logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n                logger.info(\" Continuing training from epoch %d\", epochs_trained)\n                logger.info(\" Continuing training 
from global step %d\", self.global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n logger.info(\n \"Checkpoint file %s found and restoring from checkpoint\", self.model.ckpt_manager.latest_checkpoint\n )\n\n ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()\n else:\n epochs_trained = 1\n\n tf.summary.experimental.set_step(iterations)\n\n epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs\n\n if self.args.fp16:\n policy = tf.keras.mixed_precision.experimental.Policy(\"mixed_float16\")\n tf.keras.mixed_precision.experimental.set_policy(policy)\n\n with self.tb_writer.as_default():\n tf.summary.text(\"args\", self.args.to_json_string())\n\n self.tb_writer.flush()\n\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", self.num_train_examples)\n logger.info(\" Num Epochs = %d\", epochs)\n logger.info(\" Instantaneous batch size per device = %d\", self.args.per_device_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\", self.total_train_batch_size\n )\n logger.info(\" Gradient Accumulation steps = %d\", self.args.gradient_accumulation_steps)\n logger.info(\" Steps per epoch = %d\", self.steps_per_epoch)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n self.train_loss = tf.keras.metrics.Sum()\n start_time = datetime.datetime.now()\n\n for epoch_iter in range(epochs_trained, int(epochs + 1)):\n # Reset the past mems state at the beginning of each epoch if necessary.\n if self.args.past_index >= 0:\n self._past = None\n\n for step, batch in enumerate(train_ds):\n self.global_step = iterations.numpy()\n self.epoch_logging = epoch_iter - 1 + (step + 1) / self.steps_per_epoch\n\n self.distributed_training_steps(batch)\n\n training_loss = self.train_loss.result() / ((step + 1) * self.total_train_batch_size)\n\n if self.args.debug:\n logs = {}\n logs[\"loss\"] = training_loss.numpy()\n logs[\"epoch\"] = self.epoch_logging\n\n self.log(logs)\n\n if self.global_step == 1 and self.args.debug:\n with self.tb_writer.as_default():\n tf.summary.trace_export(\n name=\"training\", step=self.global_step, profiler_outdir=self.args.logging_dir\n )\n\n if (\n self.global_step > 0\n and self.args.evaluate_during_training\n and self.global_step % self.args.eval_steps == 0\n ):\n self.evaluate()\n\n if (self.global_step > 0 and self.global_step % self.args.logging_steps == 0) or (\n self.global_step == 1 and self.args.logging_first_step\n ):\n logs = {}\n logs[\"loss\"] = training_loss.numpy()\n logs[\"learning_rate\"] = self.lr_scheduler(self.global_step).numpy()\n logs[\"epoch\"] = self.epoch_logging\n\n self.log(logs)\n\n if self.global_step > 0 and self.global_step % self.args.save_steps == 0:\n ckpt_save_path = self.model.ckpt_manager.save()\n\n logger.info(\"Saving checkpoint for step {} at {}\".format(self.global_step, ckpt_save_path))\n\n if self.global_step > 0 and self.global_step % self.steps_per_epoch == 0:\n break\n\n self.train_loss.reset_states()\n\n end_time = datetime.datetime.now()\n\n logger.info(\"Training took: {}\".format(str(end_time - start_time)))\n\n if self.args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of training\n delattr(self, \"_past\")\n\n def training_step(self, features, labels):\n \"\"\"\n Perform a training step on features and labels.\n\n Subclass and override to inject some custom behavior.\n \"\"\"\n per_example_loss, _ = self.run_model(features, 
labels, True)\n        scaled_loss = per_example_loss / self.total_train_batch_size\n        gradients = tf.gradients(scaled_loss, self.model.trainable_variables)\n        gradients = [\n            g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)\n        ]\n\n        if self.args.gradient_accumulation_steps > 1:\n            self.gradient_accumulator(gradients)\n\n        self.train_loss.update_state(per_example_loss)\n\n        if self.args.gradient_accumulation_steps == 1:\n            return gradients\n\n    def apply_gradients(self, features, labels):\n        if self.args.gradient_accumulation_steps == 1:\n            gradients = self.training_step(features, labels)\n\n            self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))\n        else:\n            for _ in tf.range(self.args.gradient_accumulation_steps):\n                reduced_features = features[: self.args.train_batch_size // self.args.n_replicas]\n                reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]\n\n                self.training_step(reduced_features, reduced_labels)\n\n                features = tf.concat(\n                    [features[self.args.train_batch_size // self.args.n_replicas :], reduced_features], axis=0\n                )\n\n            gradients = self.gradient_accumulator.gradients\n            gradients = [\n                (tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients\n            ]\n\n            self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))\n            self.gradient_accumulator.reset()\n\n    @tf.function\n    def distributed_training_steps(self, batch):\n        with self.args.strategy.scope():\n            self.args.strategy.run(self.apply_gradients, batch)\n\n    def run_model(self, features, labels, training):\n        \"\"\"\n        Computes the loss of the given features and labels pair.\n\n        Subclass and override this method if you want to inject some custom behavior.\n\n        Args:\n            features (:obj:`tf.Tensor`): A batch of input features.\n            labels (:obj:`tf.Tensor`): A batch of labels.\n            training (:obj:`bool`): Whether or not to run the model in training mode.\n\n        Returns:\n            A tuple of two :obj:`tf.Tensor`: The loss and logits.\n        \"\"\"\n        if hasattr(self, \"_run_model\"):\n            warnings.warn(\n                \"The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.\",\n                FutureWarning,\n            )\n            return self._run_model(features, labels, training)\n\n        if self.args.past_index >= 0 and getattr(self, \"_past\", None) is not None:\n            features[\"mems\"] = self._past\n\n        if isinstance(labels, (dict)):\n            outputs = self.model(features, training=training, **labels)[:2]\n        else:\n            outputs = self.model(features, labels=labels, training=training)[:2]\n\n        loss, logits = outputs[:2]\n\n        if self.args.past_index >= 0:\n            self._past = outputs[self.args.past_index]\n\n        return loss, logits\n\n    def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:\n        \"\"\"\n        Run prediction and returns predictions and potential metrics.\n\n        Depending on the dataset and your use case, your test dataset may contain labels.\n        In that case, this method will also return metrics, like in :obj:`evaluate()`.\n\n        Args:\n            test_dataset (:class:`~tf.data.Dataset`):\n                Dataset to run the predictions on.\n        Returns:\n            `NamedTuple`:\n            predictions (:obj:`np.ndarray`):\n                The predictions on :obj:`test_dataset`.\n            label_ids (:obj:`np.ndarray`, `optional`):\n                The labels (if the dataset contained some).\n            metrics (:obj:`Dict[str, float]`, `optional`):\n                The potential dictionary of metrics (if the dataset contained labels).\n        \"\"\"\n        test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)\n\n        return 
self.prediction_loop(test_ds, steps, num_examples, description=\"Prediction\")\n\n def save_model(self, output_dir: Optional[str] = None):\n \"\"\"\n Will save the model, so you can reload it using :obj:`from_pretrained()`.\n \"\"\"\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n\n logger.info(\"Saving model in {}\".format(output_dir))\n\n if not isinstance(self.model, TFPreTrainedModel):\n raise ValueError(\"Trainer.model appears to not be a PreTrainedModel\")\n\n self.model.save_pretrained(output_dir)\n" ]
[ [ "tensorflow.keras.mixed_precision.experimental.set_policy", "tensorflow.range", "tensorflow.summary.trace_on", "tensorflow.train.CheckpointManager", "tensorflow.concat", "tensorflow.data.experimental.cardinality", "tensorflow.summary.scalar", "tensorflow.keras.metrics.Sum", "tensorflow.summary.experimental.set_step", "tensorflow.gradients", "tensorflow.keras.mixed_precision.experimental.Policy", "tensorflow.zeros_like", "tensorflow.clip_by_value", "tensorflow.summary.trace_export", "tensorflow.summary.create_file_writer", "tensorflow.train.Checkpoint" ] ]
izzrak/numpy_cnn
[ "f8dc1d577520d70da2f159b6ad6b36b5ad0cc5e1" ]
[ "conv_net.py" ]
[ "import numpy as np\nfrom conv_layer import conv_layer\nfrom pooling_layer import max_pooling\nfrom activation_layer import relu\nfrom fc_layer import fc_layer\nfrom loss_layer import loss_layer\n\n\nclass conv_net(object):\n \"\"\"\n Convolution net class\n methods:\n - forward: forward pass\n - backprop: back propagation\n - update: update weight \n \"\"\"\n def __init__(self, input_shape):\n # define batch param\n self.batch_loss = 0\n self.batch_acc = 0\n self.batch_size = input_shape[0]\n \n # define network\n print('network structure:')\n self.conv1 = conv_layer (input_shape, 8, 5, 1, 2) #[batch_size, 28, 28, 1]\n print('conv1', self.conv1.output_shape)\n self.relu1 = relu(self.conv1.output_shape)\n print('relu1',self.relu1.output_shape)\n self.pool1 = max_pooling(self.relu1.output_shape)\n print('pool1', self.pool1.output_shape)\n self.conv2 = conv_layer (self.pool1.output_shape, 16, 3, 1, 1)\n print('conv2', self.conv2.output_shape)\n self.relu2 = relu(self.conv2.output_shape)\n print('relu2', self.relu2.output_shape)\n self.pool2 = max_pooling(self.relu2.output_shape)\n print('pool2', self.pool2.output_shape)\n self.fc = fc_layer(self.pool2.output_shape, 10)\n print('fc', self.fc.output_shape)\n self.loss = loss_layer(self.fc.output_shape)\n\n def forward(self, x, labels=None):\n \t# clear batch loss\n self.batch_loss = 0\n self.batch_acc = 0\n\n # forward pass\n conv1_out = self.conv1.forward(x)\n relu1_out = self.relu1.forward(conv1_out)\n pool1_out = self.pool1.forward(relu1_out)\n conv2_out = self.conv2.forward(pool1_out)\n relu2_out = self.relu2.forward(conv2_out)\n pool2_out = self.pool2.forward(relu2_out)\n fc_out = self.fc.forward(pool2_out)\n\n # compute loss\n if type(labels) == np.ndarray:\n self.batch_loss += self.loss.loss_foward(fc_out, np.array(labels))\n for j in range(self.batch_size):\n if np.argmax(self.loss.sf[j]) == labels[j]:\n self.batch_acc += 1\n else:\n # compute softmax only\n self.loss.sf = self.loss.softmax(fc_out)\n\n\n def backprop(self):\n # back propagation\n self.conv1.backprop(self.relu1.backprop(\n self.pool1.backprop(\n self.conv2.backprop(\n self.relu2.backprop(\n self.pool2.backprop(\n self.fc.backprop(\n self.loss.backprop())))))))\n\n def update(self, lr=1e-4, weight_decay=0.0004):\n # adam optimizer\n self.fc.update(lr, weight_decay)\n self.conv2.update(lr, weight_decay)\n self.conv1.update(lr, weight_decay)\n\nif __name__ == \"__main__\":\n net_test = conv_net([2,32,32,4])\n # img = np.random.standard_normal((2, 32, 32, 3))\n imgs = np.ones((2, 32, 32, 4))\n imgs *= 2\n labels = np.array([1,0]).reshape((2,1))\n net_test.forward(imgs,labels)\n net_test.backprop()\n\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.argmax" ] ]
xinzhou-ntu/DGNs
[ "1abbecf921c3442c6fbd065eeec16cb32605bd1e" ]
[ "realworld_benchmark/nets/mlp_readout_layer.py" ]
[ "# MIT License\n# Copyright (c) 2020 Vijay Prakash Dwivedi, Chaitanya K. Joshi, Thomas Laurent, Yoshua Bengio, Xavier Bresson\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\"\"\"\n MLP Layer used after graph vector representation\n\"\"\"\n\nclass MLPReadout(nn.Module):\n\n def __init__(self, input_dim, output_dim, L=2, decreasing_dim=True): # L=nb_hidden_layers\n super().__init__()\n if decreasing_dim:\n list_FC_layers = [nn.Linear(input_dim // 2 ** l, input_dim // 2 ** (l + 1), bias=True) for l in range(L)]\n list_FC_layers.append(nn.Linear(input_dim // 2 ** L, output_dim, bias=True))\n else:\n list_FC_layers = [nn.Linear(input_dim, input_dim, bias=True) for _ in range(L)]\n list_FC_layers.append(nn.Linear(input_dim, output_dim, bias=True))\n self.FC_layers = nn.ModuleList(list_FC_layers)\n self.L = L\n\n def forward(self, x):\n y = x\n for l in range(self.L):\n y = self.FC_layers[l](y)\n y = F.relu(y)\n y = self.FC_layers[self.L](y)\n return y\n" ]
[ [ "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.ModuleList" ] ]
SecLoop/ECommerceCrawlers
[ "8121a4a23dd8bb38f1ebf9a7bc838952d4146834" ]
[ "QiChaCha/get_parks_companies_threads.py" ]
[ "import os\nimport requests\nfrom config import *\nfrom lxml import etree\nimport csv\n# from fake_useragent import UserAgent\nimport pandas as pd\nimport threading\nimport time\nimport random\n\ndef log(txt):\n print(txt)\n with open('log.txt', 'a') as f:\n f.write(txt+'\\n')\n\n\nclass QiChaCha:\n def __init__(self, cookie, proxies, companies_name):\n self.cookie = cookie\n self.proxies = proxies\n self.companies_name = companies_name\n # ua = UserAgent(verify_ssl=False)\n self.headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n # 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Cookie': self.cookie,\n 'DNT': '1',\n 'Host': 'www.qichacha.com',\n 'Referer': 'https://www.qichacha.com/more_zonecompany.html?id=000c85b2a120712454f4c5b74e4fdfae&p=2',\n 'Sec-Fetch-Dest': 'document',\n 'Sec-Fetch-Mode': 'navigate',\n 'Sec-Fetch-Site': 'none',\n 'Sec-Fetch-User': '?1',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'\n # 'User-Agent': ua.random\n }\n self.path = './csv/'\n self.file_name = self.path+self.companies_name+'.csv'\n self.ListTask = []\n self.csv_data = pd.read_csv('./csv/ๅ…จๅ›ฝๅทฅไธšๅ›ญๅŒบไฟกๆฏ.csv')\n self.length = len(self.csv_data)\n\n self.work()\n\n def get_companies(self, id, page_no):\n url = 'https://www.qichacha.com/more_zonecompany.html?id={}&p={}'.format(\n id, page_no)\n while True:\n try:\n # with requests.get(url, headers=self.headers, proxies=self.proxies) as response:\n with requests.get(url, headers=self.headers) as response:\n # response = requests.get(url, headers=self.headers)\n html = response.text\n parseHtml = etree.HTML(html)\n\n return parseHtml\n except Exception as e:\n log('ไปฃ็†่ฏทๆฑ‚ๆ•…้šœ๏ผŒ้‡ๅคไปปๅŠก๏ผ')\n pass\n\n def get_companies_all(self, name_thread, id, province, city, county, park, area, numcop):\n num_page = numcop // 10 + 1\n\n\n for i in range(1, num_page+1):\n # for i in range(1, 2):\n parseHtml = self.get_companies(id, i)\n # '/firm_2468290f38f4601299b29acdf6eccce9.html'\n rUrls = parseHtml.xpath(\n '//div[@class=\"e_zone-company\"]/section/table/tbody/tr/td[2]/a/@href')\n # 'ไธดๆตทๅธ‚ไบ’้€šๆฑฝ่ฝฆ้”€ๅ”ฎๆœ‰้™ๅ…ฌๅธ'\n rTitle = parseHtml.xpath(\n '//div[@class=\"e_zone-company\"]/section/table/tbody/tr/td[2]/a/text()')\n # '้ป„ๅ‰‘ๅ‹‡'\n rPerson = parseHtml.xpath(\n '//div[@class=\"e_zone-company\"]/section/table/tbody/tr/td[2]/p[1]/a/text()')\n # 'ๆณจๅ†Œ่ต„ๆœฌ๏ผš1000ไธ‡ๅ…ƒไบบๆฐ‘ๅธ'\n rCapital = parseHtml.xpath(\n '//div[@class=\"e_zone-company\"]/section/table/tbody/tr/td[2]/p[1]/span[1]/text()')\n # 'ๆˆ็ซ‹ๆ—ฅๆœŸ๏ผš2017-09-08'\n rSetTime = parseHtml.xpath(\n '//div[@class=\"e_zone-company\"]/section/table/tbody/tr/td[2]/p[1]/span[2]/text()')\n # '\\n ้‚ฎ็ฎฑ๏ผš[email protected]\\n '\n rEmail = parseHtml.xpath(\n '//div[@class=\"e_zone-company\"]/section/table/tbody/tr/td[2]/p[2]/text()')\n # '็”ต่ฏ๏ผš0576-85323665'\n rPhone = parseHtml.xpath(\n '//div[@class=\"e_zone-company\"]/section/table/tbody/tr/td[2]/p[2]/span/text()')\n # '\\n ๅœฐๅ€๏ผšๆต™ๆฑŸ็œๅฐๅทžๅธ‚ไธดๆตทๅธ‚ๆฑŸๅ—่ก—้“ๆ’ๅคงๅฎถๅฑ…ๅปบๆๅŸŽ(้–ๆฑŸๅ—่ทฏ112ๅท)\\n '\n rAddress = parseHtml.xpath(\n '//div[@class=\"e_zone-company\"]/section/table/tbody/tr/td[2]/p[3]/text()')\n # 'ๅญ˜็ปญ'\n rState = parseHtml.xpath(\n 
'//div[@class=\"e_zone-company\"]/section/table/tbody/tr/td[3]/span/text()')\n\n num_current = len(rUrls)\n for num in range(num_current):\n try:\n url = 'https://www.qichacha.com'+rUrls[num]\n company = rTitle[num]\n person = rPerson[num]\n capital = rCapital[num].replace('ๆณจๅ†Œ่ต„ๆœฌ๏ผš', '')\n settime = rSetTime[num].replace('ๆˆ็ซ‹ๆ—ฅๆœŸ๏ผš', '')\n email = rEmail[num].replace(\n '\\n', '').replace('้‚ฎ็ฎฑ๏ผš', '').strip()\n phone = rPhone[num].replace('็”ต่ฏ๏ผš', '')\n address = rAddress[num].replace(\n '\\n', '').replace('ๅœฐๅ€๏ผš', '').strip()\n state = rState[num]\n L = [province, city, county, park, area, numcop, company,\n person, capital, settime, email, phone, address, state, url]\n with open(self.file_name, 'a', newline='', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerow(L)\n except Exception as e:\n self.err_log(id, i)\n log(\n '{} ๆŠฅ้”™ ID: {} , ้กต็ : {} / {}'.format(name_thread, id, i, num_page))\n log('{} ๅฎŒๆˆ็ˆฌๅ– ID: {} , ้กต็ : {} / {}'.format(name_thread, id, i, num_page))\n\n def err_log(self, id, page):\n err_file = self.path + 'error.csv'\n if not os.path.exists(err_file):\n header = ['id', 'page']\n with open(err_file, 'a', newline='', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n with open(err_file, 'a', newline='', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerow([id, page])\n\n def thread_task(self):\n name_thread = threading.current_thread().name\n n = 3 # # ไธ‰ๆฌก่ฏทๆฑ‚ self.ListTash ๆ— ่ฟ”ๅ›ž, ๅˆ™่ฎคไธบๆ‰€ๆœ‰ๆ•ฐๆฎ็ˆฌๅ–ๅฎŒๆฏ•\n while True:\n if n == 0:\n break\n try:\n i = self.ListTask.pop(0)\n province = self.csv_data.loc[i, 'province']\n city = self.csv_data.loc[i, 'city']\n county = self.csv_data.loc[i, 'county']\n park = self.csv_data.loc[i, 'park']\n area = self.csv_data.loc[i, 'area']\n numcop = self.csv_data.loc[i, 'numcop']\n id = self.csv_data.loc[i, 'url'].split('_')[-1]\n self.get_companies_all(name_thread, id, province,\n city, county, park, area, numcop)\n log('\\n\\n{} ๅฎŒๆˆ็ˆฌๅ– ID: {}, ๆ•ดไฝ“่ฟ›ๅบฆ: {} / {}\\n\\n=============================\\n'.format(\n name_thread, id, i+1, self.length))\n n = 3\n except Exception as e:\n n -= 1\n time.sleep(random.randint(3,10))\n\n def work(self):\n # ๅˆคๆ–ญ\\ๆ–ฐๅปบๆ–‡ไปถๅคน\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n log(self.path+' ๆ–‡ไปถๅคนๅˆ›ๅปบๆˆๅŠŸ')\n\n # ๅˆคๆ–ญ\\ๆ–ฐๅปบๆ–‡ไปถ\n if not os.path.exists(self.file_name):\n header = ['province', 'city', 'county', 'park', 'area', 'numcop', 'company',\n 'person', 'capital', 'settime', 'email', 'phone', 'address', 'state', 'url']\n with open(self.file_name, 'a', newline='', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n\n for i in range(self.length):\n self.ListTask.append(i)\n\n threads = []\n for i in range(3):\n thread = threading.Thread(target=self.thread_task, args=())\n threads.append(thread)\n\n # ๅฏๅŠจๅคš็บฟ็จ‹\n for t in threads:\n t.start()\n log('ๅผ€ๅฏ็บฟ็จ‹: '+t.name)\n\n for t in threads:\n t.join()\n log('ๅ…ณ้—ญ็บฟ็จ‹: '+t.name)\n\n log('ไธป็บฟ็จ‹็ป“ๆŸ๏ผ '+threading.current_thread().name)\n\n\nif __name__ == \"__main__\":\n QiChaCha(cookie, proxies, companies_name)\n\nprint()\n" ]
[ [ "pandas.read_csv" ] ]
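Note on the entry above: the scraper does `from config import *` and, in its `__main__` block, consumes three names (cookie, proxies, companies_name) that are never defined in the file itself. A minimal sketch of what that companion config.py plausibly contains, inferred only from how those three names are used; every value below is a placeholder, not a real credential, proxy, or output name.

# config.py -- hypothetical companion module for QiChaCha/get_parks_companies_threads.py
# cookie: a logged-in qichacha.com session cookie string, copied from a browser
cookie = 'QCCSESSID=<session-id>; acw_tc=<token>'  # placeholder, not a real session

# proxies: requests-style proxy mapping; the scraper currently calls
# requests.get() without it (the proxied call is commented out above)
proxies = {
    'http': 'http://127.0.0.1:1080',   # placeholder address
    'https': 'http://127.0.0.1:1080',
}

# companies_name: base name of the output CSV written under ./csv/
companies_name = 'park_companies'

One design point worth noting: the worker threads hand out work by popping from the shared self.ListTask; CPython's GIL makes list.pop(0) atomic, so no explicit lock is needed, and the three-strikes counter n is what lets each thread exit once the list stays empty.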
TxT168/nlp-bert
[ "c8173797abb81a2c9a7e6911c477106c5a2cede5" ]
[ "utils.py" ]
[ "# coding: UTF-8\nimport torch\nfrom tqdm import tqdm\nimport time\nfrom datetime import timedelta\n\nPAD, CLS = '[PAD]', '[CLS]'  # padding token, and the [CLS] token BERT uses to aggregate sequence-level information\n\n\ndef build_dataset(config):\n\n    def load_dataset(path, pad_size=32):\n        contents = []\n        with open(path, 'r', encoding='UTF-8') as f:\n            for line in tqdm(f):\n                lin = line.strip()\n                if not lin:\n                    continue\n                label, content = lin.split('\\t')\n                token = config.tokenizer.tokenize(content)\n                token = [CLS] + token\n                seq_len = len(token)\n                mask = []\n                token_ids = config.tokenizer.convert_tokens_to_ids(token)\n\n                if pad_size:\n                    if len(token) < pad_size:\n                        mask = [1] * len(token_ids) + [0] * (pad_size - len(token))\n                        token_ids += ([0] * (pad_size - len(token)))\n                    else:\n                        mask = [1] * pad_size\n                        token_ids = token_ids[:pad_size]\n                        seq_len = pad_size\n                contents.append((token_ids, int(label), seq_len, mask))\n        return contents\n    train = load_dataset(config.train_path, config.pad_size)\n    dev = load_dataset(config.dev_path, config.pad_size)\n    test = load_dataset(config.test_path, config.pad_size)\n    return train, dev, test\n\n\nclass DatasetIterater(object):\n    def __init__(self, batches, batch_size, device):\n        self.batch_size = batch_size\n        self.batches = batches\n        self.n_batches = len(batches) // batch_size\n        self.residue = False  # True when the dataset size is not an exact multiple of batch_size\n        if len(batches) % batch_size != 0:\n            self.residue = True\n        self.index = 0\n        self.device = device\n\n    def _to_tensor(self, datas):\n        x = torch.LongTensor([_[0] for _ in datas]).to(self.device)\n        y = torch.LongTensor([_[1] for _ in datas]).to(self.device)\n\n        # sequence length before padding (capped at pad_size)\n        seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)\n        mask = torch.LongTensor([_[3] for _ in datas]).to(self.device)\n        return (x, seq_len, mask), y\n\n    def __next__(self):\n        if self.residue and self.index == self.n_batches:\n            batches = self.batches[self.index * self.batch_size: len(self.batches)]\n            self.index += 1\n            batches = self._to_tensor(batches)\n            return batches\n\n        elif self.index >= self.n_batches:\n            self.index = 0\n            raise StopIteration\n        else:\n            batches = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]\n            self.index += 1\n            batches = self._to_tensor(batches)\n            return batches\n\n    def __iter__(self):\n        return self\n\n    def __len__(self):\n        if self.residue:\n            return self.n_batches + 1\n        else:\n            return self.n_batches\n\n\ndef build_iterator(dataset, config):\n    iter = DatasetIterater(dataset, config.batch_size, config.device)\n    return iter\n\n\ndef get_time_dif(start_time):\n    \"\"\"Return the elapsed time since start_time.\"\"\"\n    end_time = time.time()\n    time_dif = end_time - start_time\n    return timedelta(seconds=int(round(time_dif)))\n" ]
[ [ "torch.LongTensor" ] ]
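The utils.py entry above reads everything from a config object but never shows one being built. A minimal driver sketch under stated assumptions: the attribute names (train_path, dev_path, test_path, pad_size, batch_size, device, tokenizer) are exactly what the code above accesses, while the checkpoint name, file paths, and hyperparameter values are illustrative guesses; the data files are assumed to hold one "label<TAB>text" pair per line, as load_dataset() expects.

# run_dataset.py -- hypothetical driver for the utils.py entry above
import time
import torch
from transformers import BertTokenizer  # supplies tokenize() and convert_tokens_to_ids()
from utils import build_dataset, build_iterator, get_time_dif

class Config:
    train_path = './data/train.txt'  # assumed format: label\ttext per line
    dev_path = './data/dev.txt'
    test_path = './data/test.txt'
    pad_size = 32        # every sequence padded/truncated to 32 tokens
    batch_size = 128
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')  # assumed checkpoint

if __name__ == '__main__':
    config = Config()
    start = time.time()
    train_data, dev_data, test_data = build_dataset(config)
    train_iter = build_iterator(train_data, config)
    print('data loaded:', get_time_dif(start))
    # DatasetIterater is its own iterator: __iter__ returns self, __next__ yields a batch
    (token_ids, seq_len, mask), labels = next(iter(train_iter))
    print(token_ids.shape, mask.shape, labels.shape)  # [128, 32], [128, 32], [128]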
akeaveny/Mask_RCNN
[ "b898286e3bebb4af16cc2dfc6f1167eeadd4292f" ]
[ "mrcnn/modeldepth.py" ]
[ "\nimport os\nimport random\nimport datetime\nimport re\nimport math\nimport logging\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport skimage.transform\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.engine as KE\nimport keras.models as KM\n\nfrom mrcnn import utils\n\n# Requires TensorFlow 1.3+ and Keras 2.0.8+.\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.3\")\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')\n\n\n############################################################\n# Utility Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} min: {:10.5f} max: {:10.5f} {}\".format(\n str(array.shape),\n array.min() if array.size else \"\",\n array.max() if array.size else \"\",\n array.dtype))\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\n to make changes if needed.\n\n Batch normalization has a negative effect on training if batches are small\n so this layer is often frozen (via setting in Config class) and functions\n as linear layer.\n \"\"\"\n def call(self, inputs, training=None):\n \"\"\"\n Note about training values:\n None: Train BN layers. This is the normal mode\n False: Freeze BN layers. Good when batch size is small\n True: (don't use). Set layer in training mode even when inferencing\n \"\"\"\n return super(self.__class__, self).call(inputs, training=training)\n\n\ndef compute_backbone_shapes(config, image_shape):\n \"\"\"Computes the width and height of each stage of the backbone network.\n \n Returns:\n [N, (height, width)]. Where N is the number of stages\n \"\"\"\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])\n\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True, train_bn=True, lType=''):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: defualt 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. 
Train or freeze Batch Norm layres\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'+lType\n bn_name_base = 'bn' + str(stage) + block + '_branch'+lType\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out'+lType)(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True, lType=''):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: defualt 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layres\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'+lType\n bn_name_base = 'bn' + str(stage) + block + '_branch'+lType\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out'+lType)(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True, lType=''):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. 
Train or freeze Batch Norm layres\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1'+lType, use_bias=True)(x)\n x = BatchNorm(name='bn_conv1'+lType)(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn, lType=lType)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn, lType=lType)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn,lType=lType)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn,lType=lType)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn, lType=lType)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn, lType=lType)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn, lType=lType)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn,lType=lType)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn, lType=lType)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn, lType=lType)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn, lType=lType)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn, lType=lType)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)] boxes to update\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, (y1, x1, y2, x2)]\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KE.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. 
It also applies bounding\n    box refinement deltas to anchors.\n\n    Inputs:\n        rpn_probs: [batch, anchors, (bg prob, fg prob)]\n        rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n        anchors: [batch, anchors, (y1, x1, y2, x2)] anchors in normalized coordinates\n\n    Returns:\n        Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n    \"\"\"\n\n    def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\n        super(ProposalLayer, self).__init__(**kwargs)\n        self.config = config\n        self.proposal_count = proposal_count\n        self.nms_threshold = nms_threshold\n\n    def call(self, inputs):\n        # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n        scores = inputs[0][:, :, 1]\n        # Box deltas [batch, num_rois, 4]\n        deltas = inputs[1]\n        deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n        # Anchors\n        anchors = inputs[2]\n\n        # Improve performance by trimming to top anchors by score\n        # and doing the rest on the smaller subset.\n        pre_nms_limit = tf.minimum(6000, tf.shape(anchors)[1])\n        ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n                         name=\"top_anchors\").indices\n        scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n                                   self.config.IMAGES_PER_GPU)\n        deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n                                   self.config.IMAGES_PER_GPU)\n        pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\n                                            self.config.IMAGES_PER_GPU,\n                                            names=[\"pre_nms_anchors\"])\n\n        # Apply deltas to anchors to get refined anchors.\n        # [batch, N, (y1, x1, y2, x2)]\n        boxes = utils.batch_slice([pre_nms_anchors, deltas],\n                                  lambda x, y: apply_box_deltas_graph(x, y),\n                                  self.config.IMAGES_PER_GPU,\n                                  names=[\"refined_anchors\"])\n\n        # Clip to image boundaries. Since we're in normalized coordinates,\n        # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\n        window = np.array([0, 0, 1, 1], dtype=np.float32)\n        boxes = utils.batch_slice(boxes,\n                                  lambda x: clip_boxes_graph(x, window),\n                                  self.config.IMAGES_PER_GPU,\n                                  names=[\"refined_anchors_clipped\"])\n\n        # Filter out small boxes\n        # According to Xinlei Chen's paper, this reduces detection accuracy\n        # for small objects, so we're skipping it.\n\n        # Non-max suppression\n        def nms(boxes, scores):\n            indices = tf.image.non_max_suppression(\n                boxes, scores, self.proposal_count,\n                self.nms_threshold, name=\"rpn_non_max_suppression\")\n            proposals = tf.gather(boxes, indices)\n            # Pad if needed\n            padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)\n            proposals = tf.pad(proposals, [(0, padding), (0, 0)])\n            return proposals\n        proposals = utils.batch_slice([boxes, scores], nms,\n                                      self.config.IMAGES_PER_GPU)\n        return proposals\n\n    def compute_output_shape(self, input_shape):\n        return (None, self.proposal_count, 4)\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n    \"\"\"Implementation of Log2. TF doesn't have a native implementation.\"\"\"\n    return tf.log(x) / tf.log(2.0)\n\n\nclass PyramidROIAlign(KE.Layer):\n    \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n    Params:\n    - pool_shape: [height, width] of the output pooled regions. Usually [7, 7]\n\n    Inputs:\n    - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n             coordinates. Possibly padded with zeros if not enough\n             boxes to fill the array.\n    - image_meta: [batch, (meta data)] Image details.
See compose_image_meta()\n - Feature maps: List of feature maps from different levels of the pyramid.\n Each is [batch, height, width, channels]\n\n Output:\n Pooled regions in the shape: [batch, num_boxes, height, width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n\n def __init__(self, pool_shape, **kwargs):\n super(PyramidROIAlign, self).__init__(**kwargs)\n self.pool_shape = tuple(pool_shape)\n\n def call(self, inputs):\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n\n # Image meta\n # Holds details about the image. See compose_image_meta()\n image_meta = inputs[1]\n\n # Feature Maps. List of feature maps from different level of the\n # feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[2:]\n\n # Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n h = y2 - y1\n w = x2 - x1\n # Use shape of first image. Images in a batch must have the same size.\n image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\n # Equation 1 in the Feature Pyramid Networks paper. Account for\n # the fact that our coordinates are normalized here.\n # e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\n roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n roi_level = tf.minimum(5, tf.maximum(\n 2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n roi_level = tf.squeeze(roi_level, 2)\n\n # Loop through levels and apply ROI pooling to each. P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = tf.where(tf.equal(roi_level, level))\n level_boxes = tf.gather_nd(boxes, ix)\n\n # Box indicies for crop_and_resize.\n box_indices = tf.cast(ix[:, 0], tf.int32)\n\n # Keep track of which box is mapped to which level\n box_to_level.append(ix)\n\n # Stop gradient propogation to ROI proposals\n level_boxes = tf.stop_gradient(level_boxes)\n box_indices = tf.stop_gradient(box_indices)\n\n # Crop and Resize\n # From Mask R-CNN paper: \"We sample four regular locations, so\n # that we can evaluate either max or average pooling. 
In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n pooled.append(tf.image.crop_and_resize(\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\n method=\"bilinear\"))\n\n # Pack pooled features into one tensor\n pooled = tf.concat(pooled, axis=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = tf.concat(box_to_level, axis=0)\n box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n axis=1)\n\n # Rearrange pooled features to match the order of the original boxes\n # Sort box_to_level by batch then box index\n # TF doesn't have a way to sort by two columns, so merge them and sort.\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n box_to_level)[0]).indices[::-1]\n ix = tf.gather(box_to_level[:, 2], ix)\n pooled = tf.gather(pooled, ix)\n\n # Re-add the batch dimension\n pooled = tf.expand_dims(pooled, 0)\n return pooled\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeate boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeate() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n\n Inputs:\n proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. 
Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n Class-specific bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width). Masks cropped to bbox\n boundaries and resized to neural network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine postive and negative ROIs\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random_shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(positive_indices)[0]\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random_shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.argmax(positive_overlaps, axis=1)\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI corrdinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\n rois = tf.pad(rois, [(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KE.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,\n (dy, dx, log(dh), log(dw), class_id)]\n Class-specific bbox refinements.\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(DetectionTargetLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n proposals = inputs[0]\n gt_class_ids = inputs[1]\n gt_boxes = inputs[2]\n gt_masks = inputs[3]\n\n # Slice the batch and run a graph for each slice\n # TODO: Rename target_bbox to target_deltas for clarity\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n outputs = utils.batch_slice(\n [proposals, gt_class_ids, gt_boxes, gt_masks],\n lambda w, x, y, z: detection_targets_graph(\n w, x, y, z, self.config),\n self.config.IMAGES_PER_GPU, names=names)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return [\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\n (None, 1), # class_ids\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n self.config.MASK_SHAPE[1]) # masks\n ]\n\n def compute_mask(self, inputs, mask=None):\n return [None, None, None, None]\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in image coordinates. The part of the image\n that contains the image excluding the padding.\n\n Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)] where\n coordinates are normalized.\n \"\"\"\n # Class IDs per ROI\n class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\n # Class probability of the top class of each ROI\n indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\n class_scores = tf.gather_nd(probs, indices)\n # Class-specific bounding box deltas\n deltas_specific = tf.gather_nd(deltas, indices)\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n refined_rois = apply_box_deltas_graph(\n rois, deltas_specific * config.BBOX_STD_DEV)\n # Clip boxes to image window\n refined_rois = clip_boxes_graph(refined_rois, window)\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep = tf.where(class_ids > 0)[:, 0]\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(conf_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n\n # Apply per-class NMS\n # 1. 
Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indicies\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\n # 4. Compute intersection between keep and nms_keep\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are normalized.\n detections = tf.concat([\n tf.gather(refined_rois, keep),\n tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KE.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are normalized.\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Get windows of images in normalized coordinates. 
Windows are the area\n # in the image that excludes the padding.\n # Use the shape of the first image in the batch to normalize the window\n # because we know that all images get resized to the same size.\n m = parse_image_meta_graph(image_meta)\n image_shape = m['image_shape'][0]\n window = norm_boxes_graph(m['window'], image_shape[:2])\n \n # Run detection refinement graph on each item in the batch\n detections_batch = utils.batch_slice(\n [rois, mrcnn_class, mrcnn_bbox, window],\n lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\n self.config.IMAGES_PER_GPU)\n\n # Reshape output\n # [batch, num_detections, (y1, x1, y2, x2, class_score)] in\n # normalized coordinates\n return tf.reshape(\n detections_batch,\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n def compute_output_shape(self, input_shape):\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\n############################################################\n# Region Proposal Network (RPN)\n############################################################\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n \"\"\"Builds the computation graph of Region Proposal Network.\n\n feature_map: backbone features [batch, height, width, depth]\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n\n Returns:\n rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n # TODO: check if stride of 2 causes alignment issues if the featuremap\n # is not even.\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n strides=anchor_stride,\n name='rpn_conv_shared')(feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n activation='linear', name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(\n lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. [batch, H, W, anchors per location, depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n\n Returns a Keras Model object. 
The model outputs, when called, are:\n rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, W, W, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps,depth_feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from diffent layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n - image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layres\n\n Returns:\n logits: [N, NUM_CLASSES] classifier logits (before softmax)\n probs: [N, NUM_CLASSES] classifier probabilities\n bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to\n proposal boxes\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_boxes, pool_height, pool_width, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\n\n # Two 1024 FC layers (implemented with Conv2D for consistency)\n x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),\n name=\"mrcnn_class_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n name=\"pool_squeeze\")(x)\n\n ## depth\n x_depth = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_classifier_depth\")([rois, image_meta] + depth_feature_maps)\n\n x_depth = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1_depth\")(x_depth)\n x_depth = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1_depth')(x_depth, training=train_bn)\n x_depth = KL.Activation('relu')(x_depth)\n x_depth = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),\n name=\"mrcnn_class_conv2_depth\")(x_depth)\n x_depth = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2_depth')(x_depth, training=train_bn)\n x_depth = KL.Activation('relu')(x_depth)\n\n shared_depth = KL.Lambda(lambda x: K.squeeze(K.squeeze(x_depth, 3), 2),\n name=\"pool_squeeze_depth\")(x_depth)\n\n shared_concatenated = keras.layers.concatenate([shared,shared_depth], axis=-1)\n # Classifier head\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n name='mrcnn_class_logits')(shared_concatenated)\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n name=\"mrcnn_class\")(mrcnn_class_logits)\n\n # BBox head\n # [batch, boxes, 
num_classes * (dy, dx, log(dh), log(dw))]\n    x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n                           name='mrcnn_bbox_fc')(shared_concatenated)\n    # Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]\n    s = K.int_shape(x)\n    mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\ndef build_fpn_mask_graph(rois, feature_maps, image_meta,\n                         pool_size, num_classes, train_bn=True):\n    \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n\n    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n          coordinates.\n    feature_maps: List of feature maps from different layers of the pyramid,\n                  [P2, P3, P4, P5]. Each has a different resolution.\n    image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n    pool_size: The width of the square feature map generated from ROI Pooling.\n    num_classes: number of classes, which determines the depth of the results\n    train_bn: Boolean. Train or freeze Batch Norm layers\n\n    Returns: Masks [batch, roi_count, height, width, num_classes]\n    \"\"\"\n    # ROI Pooling\n    # Shape: [batch, boxes, pool_height, pool_width, channels]\n    x = PyramidROIAlign([pool_size, pool_size],\n                        name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n    # Conv layers\n    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n                           name=\"mrcnn_mask_conv1\")(x)\n    x = KL.TimeDistributed(BatchNorm(),\n                           name='mrcnn_mask_bn1')(x, training=train_bn)\n    x = KL.Activation('relu')(x)\n\n    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n                           name=\"mrcnn_mask_conv2\")(x)\n    x = KL.TimeDistributed(BatchNorm(),\n                           name='mrcnn_mask_bn2')(x, training=train_bn)\n    x = KL.Activation('relu')(x)\n\n    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n                           name=\"mrcnn_mask_conv3\")(x)\n    x = KL.TimeDistributed(BatchNorm(),\n                           name='mrcnn_mask_bn3')(x, training=train_bn)\n    x = KL.Activation('relu')(x)\n\n    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n                           name=\"mrcnn_mask_conv4\")(x)\n    x = KL.TimeDistributed(BatchNorm(),\n                           name='mrcnn_mask_bn4')(x, training=train_bn)\n    x = KL.Activation('relu')(x)\n\n    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n                           name=\"mrcnn_mask_deconv1\")(x)\n    ### TODO: 56 x 56\n    # x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n    #                        name=\"mrcnn_mask_deconv2\")(x)\n\n    x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n                           name=\"mrcnn_mask\")(x)\n    return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n    \"\"\"Implements Smooth-L1 loss.\n    y_true and y_pred are typically: [N, 4], but could be any shape.\n    \"\"\"\n    diff = K.abs(y_true - y_pred)\n    less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n    loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n    return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n    \"\"\"RPN anchor classifier loss.\n\n    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n               -1=negative, 0=neutral anchor.\n    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.\n    \"\"\"\n    # Squeeze last dim to simplify\n    rpn_match = tf.squeeze(rpn_match, -1)\n    # Get anchor classes. Convert the -1/+1 match to 0/1 values.\n    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)\n    # Positive and Negative anchors contribute to the loss,\n    # but neutral anchors (match value = 0) don't.\n    indices = tf.where(K.not_equal(rpn_match, 0))\n    # Pick rows that contribute to the loss and filter out the rest.\n    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\n    anchor_class = tf.gather_nd(anchor_class, indices)\n    # Crossentropy loss\n    loss = K.sparse_categorical_crossentropy(target=anchor_class,\n                                             output=rpn_class_logits,\n                                             from_logits=True)\n    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n    return loss\n\n\ndef rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):\n    \"\"\"Return the RPN bounding box loss graph.\n\n    config: the model config object.\n    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\n        Uses 0 padding to fill in unused bbox deltas.\n    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n               -1=negative, 0=neutral anchor.\n    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n    \"\"\"\n    # Positive anchors contribute to the loss, but negative and\n    # neutral anchors (match value of 0 or -1) don't.\n    rpn_match = K.squeeze(rpn_match, -1)\n    indices = tf.where(K.equal(rpn_match, 1))\n\n    # Pick bbox deltas that contribute to the loss\n    rpn_bbox = tf.gather_nd(rpn_bbox, indices)\n\n    # Trim target bounding box deltas to the same length as rpn_bbox.\n    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)\n    target_bbox = batch_pack_graph(target_bbox, batch_counts,\n                                   config.IMAGES_PER_GPU)\n\n    # TODO: use smooth_l1_loss() rather than reimplementing here\n    # to reduce code duplication\n    diff = K.abs(target_bbox - rpn_bbox)\n    less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n    loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n\n    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n    return loss\n\n\ndef mrcnn_class_loss_graph(target_class_ids, pred_class_logits,\n                           active_class_ids):\n    \"\"\"Loss for the classifier head of Mask RCNN.\n\n    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero\n        padding to fill in the array.\n    pred_class_logits: [batch, num_rois, num_classes]\n    active_class_ids: [batch, num_classes]. Has a value of 1 for\n        classes that are in the dataset of the image, and 0\n        for classes that are not in the dataset.\n    \"\"\"\n    # During model building, Keras calls this function with\n    # target_class_ids of type float32. Unclear why. Cast it\n    # to int to get around it.\n    target_class_ids = tf.cast(target_class_ids, 'int64')\n\n    # Find predictions of classes that are not in the dataset.\n    pred_class_ids = tf.argmax(pred_class_logits, axis=2)\n    # TODO: Update this line to work with batch > 1. Right now it assumes all\n    # images in a batch have the same active_class_ids\n    pred_active = tf.gather(active_class_ids[0], pred_class_ids)\n\n    # Loss\n    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n        labels=target_class_ids, logits=pred_class_logits)\n\n    # Erase losses of predictions of classes that are not in the active\n    # classes of the image.\n    loss = loss * pred_active\n\n    # Compute loss mean.
Use only predictions that contribute\n # to the loss to get a correct mean.\n loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)\n return loss\n\n\ndef mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):\n \"\"\"Loss for Mask R-CNN bounding box refinement.\n\n target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n target_class_ids: [batch, num_rois]. Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n target_bbox = K.reshape(target_bbox, (-1, 4))\n pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. Get their indicies.\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n\n # Smooth-L1 Loss\n loss = K.switch(tf.size(target_bbox) > 0,\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\ndef mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n # Reshape for simplicity. Merge first two dimensions into one.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n mask_shape = tf.shape(target_masks)\n target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\n pred_shape = tf.shape(pred_masks)\n pred_masks = K.reshape(pred_masks,\n (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\n # Permute predicted masks to [N, num_classes, height, width]\n pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])\n\n # Only positive ROIs contribute to the loss. And only\n # the class specific mask of each ROI.\n positive_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_ix), tf.int64)\n indices = tf.stack([positive_ix, positive_class_ids], axis=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = tf.gather(target_masks, positive_ix)\n y_pred = tf.gather_nd(pred_masks, indices)\n\n # Compute binary cross entropy. If no positive ROIs, then return 0.\n # shape: [batch, roi, num_classes]\n loss = K.switch(tf.size(y_true) > 0,\n K.binary_crossentropy(target=y_true, output=y_pred),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augment=False, augmentation=None,\n use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n\n augment: (Depricated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. 
An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). Mini masks are smaller, typically,\n 224x224 and are generated by extracting the bounding box of the\n object and resizing it to MINI_MASK_SHAPE.\n\n Returns:\n image: [height, width, 3]\n shape: the original shape of the image before resizing and cropping.\n class_ids: [instance_count] Integer class IDs\n bbox: [instance_count, (y1, x1, y2, x2)]\n mask: [height, width, instance_count]. The height and width are those\n of the image unless use_mini_mask is True, in which case they are\n defined in MINI_MASK_SHAPE.\n \"\"\"\n # Load image and mask\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n\n if class_ids.size == 0:\n return image, None, class_ids, None, None\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Random horizontal flips.\n # TODO: will be removed in a future update in favor of augmentation\n if augment:\n logging.warning(\"'augment' is depricated. Use 'augmentation' instead.\")\n if random.randint(0, 1):\n image = np.fliplr(image)\n mask = np.fliplr(mask)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n import imgaug\n\n # Augmentors that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return (augmenter.__class__.__name__ in MASK_AUGMENTERS)\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n mask = mask.astype(np.bool)\n\n # Note that some boxes might be all zeros if the corresponding mask got cropped out.\n # and here is to filter them out\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n class_ids = class_ids[_idx]\n # Bounding boxes. 
    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1

    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)

    # Image meta data
    image_meta = compose_image_meta(image_id, original_shape, image.shape,
                                    window, scale, active_class_ids)

    return image, image_meta, class_ids, bbox, mask


def load_images_gt(dataset, config, image_id, augment=False, augmentation=None,
                   use_mini_mask=False):
    """Load and return ground truth data for an image (image, depth image,
    mask, bounding boxes).

    augment: (Deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    use_mini_mask: If False, returns full-size masks that are the same height
        and width as the original image. These can be big, for example
        1024x1024x100 (for 100 instances). Mini masks are smaller, typically
        224x224, and are generated by extracting the bounding box of the
        object and resizing it to MINI_MASK_SHAPE.

    Returns:
    image: [height, width, 3]
    depth_image: [height, width, 3]
    shape: the original shape of the image before resizing and cropping.
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    mask: [height, width, instance_count]. The height and width are those
        of the image unless use_mini_mask is True, in which case they are
        defined in MINI_MASK_SHAPE.
    """
    # Load image and mask
    image, depth_image = dataset.load_image_rgb_depth(dataset.image_reference(image_id))
    mask, class_ids = dataset.load_mask(image_id)
    if class_ids.size == 0:
        return image, depth_image, None, class_ids, None, None
    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)

    depth_image, _, _, _, _ = utils.resize_image(
        depth_image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)

    mask = utils.resize_mask(mask, scale, padding, crop)

    # Random horizontal flips.
    # TODO: will be removed in a future update in favor of augmentation
    if augment:
        logging.warning("'augment' is deprecated. Use 'augmentation' instead.")
        if random.randint(0, 1):
            image = np.fliplr(image)
            mask = np.fliplr(mask)

    # Augmentation
    # This requires the imgaug lib (https://github.com/aleju/imgaug)
    if augmentation:
        import imgaug

        # Augmenters that are safe to apply to masks
        # Some, such as Affine, have settings that make them unsafe, so always
        # test your augmentation on masks
        MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
                           "Fliplr", "Flipud", "CropAndPad",
                           "Affine", "PiecewiseAffine"]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return augmenter.__class__.__name__ in MASK_AUGMENTERS

        # Store shapes before augmentation to compare
        image_shape = image.shape
        mask_shape = mask.shape
        # Make augmenters deterministic to apply similarly to images and masks
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        # Change mask to np.uint8 because imgaug doesn't support bool masks
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        # Verify that shapes didn't change
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        # Change mask back to bool
        mask = mask.astype(np.bool_)

    # Some boxes might be all zeros if the corresponding mask got cropped out,
    # so filter them out here.
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1

    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)

    # Image meta data
    image_meta = compose_image_meta(image_id, original_shape, image.shape,
                                    window, scale, active_class_ids)

    return image, depth_image, image_meta, class_ids, bbox, mask


def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
    """Generate targets for training Stage 2 classifier and mask heads.
    This is not used in normal training. It's useful for debugging or to train
    the Mask RCNN heads without using the RPN head.

    Inputs:
    rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
    gt_class_ids: [instance count] Integer class IDs
    gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
              size or mini-masks.

    Returns:
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
            bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES].
Class specific masks cropped\n to bbox boundaries and resized to neural network output size.\n \"\"\"\n assert rpn_rois.shape[0] > 0\n assert gt_class_ids.dtype == np.int32, \"Expected int but got {}\".format(\n gt_class_ids.dtype)\n assert gt_boxes.dtype == np.int32, \"Expected int but got {}\".format(\n gt_boxes.dtype)\n assert gt_masks.dtype == np.bool_, \"Expected bool but got {}\".format(\n gt_masks.dtype)\n\n # It's common to add GT Boxes to ROIs but we don't do that here because\n # according to XinLei Chen's paper, it doesn't help.\n\n # Trim empty padding in gt_boxes and gt_masks parts\n instance_ids = np.where(gt_class_ids > 0)[0]\n assert instance_ids.shape[0] > 0, \"Image must contain instances.\"\n gt_class_ids = gt_class_ids[instance_ids]\n gt_boxes = gt_boxes[instance_ids]\n gt_masks = gt_masks[:, :, instance_ids]\n\n # Compute areas of ROIs and ground truth boxes.\n rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \\\n (rpn_rois[:, 3] - rpn_rois[:, 1])\n gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \\\n (gt_boxes[:, 3] - gt_boxes[:, 1])\n\n # Compute overlaps [rpn_rois, gt_boxes]\n overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))\n for i in range(overlaps.shape[1]):\n gt = gt_boxes[i]\n overlaps[:, i] = utils.compute_iou(\n gt, rpn_rois, gt_box_area[i], rpn_roi_area)\n\n # Assign ROIs to GT boxes\n rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)\n rpn_roi_iou_max = overlaps[np.arange(\n overlaps.shape[0]), rpn_roi_iou_argmax]\n # GT box assigned to each ROI\n rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]\n rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]\n\n # Positive ROIs are those with >= 0.5 IoU with a GT box.\n fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]\n\n # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)\n # TODO: To hard example mine or not to hard example mine, that's the question\n# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indicies of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. 
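        # --- Illustrative aside (exposition only, never called): the FG/BG
        # subsampling above condensed into one hypothetical helper. The aim is
        # ROI_POSITIVE_RATIO foreground ROIs out of TRAIN_ROIS_PER_IMAGE.
        def _subsample_rois_sketch(fg_ids, bg_ids, rois_per_image, positive_ratio):
            fg_count = int(rois_per_image * positive_ratio)
            keep_fg = (np.random.choice(fg_ids, fg_count, replace=False)
                       if fg_ids.shape[0] > fg_count else fg_ids)
            bg_count = rois_per_image - keep_fg.shape[0]
            keep_bg = (np.random.choice(bg_ids, bg_count, replace=False)
                       if bg_ids.shape[0] > bg_count else bg_ids)
            return np.concatenate([keep_fg, keep_bg])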
This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\n \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. [y, x, log(h), log(w)]\n bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,\n config.NUM_CLASSES, 4), dtype=np.float32)\n pos_ids = np.where(roi_gt_class_ids > 0)[0]\n bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(\n rois[pos_ids], roi_gt_boxes[pos_ids, :4])\n # Normalize bbox refinements\n bboxes /= config.BBOX_STD_DEV\n\n # Generate class-specific target masks\n masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),\n dtype=np.float32)\n for i in pos_ids:\n class_id = roi_gt_class_ids[i]\n assert class_id > 0, \"class id must be greater than 0\"\n gt_id = roi_gt_assignment[i]\n class_mask = gt_masks[:, :, gt_id]\n\n if config.USE_MINI_MASK:\n # Create a mask placeholder, the size of the image\n placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)\n # GT box\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]\n gt_w = gt_x2 - gt_x1\n gt_h = gt_y2 - gt_y1\n # Resize mini mask to size of GT box\n placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \\\n np.round(skimage.transform.resize(\n class_mask, (gt_h, gt_w), order=1, mode=\"constant\")).astype(bool)\n # Place the mini batch in the placeholder\n class_mask = placeholder\n\n # Pick part of the mask and resize it\n y1, x1, y2, x2 = rois[i].astype(np.int32)\n m = class_mask[y1:y2, x1:x2]\n mask = skimage.transform.resize(m, config.MASK_SHAPE, order=1, mode=\"constant\")\n masks[i, :, :, class_id] = mask\n\n return rois, roi_gt_class_ids, bboxes, masks\n\n\ndef build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is 
a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # TODO: If multiple anchors have the same IoU match all of them\n gt_iou_argmax = np.argmax(overlaps, axis=0)\n rpn_match[gt_iou_argmax] = 1\n # 3. 
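    # --- Illustrative aside (exposition only, never called): the three
    # matching rules around this point applied to an [anchors, gt_boxes] IoU
    # matrix. Assumes at least one GT box and ignores crowd handling.
    def _match_anchors_sketch(overlaps):
        match = np.zeros([overlaps.shape[0]], dtype=np.int32)
        iou_max = overlaps.max(axis=1)
        match[iou_max < 0.3] = -1                # 1. negatives first
        match[np.argmax(overlaps, axis=0)] = 1   # 2. best anchor per GT box
        match[iou_max >= 0.7] = 1                # 3. high-overlap positives
        return match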
Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = np.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n # Same for negative proposals\n ids = np.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n np.sum(rpn_match == 1))\n if extra > 0:\n # Rest the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = np.where(rpn_match == 1)[0]\n ix = 0 # index into rpn_bbox\n # TODO: use box_refinement() rather than duplicating the code here\n for i, a in zip(ids, anchors[ids]):\n # Closest gt box (it might have IoU < 0.7)\n gt = gt_boxes[anchor_iou_argmax[i]]\n\n # Convert coordinates to center plus width/height.\n # GT Box\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n\n # Compute the bbox refinement that the RPN should predict.\n rpn_bbox[ix] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n ]\n # Normalize\n rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n ix += 1\n\n return rpn_match, rpn_bbox\n\n\ndef generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\n \"\"\"Generates ROI proposals similar to what a region proposal network\n would generate.\n\n image_shape: [Height, Width, Depth]\n count: Number of ROIs to generate\n gt_class_ids: [N] Integer ground truth class IDs\n gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.\n\n Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.\n \"\"\"\n # placeholder\n rois = np.zeros((count, 4), dtype=np.int32)\n\n # Generate random ROIs around GT boxes (90% of count)\n rois_per_box = int(0.9 * count / gt_boxes.shape[0])\n for i in range(gt_boxes.shape[0]):\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\n h = gt_y2 - gt_y1\n w = gt_x2 - gt_x1\n # random boundaries\n r_y1 = max(gt_y1 - h, 0)\n r_y2 = min(gt_y2 + h, image_shape[0])\n r_x1 = max(gt_x1 - w, 0)\n r_x2 = min(gt_x2 + w, image_shape[1])\n\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. 
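        # --- Illustrative aside (exposition only, never called): inverting the
        # (dy, dx, log(dh), log(dw)) refinement computed in build_rpn_targets
        # above should map an anchor back onto its GT box. Hypothetical helper
        # in the spirit of utils.apply_box_deltas.
        def _apply_deltas_sketch(box, deltas):
            h, w = box[2] - box[0], box[3] - box[1]
            cy, cx = box[0] + 0.5 * h, box[1] + 0.5 * w
            cy, cx = cy + deltas[0] * h, cx + deltas[1] * w
            h, w = h * np.exp(deltas[2]), w * np.exp(deltas[3])
            return np.array([cy - 0.5 * h, cx - 0.5 * w,
                             cy + 0.5 * h, cx + 0.5 * w])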
        # If we get fewer valid boxes than we need, we loop and try again.
        while True:
            y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
            x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
            # Filter out zero area boxes
            threshold = 1
            y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                        threshold][:rois_per_box]
            x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                        threshold][:rois_per_box]
            if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
                break

        # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
        # into x1, y1, x2, y2 order
        x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
        y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
        box_rois = np.hstack([y1, x1, y2, x2])
        rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois

    # Generate random ROIs anywhere in the image (10% of count)
    remaining_count = count - (rois_per_box * gt_boxes.shape[0])
    # To avoid generating boxes with zero area, we generate double what
    # we need and filter out the extra. If we get fewer valid boxes
    # than we need, we loop and try again.
    while True:
        y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
        x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
        # Filter out zero area boxes
        threshold = 1
        y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                    threshold][:remaining_count]
        x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                    threshold][:remaining_count]
        if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
            break

    # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
    # into x1, y1, x2, y2 order
    x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
    global_rois = np.hstack([y1, x1, y2, x2])
    rois[-remaining_count:] = global_rois
    return rois


def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
                   random_rois=0, batch_size=1, detection_targets=False):
    """A generator that returns images and corresponding target class ids,
    bounding box deltas, and masks.

    dataset: The Dataset object to pick data from
    config: The model config object
    shuffle: If True, shuffles the samples before every epoch
    augment: (Deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    random_rois: If > 0 then generate proposals to be used to train the
                 network classifier and mask heads. Useful if training
                 the Mask RCNN part without the RPN.
    batch_size: How many images to return in each call
    detection_targets: If True, generate detection targets (class IDs, bbox
        deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.

    Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
    inputs list:
    - images: [batch, H, W, C]
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
    - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
    - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
    - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
                are those of the image unless use_mini_mask is True, in which
                case they are defined in MINI_MASK_SHAPE.

    outputs list: Usually empty in regular training. But if detection_targets
        is True then the outputs list contains target class_ids, bbox deltas,
        and masks.
    """
    b = 0  # batch item index
    image_index = -1
    image_ids = np.copy(dataset.image_ids)
    error_count = 0

    # Anchors
    # [anchor_count, (y1, x1, y2, x2)]
    backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
    anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                             config.RPN_ANCHOR_RATIOS,
                                             backbone_shapes,
                                             config.BACKBONE_STRIDES,
                                             config.RPN_ANCHOR_STRIDE)

    # Keras requires a generator to run indefinitely.
    while True:
        try:
            # Increment index to pick next image. Shuffle if at the start of an epoch.
            image_index = (image_index + 1) % len(image_ids)
            if shuffle and image_index == 0:
                np.random.shuffle(image_ids)

            # Get GT bounding boxes and masks for image.
            image_id = image_ids[image_index]
            image, depth_image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
                load_images_gt(dataset, config, image_id, augment=augment,
                               augmentation=augmentation,
                               use_mini_mask=config.USE_MINI_MASK)

            # Skip images that have no instances. This can happen in cases
            # where we train on a subset of classes and the image doesn't
            # have any of the classes we care about.
            if not np.any(gt_class_ids > 0):
                continue

            # RPN Targets
            rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
                                                    gt_class_ids, gt_boxes, config)

            # Mask R-CNN Targets
            if random_rois:
                rpn_rois = generate_random_rois(
                    image.shape, random_rois, gt_class_ids, gt_boxes)
                if detection_targets:
                    rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask = \
                        build_detection_targets(
                            rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)

            # Init batch arrays
            if b == 0:
                batch_image_meta = np.zeros(
                    (batch_size,) + image_meta.shape, dtype=image_meta.dtype)
                batch_rpn_match = np.zeros(
                    [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
                batch_rpn_bbox = np.zeros(
                    [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
                batch_images = np.zeros(
                    (batch_size,) + image.shape, dtype=np.float32)
                batch_depth_images = np.zeros(
                    (batch_size,) + depth_image.shape, dtype=np.float32)
                batch_gt_class_ids = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
                batch_gt_boxes = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
                batch_gt_masks = np.zeros(
                    (batch_size, gt_masks.shape[0], gt_masks.shape[1],
                     config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
                if random_rois:
                    batch_rpn_rois = np.zeros(
                        (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
                    if detection_targets:
                        batch_rois = np.zeros(
                            (batch_size,) + rois.shape, dtype=rois.dtype)
                        batch_mrcnn_class_ids = np.zeros(
                            (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
                        batch_mrcnn_bbox = np.zeros(
                            (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
                        batch_mrcnn_mask = np.zeros(
                            (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
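            # --- Usage sketch (illustrative, names hypothetical): a typical
            # way to consume this generator from training code, assuming a
            # prepared Dataset subclass and config:
            #   gen = data_generator(train_dataset, config, shuffle=True,
            #                        batch_size=config.BATCH_SIZE)
            #   inputs, outputs = next(gen)
            #   images, depth_images, image_meta = inputs[0], inputs[1], inputs[2]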
            # If more instances than fit in the array, sub-sample from them.
            if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
                ids = np.random.choice(
                    np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
                gt_class_ids = gt_class_ids[ids]
                gt_boxes = gt_boxes[ids]
                gt_masks = gt_masks[:, :, ids]

            # Add to batch
            batch_image_meta[b] = image_meta
            batch_rpn_match[b] = rpn_match[:, np.newaxis]
            batch_rpn_bbox[b] = rpn_bbox
            batch_images[b] = mold_image(image.astype(np.float32), config)
            batch_depth_images[b] = mold_image(depth_image.astype(np.float32), config)
            batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
            batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
            batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
            if random_rois:
                batch_rpn_rois[b] = rpn_rois
                if detection_targets:
                    batch_rois[b] = rois
                    batch_mrcnn_class_ids[b] = mrcnn_class_ids
                    batch_mrcnn_bbox[b] = mrcnn_bbox
                    batch_mrcnn_mask[b] = mrcnn_mask
            b += 1

            # Batch full?
            if b >= batch_size:
                inputs = [batch_images, batch_depth_images, batch_image_meta,
                          batch_rpn_match, batch_rpn_bbox,
                          batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
                outputs = []

                if random_rois:
                    inputs.extend([batch_rpn_rois])
                    if detection_targets:
                        inputs.extend([batch_rois])
                        # Keras requires that output and targets have the same number of dimensions
                        batch_mrcnn_class_ids = np.expand_dims(
                            batch_mrcnn_class_ids, -1)
                        outputs.extend(
                            [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])

                yield inputs, outputs

                # start a new batch
                b = 0
        except (GeneratorExit, KeyboardInterrupt):
            raise
        except Exception:
            # Log it and skip the image
            logging.exception("Error processing image {}".format(
                dataset.image_info[image_id]))
            error_count += 1
            if error_count > 5:
                raise


############################################################
# MaskRCNN Class
############################################################

class MaskRCNN():
    """Encapsulates the Mask RCNN model functionality.

    The actual Keras model is in the keras_model property.
    """

    def __init__(self, mode, config, model_dir):
        """
        mode: Either "training" or "inference"
        config: A Sub-class of the Config class
        model_dir: Directory to save training logs and trained weights
        """
        assert mode in ['training', 'inference']
        self.mode = mode
        self.config = config
        self.model_dir = model_dir
        self.set_log_dir()
        self.keras_model = self.build(mode=mode, config=config)

    def build(self, mode, config):
        """Build Mask R-CNN architecture.
        input_shape: The shape of the input image.
        mode: Either "training" or "inference". The inputs and
            outputs of the model differ accordingly.
        """
        assert mode in ['training', 'inference']

        # Image size must be divisible by 2 multiple times
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be divisible by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling. "
                            "For example, use 256, 320, 384, 448, 512, ... etc.
\")\n\n # Inputs\n input_image = KL.Input(\n shape=[None, None, 3], name=\"input_image\")\n input_depth_image = KL.Input(\n shape=[None, None, 3], name=\"input_depth_image\")\n\n input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],\n name=\"input_image_meta\")\n if mode == \"training\":\n # RPN GT\n input_rpn_match = KL.Input(\n shape=[None, 1], name=\"input_rpn_match\", dtype=tf.int32)\n input_rpn_bbox = KL.Input(\n shape=[None, 4], name=\"input_rpn_bbox\", dtype=tf.float32)\n\n # Detection GT (class IDs, bounding boxes, and masks)\n # 1. GT Class IDs (zero padded)\n input_gt_class_ids = KL.Input(\n shape=[None], name=\"input_gt_class_ids\", dtype=tf.int32)\n # 2. GT Boxes in pixels (zero padded)\n # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates\n input_gt_boxes = KL.Input(\n shape=[None, 4], name=\"input_gt_boxes\", dtype=tf.float32)\n # Normalize coordinates\n gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_gt_boxes)\n # 3. GT Masks (zero padded)\n # [batch, height, width, MAX_GT_INSTANCES]\n if config.USE_MINI_MASK:\n input_gt_masks = KL.Input(\n shape=[config.MINI_MASK_SHAPE[0],\n config.MINI_MASK_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n else:\n input_gt_masks = KL.Input(\n shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n elif mode == \"inference\":\n # Anchors in normalized coordinates\n input_anchors = KL.Input(shape=[None, 4], name=\"input_anchors\")\n\n # Build the shared convolutional layers.\n # Bottom-up Layers\n # Returns a list of the last layers of each stage, 5 in total.\n\n rpn_feature_maps,mrcnn_feature_maps, mrcnn_depth_feature_maps = self. buildResnetGraphTopDownLayers(input_image, input_depth_image, config)\n\n # Anchors\n if mode == \"training\":\n anchors = self.get_anchors(config.IMAGE_SHAPE)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)\n # A hack to get around Keras's bad support for constants\n anchors = KL.Lambda(lambda x: tf.Variable(anchors), name=\"anchors\")(input_image)\n else:\n anchors = input_anchors\n\n # RPN Model\n rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\n len(config.RPN_ANCHOR_RATIOS), 256)\n # Loop through pyramid layers\n layer_outputs = [] # list of lists\n for p in rpn_feature_maps:\n layer_outputs.append(rpn([p]))\n # Concatenate layer outputs\n # Convert from list of lists of level outputs to list of lists\n # of outputs across levels.\n # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\n else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n if mode == \"training\":\n # Class ID mask to mark class IDs supported by the dataset the image\n # came from.\n active_class_ids = KL.Lambda(\n lambda x: parse_image_meta_graph(x)[\"active_class_ids\"]\n )(input_image_meta)\n\n if not config.USE_RPN_ROIS:\n # Ignore predicted ROIs and use ROIs provided as an input.\n input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\n name=\"input_roi\", dtype=np.int32)\n # Normalize coordinates\n target_rois = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_rois)\n else:\n target_rois = rpn_rois\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_bbox, target_mask =\\\n DetectionTargetLayer(config, name=\"proposal_targets\")([\n target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\n\n # Network Heads\n # TODO: verify that this handles zero padded ROIs\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rois, mrcnn_feature_maps, mrcnn_depth_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # TODO: clean up (use tf.identify if necessary)\n output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\n\n # Losses\n rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\n class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\n [target_class_ids, mrcnn_class_logits, active_class_ids])\n bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\n [target_bbox, target_class_ids, mrcnn_bbox])\n mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\n [target_mask, target_class_ids, mrcnn_mask])\n\n # Model\n inputs = [input_image, input_depth_image, input_image_meta,\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\n if not config.USE_RPN_ROIS:\n inputs.append(input_rois)\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\n rpn_rois, output_rois,\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]\n model = KM.Model(inputs, outputs, name='mask_rcnn')\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n mrcnn_class_logits, 
mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, mrcnn_depth_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in \n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image,input_depth_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from mrcnn.parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def buildResnetGraphTopDownLayers(self, input_image,input_depth_image, config):\n\n _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,\n stage5=True, train_bn=config.TRAIN_BN)\n # Top-down Layers\n # TODO: add assert to varify feature map sizes match what's in config\n P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5')(C5)\n P4 = KL.Add(name=\"fpn_p4add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\n KL.Conv2D(256, (1, 1), name='fpn_c4p4')(C4)])\n P3 = KL.Add(name=\"fpn_p3add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\n KL.Conv2D(256, (1, 1), name='fpn_c3p3')(C3)])\n P2 = KL.Add(name=\"fpn_p2add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\n KL.Conv2D(256, (1, 1), name='fpn_c2p2')(C2)])\n # Attach 3x3 conv to all P layers to get the final feature maps.\n P2 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p2\")(P2)\n P3 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\n P4 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\n P5 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\n # P6 is used for the 5th anchor scale in RPN. 
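        # --- Illustrative aside (exposition only, never called): the pyramid
        # feature-map sizes follow from the backbone strides, in the spirit of
        # compute_backbone_shapes with the standard strides [4, 8, 16, 32, 64].
        def _backbone_shapes_sketch(image_shape, strides=(4, 8, 16, 32, 64)):
            import math
            h, w = image_shape[:2]
            # e.g. (1024, 1024, 3) -> [(256, 256), (128, 128), (64, 64),
            #                          (32, 32), (16, 16)]
            return [(int(math.ceil(h / s)), int(math.ceil(w / s))) for s in strides]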
Generated by\n # subsampling from P5 with stride of 2.\n P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\n\n rpn_feature_maps = [P2, P3, P4, P5, P6]\n mrcnn_feature_maps = [P2, P3, P4, P5]\n\n # for depth\n _, C2, C3, C4, C5 = resnet_graph(input_depth_image, config.BACKBONE,\n stage5=True, train_bn=config.TRAIN_BN, lType='_depth')\n # Top-down Layers\n # TODO: add assert to varify feature map sizes match what's in config\n P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5_depth')(C5)\n P4 = KL.Add(name=\"fpn_p4add_depth\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled_depth\")(P5),\n KL.Conv2D(256, (1, 1), name='fpn_c4p4_depth')(C4)])\n P3 = KL.Add(name=\"fpn_p3add_depth\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled_depth\")(P4),\n KL.Conv2D(256, (1, 1), name='fpn_c3p3_depth')(C3)])\n P2 = KL.Add(name=\"fpn_p2add_depth\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled_depth\")(P3),\n KL.Conv2D(256, (1, 1), name='fpn_c2p2_depth')(C2)])\n # Attach 3x3 conv to all P layers to get the final feature maps.\n P2 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p2_depth\")(P2)\n P3 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p3_depth\")(P3)\n P4 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p4_depth\")(P4)\n P5 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p5_depth\")(P5)\n\n mrcnn_depth_feature_maps = [P2, P3, P4, P5]\n\n return rpn_feature_maps, mrcnn_feature_maps, mrcnn_depth_feature_maps\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n The path of the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n # print(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint\n\n def save_weights(self, filepath, name):\n self.keras_model.save_weights(filepath=filepath, overwrite=True)\n\n def load_weights_keras(self, filepath):\n self.keras_model.load_weights(filepath)\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the corresponding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exclude: list of layer names to exclude\n \"\"\"\n import h5py\n # Conditional import to support versions of Keras before 2.2\n # TODO: remove in about 6 months (end of 2018)\n try:\n from keras.engine import saving\n except ImportError:\n # Keras before 2.2 used the 'topology' namespace.\n from keras.engine import topology as saving\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU 
training, we wrap the model. Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\") \\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\n 'releases/download/v0.2/'\\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs), metrics=['acc'])\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.metrics_tensors.append(loss)\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
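        # --- Illustrative aside (exposition only, never called): how the
        # per-loss weighting in compile() above combines, shown on plain
        # numbers (values hypothetical).
        def _weighted_loss_demo():
            loss_weights = {"rpn_class_loss": 1.0, "mrcnn_mask_loss": 2.0}
            raw_losses = {"rpn_class_loss": 0.8, "mrcnn_mask_loss": 0.5}
            # 0.8 * 1.0 + 0.5 * 2.0 = 1.8
            return sum(v * loss_weights.get(k, 1.0) for k, v in raw_losses.items())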
        # Get layers of the inner model because they have the weights.
        layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model") \
            else keras_model.layers

        for layer in layers:
            # Is the layer a model?
            if layer.__class__.__name__ == 'Model':
                print("In model: ", layer.name)
                self.set_trainable(
                    layer_regex, keras_model=layer, indent=indent + 4)
                continue

            if not layer.weights:
                continue
            # Is it trainable?
            trainable = bool(re.fullmatch(layer_regex, layer.name))
            # Update layer. If layer is a container, update inner layer.
            if layer.__class__.__name__ == 'TimeDistributed':
                layer.layer.trainable = trainable
            else:
                layer.trainable = trainable
            # Print trainable layer names
            if trainable and verbose > 0:
                log("{}{:20} ({})".format(" " * indent, layer.name,
                                          layer.__class__.__name__))

    def set_log_dir(self, model_path=None):
        """Sets the model log directory and epoch counter.

        model_path: If None, or a format different from what this code uses
            then set a new log directory and start epochs from 0. Otherwise,
            extract the log directory and the epoch counter from the file
            name.
        """
        # Set date and epoch counter as if starting a new model
        self.epoch = 0
        now = datetime.datetime.now()

        # If we have a model path with date and epochs use them
        if model_path:
            # Continue from where we left off. Get epoch and date from the file name
            # A sample model path might look like:
            # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5
            regex = r".*/\w+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_\w+(\d{4})\.h5"
            m = re.match(regex, model_path)
            if m:
                now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
                                        int(m.group(4)), int(m.group(5)))
                # Epoch number in file is 1-based, and in Keras code it's 0-based.
                # So, adjust for that then increment by one to start from the next epoch
                self.epoch = int(m.group(6)) - 1 + 1

        # Directory for training logs
        self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
            self.config.NAME.lower(), now))

        # Path to save after each epoch. Include placeholders that get filled by Keras.
        self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
            self.config.NAME.lower()))
        self.checkpoint_path = self.checkpoint_path.replace(
            "*epoch*", "{epoch:04d}")

    def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
              augmentation=None):
        """Train the model.
        train_dataset, val_dataset: Training and validation Dataset objects.
        learning_rate: The learning rate to train with
        epochs: Number of training epochs. Note that previous training epochs
            are considered to be done already, so this actually determines
            the epochs to train in total rather than in this particular
            call.
        layers: Allows selecting which layers to train. It can be:
            - A regular expression to match layer names to train
            - One of these predefined values:
              heads: The RPN, classifier and mask heads of the network
              all: All the layers
              3+: Train Resnet stage 3 and up
              4+: Train Resnet stage 4 and up
              5+: Train Resnet stage 5 and up
        augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
            augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
            flips images right/left 50% of the time. You can pass complex
            augmentations as well. This augmentation applies 50% of the
This augmentation applies 50% of the\n time, and when it does it flips images right/left half the time\n and adds a Gausssian blur with a random sigma in range 0 to 5.\n\n augmentation = imgaug.augmenters.Sometimes(0.5, [\n imgaug.augmenters.Fliplr(0.5),\n imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\n ])\n \"\"\"\n assert self.mode == \"training\", \"Create model in training mode.\"\n\n # Pre-defined layer regular expressions\n layer_regex = {\n # all layers but the backbone\n \"heads\": r\"(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # From a specific Resnet stage and up\n \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"5+\": r\"(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # All layers\n \"all\": \".*\",\n }\n if layers in layer_regex.keys():\n layers = layer_regex[layers]\n\n # Data generators\n train_generator = data_generator(train_dataset, self.config, shuffle=True,\n augmentation=augmentation,\n batch_size=self.config.BATCH_SIZE)\n val_generator = data_generator(val_dataset, self.config, shuffle=True,\n batch_size=self.config.BATCH_SIZE)\n\n # Callbacks\n callbacks = [\n keras.callbacks.TensorBoard(log_dir=self.log_dir,\n histogram_freq=0, write_graph=True, write_images=False),\n keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n verbose=0, save_weights_only=True),\n ]\n\n # Train\n log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch, learning_rate))\n log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n self.set_trainable(layers)\n self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n # Work-around for Windows: Keras fails on Windows when using\n # multiprocessing workers. See discussion here:\n # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n if os.name is 'nt':\n workers = 0\n else:\n workers = multiprocessing.cpu_count()\n\n self.keras_model.fit_generator(\n train_generator,\n initial_epoch=self.epoch,\n epochs=epochs,\n steps_per_epoch=self.config.STEPS_PER_EPOCH,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=self.config.VALIDATION_STEPS,\n max_queue_size=100,\n workers=workers,\n use_multiprocessing=True,\n )\n self.epoch = max(self.epoch, epochs)\n\n def mold_inputs(self, images):\n \"\"\"Takes a list of images and modifies them to the format expected\n as an input to the neural network.\n images: List of image matricies [height,width,depth]. Images can have\n different sizes.\n\n Returns 3 Numpy matricies:\n molded_images: [N, h, w, 3]. Images resized and normalized.\n image_metas: [N, length of meta data]. Details about each image.\n windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\n image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\n mrcnn_mask: [N, height, width, num_classes]\n original_image_shape: [H, W, C] Original image shape before resizing\n image_shape: [H, W, C] Shape of the image after resizing and padding\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\n image is excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Translate normalized coordinates in the resized image to pixel\n # coordinates in the original image before resizing\n window = utils.norm_boxes(window, image_shape[:2])\n wy1, wx1, wy2, wx2 = window\n shift = np.array([wy1, wx1, wy1, wx1])\n wh = wy2 - wy1 # window height\n ww = wx2 - wx1 # window width\n scale = np.array([wh, ww, wh, ww])\n # Convert boxes to normalized coordinates on the window\n boxes = np.divide(boxes - shift, scale)\n # Convert boxes to pixel coordinates on the original image\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\n\n # Filter out detections with zero area. 
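        # --- Illustrative aside (exposition only, never called): the
        # window-based coordinate mapping above in one hypothetical helper,
        # in the spirit of utils.norm_boxes/denorm_boxes.
        def _window_to_original_sketch(boxes, window, original_hw):
            wy1, wx1, wy2, wx2 = window  # normalized coords of the un-padded area
            shift = np.array([wy1, wx1, wy1, wx1])
            scale = np.array([wy2 - wy1, wx2 - wx1, wy2 - wy1, wx2 - wx1])
            # Normalized image coords -> window-relative normalized coords
            boxes = np.divide(boxes - shift, scale)
            # Window-relative normalized coords -> original-image pixels
            h, w = original_hw
            return np.around(boxes * np.array([h - 1, w - 1, h - 1, w - 1])
                             + np.array([0, 0, 1, 1])).astype(np.int32)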
Happens in early training when\n # network weights are still random\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty(masks.shape[1:3] + (0,))\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detectWdepth(self, images, depthimages, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n molded_depth_images, _, _ = self.mold_inputs(depthimages)\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"molded_depth_images\", molded_depth_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images,molded_depth_images,image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detect_molded(self, molded_images, depthimages, image_metas, verbose=0):\n \"\"\"Runs the detection pipeline, but expect inputs that are\n molded already. Used mostly for debugging and inspecting\n the model.\n\n molded_images: List of images loaded using load_image_gt()\n image_metas: image meta data, also retruned by load_image_gt()\n\n Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(molded_images) == self.config.BATCH_SIZE,\\\n \"Number of images must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(molded_images)))\n for image in molded_images:\n log(\"image\", image)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"Images must have the same size\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n molded_depth_images, _, _ = self.mold_inputs(depthimages)\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, molded_depth_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(molded_images):\n window = [0, 0, image.shape[0], image.shape[1]]\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n window)\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def get_anchors(self, image_shape):\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebook are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. 
A list of tensors that were already\n            searched to avoid loops in traversing the graph.\n        \"\"\"\n        checked = checked if checked is not None else []\n        # Put a limit on how deep we go to avoid very long loops\n        if len(checked) > 500:\n            return None\n        # Convert name to a regex and allow matching a number prefix\n        # because Keras adds them automatically\n        if isinstance(name, str):\n            name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n        parents = tensor.op.inputs\n        for p in parents:\n            if p in checked:\n                continue\n            if bool(re.fullmatch(name, p.name)):\n                return p\n            checked.append(p)\n            a = self.ancestor(p, name, checked)\n            if a is not None:\n                return a\n        return None\n\n    def find_trainable_layer(self, layer):\n        \"\"\"If a layer is encapsulated by another layer, this function\n        digs through the encapsulation and returns the layer that holds\n        the weights.\n        \"\"\"\n        if layer.__class__.__name__ == 'TimeDistributed':\n            return self.find_trainable_layer(layer.layer)\n        return layer\n\n    def get_trainable_layers(self):\n        \"\"\"Returns a list of layers that have weights.\"\"\"\n        layers = []\n        # Loop through all layers\n        for l in self.keras_model.layers:\n            # If layer is a wrapper, find inner trainable layer\n            l = self.find_trainable_layer(l)\n            # Include layer if it has weights\n            if l.get_weights():\n                layers.append(l)\n        return layers\n\n    def run_graph(self, images, depthimages, outputs, image_metas=None):\n        \"\"\"Runs a sub-set of the computation graph that computes the given\n        outputs.\n\n        image_metas: If provided, the images are assumed to be already\n        molded (i.e. resized, padded, and normalized)\n\n        outputs: List of tuples (name, tensor) to compute. The tensors are\n        symbolic TensorFlow tensors and the names are for easy tracking.\n\n        Returns an ordered dict of results. Keys are the names received in the\n        input and values are Numpy arrays.\n        \"\"\"\n        model = self.keras_model\n\n        # Organize desired outputs into an ordered dict\n        outputs = OrderedDict(outputs)\n        for o in outputs.values():\n            assert o is not None\n\n        # Build a Keras function to run parts of the computation graph\n        inputs = model.inputs\n        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n            inputs += [K.learning_phase()]\n        kf = K.function(model.inputs, list(outputs.values()))\n\n        # Mold inputs to format expected by the neural network\n        molded_images, image_metas, _ = self.mold_inputs(images)\n        molded_depth_images, _, _ = self.mold_inputs(depthimages)\n        # Validate image sizes\n        # All images in a batch MUST be of the same size\n        image_shape = molded_images[0].shape\n        for g in molded_images[1:]:\n            assert g.shape == image_shape,\\\n                \"After resizing, all images must have the same size. 
Check IMAGE_RESIZE_MODE and image sizes.\"\n\n        # Anchors\n        anchors = self.get_anchors(image_shape)\n        # Duplicate across the batch dimension because Keras requires it\n        # TODO: can this be optimized to avoid duplicating the anchors?\n        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n        model_in = [molded_images, molded_depth_images, image_metas, anchors]\n\n        # Run inference\n        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n            model_in.append(0.)\n        outputs_np = kf(model_in)\n\n        # Pack the generated Numpy arrays into a dict and log the results.\n        outputs_np = OrderedDict([(k, v) for k, v in zip(outputs.keys(), outputs_np)])\n        for k, v in outputs_np.items():\n            log(k, v)\n        return outputs_np\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n                       window, scale, active_class_ids):\n    \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n    image_id: An int ID of the image. Useful for debugging.\n    original_image_shape: [H, W, C] before resizing or padding.\n    image_shape: [H, W, C] after resizing and padding\n    window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n        image is (excluding the padding)\n    scale: The scaling factor applied to the original image (float32)\n    active_class_ids: List of class_ids available in the dataset from which\n        the image came. 
Useful if training on images from multiple datasets\n        where not all classes are present in all datasets.\n    \"\"\"\n    meta = np.array(\n        [image_id] +                  # size=1\n        list(original_image_shape) +  # size=3\n        list(image_shape) +           # size=3\n        list(window) +                # size=4 (y1, x1, y2, x2) in image coordinates\n        [scale] +                     # size=1\n        list(active_class_ids)        # size=num_classes\n    )\n    return meta\n\n\ndef parse_image_meta(meta):\n    \"\"\"Parses an array that contains image attributes to its components.\n    See compose_image_meta() for more details.\n\n    meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n    Returns a dict of the parsed values.\n    \"\"\"\n    image_id = meta[:, 0]\n    original_image_shape = meta[:, 1:4]\n    image_shape = meta[:, 4:7]\n    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels\n    scale = meta[:, 11]\n    active_class_ids = meta[:, 12:]\n    return {\n        \"image_id\": image_id.astype(np.int32),\n        \"original_image_shape\": original_image_shape.astype(np.int32),\n        \"image_shape\": image_shape.astype(np.int32),\n        \"window\": window.astype(np.int32),\n        \"scale\": scale.astype(np.float32),\n        \"active_class_ids\": active_class_ids.astype(np.int32),\n    }\n\n\ndef parse_image_meta_graph(meta):\n    \"\"\"Parses a tensor that contains image attributes to its components.\n    See compose_image_meta() for more details.\n\n    meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n    Returns a dict of the parsed tensors.\n    \"\"\"\n    image_id = meta[:, 0]\n    original_image_shape = meta[:, 1:4]\n    image_shape = meta[:, 4:7]\n    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels\n    scale = meta[:, 11]\n    active_class_ids = meta[:, 12:]\n    return {\n        \"image_id\": image_id,\n        \"original_image_shape\": original_image_shape,\n        \"image_shape\": image_shape,\n        \"window\": window,\n        \"scale\": scale,\n        \"active_class_ids\": active_class_ids,\n    }\n\n\ndef mold_image(images, config):\n    \"\"\"Expects an RGB image (or array of images) and subtracts\n    the mean pixel and converts it to float. Expects image\n    colors in RGB order.\n    \"\"\"\n    return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n    \"\"\"Takes an image normalized with mold_image() and returns the original.\"\"\"\n    return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellaneous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name=None):\n    \"\"\"Often boxes are represented with matrices of shape [N, 4] and\n    are padded with zeros. This removes zero boxes.\n\n    boxes: [N, 4] matrix of boxes.\n    non_zeros: [N] a 1D boolean mask identifying the rows to keep\n    \"\"\"\n    non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\n    boxes = tf.boolean_mask(boxes, non_zeros, name=name)\n    return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n    \"\"\"Picks a different number of values from each row\n    in x depending on the values in counts.\n    \"\"\"\n    outputs = []\n    for i in range(num_rows):\n        outputs.append(x[i, :counts[i]])\n    return tf.concat(outputs, axis=0)\n\n\ndef norm_boxes_graph(boxes, shape):\n    \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n    boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\n    shape: [..., (height, width)] in pixels\n\n    Note: In pixel coordinates (y2, x2) is outside the box. 
But in normalized\n    coordinates it's inside the box.\n\n    Returns:\n        [..., (y1, x1, y2, x2)] in normalized coordinates\n    \"\"\"\n    h, w = tf.split(tf.cast(shape, tf.float32), 2)\n    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n    shift = tf.constant([0., 0., 1., 1.])\n    return tf.divide(boxes - shift, scale)\n\n\ndef denorm_boxes_graph(boxes, shape):\n    \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n    boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\n    shape: [..., (height, width)] in pixels\n\n    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n    coordinates it's inside the box.\n\n    Returns:\n        [..., (y1, x1, y2, x2)] in pixel coordinates\n    \"\"\"\n    h, w = tf.split(tf.cast(shape, tf.float32), 2)\n    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n    shift = tf.constant([0., 0., 1., 1.])\n    return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\n" ]
[ [ "tensorflow.exp", "numpy.random.choice", "tensorflow.image.non_max_suppression", "numpy.copy", "tensorflow.unique", "tensorflow.reshape", "numpy.where", "tensorflow.sqrt", "numpy.sort", "tensorflow.stack", "tensorflow.control_dependencies", "numpy.broadcast_to", "tensorflow.divide", "tensorflow.cast", "tensorflow.identity", "numpy.concatenate", "numpy.divide", "tensorflow.shape", "numpy.empty", "tensorflow.concat", "numpy.log", "tensorflow.argmax", "tensorflow.image.crop_and_resize", "tensorflow.Variable", "tensorflow.transpose", "tensorflow.add_n", "tensorflow.constant", "tensorflow.squeeze", "numpy.argmax", "numpy.random.randint", "numpy.arange", "tensorflow.split", "tensorflow.pad", "numpy.expand_dims", "tensorflow.abs", "numpy.array", "tensorflow.range", "tensorflow.minimum", "numpy.reshape", "numpy.zeros", "tensorflow.expand_dims", "tensorflow.gather_nd", "tensorflow.round", "tensorflow.where", "numpy.delete", "numpy.random.shuffle", "tensorflow.map_fn", "tensorflow.sparse_tensor_to_dense", "tensorflow.log", "numpy.stack", "numpy.amax", "tensorflow.reduce_sum", "tensorflow.nn.top_k", "numpy.hstack", "tensorflow.boolean_mask", "tensorflow.logical_and", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "numpy.fliplr", "tensorflow.random_shuffle", "tensorflow.size", "tensorflow.multiply", "numpy.sum", "numpy.ones", "tensorflow.equal", "numpy.any", "tensorflow.reduce_max", "numpy.abs", "tensorflow.gather", "tensorflow.maximum", "tensorflow.reduce_mean", "tensorflow.stop_gradient" ] ]
cgpu/Long-Read-Proteogenomics
[ "7303e3c9cd4e58f98143eda0994c937a606206f9" ]
[ "modules/visualization_track/src/track_add_rgb_colors_to_bed.py" ]
[ "#!/usr/bin/env python3\n\n# add rgb shading value based on the relative abundances of all pb transcripts\n# of a gene\n\n# %%\n\nimport pandas as pd\nimport math\nimport argparse\n\n# examine all pb transcripts of a gene, determine rgb color\ndef calculate_rgb_shading(grp):\n \"\"\"\n Examine CPM for all PB transc\n ripts of a gene and get rgb shading factor.\n \"\"\"\n\n # rgb scaling\n rgb_scale = [\n '0,0,0', '26,0,0', '51,0,0', '77,0,0', '102,0,0',\n '128,0,0', '153,0,0', '179,0,0', '204,0,0', '230,0,0',\n '255,0,0', '255,26,26', '255,51,51', '255,77,77', '255,102,102',\n '255,128,128', '255,153,153', '255,179,179', '255,204,204', '255,230,230']\n max_cpm = grp.cpm.max()\n out_df = pd.DataFrame(columns = ['acc_full', 'pb_acc', 'cpm', 'fc', 'log2fc', 'log2fcx3', 'ceil_idx', 'rgb'])\n for i, row in grp.iterrows():\n cpm = row['cpm']\n fc = float(max_cpm) / float(cpm)\n log2fc = math.log(fc, 2) \n log2fcx3 = log2fc * 3\n ceil_idx = math.ceil(log2fcx3)\n if ceil_idx > 19:\n ceil_idx = 19\n rgb = rgb_scale[ceil_idx] \n out_df = out_df.append({'acc_full': row['acc_full'],\n 'pb_acc': row['pb_acc'],\n 'cpm': row['cpm'],\n 'fc': fc,\n 'log2fc': log2fc,\n 'log2fcx3': log2fcx3,\n 'ceil_idx': ceil_idx,\n 'rgb': rgb}, ignore_index=True)\n # comment out line below to return all intermediate values\n out_df = out_df[['acc_full','rgb']]\n return out_df\n\n\ndef add_rgb_shading_cpm(name, bed,split_size):\n \"\"\"\n Reads a BAM file containing CPM info to determine rgb color to use for track visualizatio\n\n Parameters\n ----------\n name : str \n name of sample\n bed_file : filename\n file of bed cds to read\n \"\"\"\n \n \n # subset df to determine rgb shading\n if split_size==3:\n subbed = bed[['acc_full', 'gene', 'pb_acc', 'cpm']].copy()\n elif split_size==4:\n subbed = bed[['acc_full', 'gene', 'pb_acc','pclass', 'cpm']].copy()\n subbed['cpm'] = subbed['cpm'].astype(str).astype(int)\n\n shaded = subbed.groupby('gene').apply(calculate_rgb_shading).reset_index(drop=True)\n\n # include rgb into original bed12\n bed_shaded = pd.merge(bed, shaded, how='left', on='acc_full')\n bed_shaded.gene = bed_shaded.gene.apply(lambda x: x[:9])\n gene_sizes = bed_shaded['gene'].apply(lambda x: len(x))\n max_gene = max(gene_sizes)\n\n pb_sizes = bed_shaded['pb_acc'].apply(lambda x: len(x))\n max_pb = max(pb_sizes)\n\n bed_shaded['cpm'] = bed_shaded['cpm'].apply(lambda cpm: str(cpm) if int(cpm) <=1000 else f'{int(cpm)/1000:.1f}K')\n cpm_sizes = bed_shaded['cpm'].apply(lambda x: len(x))\n cpm_len = max(cpm_sizes)\n \n # shaded['cpm_int'] = shaded['cpm'].apply(lambda x: str(round(x)).split('.')[0])\n if split_size==3:\n bed_shaded['new_acc_full'] = bed_shaded['acc_full']\n # bed_shaded['new_acc_full'] = bed_shaded.apply(lambda row: f'{row.gene:_<{max_gene+1}}{row.pb_acc:_<{max_pb+1}}{row.cpm:_>{cpm_len+1}}', axis = 1)\n # bed_shaded['new_acc_full'] = bed_shaded.apply(lambda row: f'{row.gene}_{row.pb_acc:_<{max_pb+1}}{row.cpm:_>{cpm_len+1}}', axis = 1)\n\n if split_size==4:\n bed_shaded['new_acc_full'] = bed_shaded['acc_full']\n # bed_shaded['new_acc_full'] = bed_shaded.apply(lambda row: f'{row.gene:_<{max_gene+1}}{row.pb_acc:_<{max_pb+1}}{row.pclass}{row.cpm:_>{cpm_len+1}}', axis = 1)\n # bed_shaded['new_acc_full'] = bed_shaded.apply(lambda row: f'{row.gene}_{row.pb_acc:_<{max_pb+1}}{row.pclass}{row.cpm:_>{cpm_len+1}}', axis = 1)\n\n # join in the rgb data and new accession\n \n bed_shaded = bed_shaded[['chrom', 'chromStart', 'chromStop', 'new_acc_full', 'score', 'strand', 'thickStart', 'thickEnd', 'rgb', 
'blockCount', 'blockSizes', 'blockStarts']]\n\n with open(f'{name}_shaded_cpm.bed12', 'w') as ofile:\n ofile.write(f'track name=\"{name.capitalize()} PacBio Protein\" itemRgb=On\\n')\n bed_shaded.to_csv(ofile, sep='\\t', index=None, header=None)\n\n\ndef add_rgb_shading_pclass(name,bed):\n pclass_shading_dict = {\n 'pFSM':'100,165,200',\n 'pNIC':'111,189,113',\n 'pNNC':'232,98,76',\n 'pISM':'248,132,85'\n }\n bed['rgb'] = bed['pclass'].map(pclass_shading_dict).fillna('0,0,0')\n\n bed.gene = bed.gene.apply(lambda x: x[:9])\n gene_sizes = bed['gene'].apply(lambda x: len(x))\n max_gene = max(gene_sizes)\n\n pb_sizes = bed['pb_acc'].apply(lambda x: len(x))\n max_pb = max(pb_sizes)\n\n bed['cpm'] = bed['cpm'].apply(lambda cpm: str(cpm) if int(cpm) <=1000 else f'{int(cpm)/1000:.1f}K')\n cpm_sizes = bed['cpm'].apply(lambda x: len(x))\n cpm_len = max(cpm_sizes)\n \n\n bed['new_acc_full'] = bed['acc_full']\n # bed['new_acc_full'] = bed.apply(lambda row: f'{row.gene:_<{max_gene+1}}{row.pb_acc:_<{max_pb+1}}{row.pclass}{row.cpm:_>{cpm_len+1}}', axis = 1)\n # bed['new_acc_full'] = bed.apply(lambda row: f'{row.gene}_{row.pb_acc:_<{max_pb+1}}{row.pclass}{row.cpm:_>{cpm_len+1}}', axis = 1)\n\n\n filter_names = ['chrom','chromStart','chromStop','new_acc_full','score','strand','thickStart','thickEnd','rgb','blockCount','blockSizes','blockStarts']\n bed = bed[filter_names]\n with open(f'{name}_shaded_protein_class.bed12', 'w') as ofile:\n ofile.write(f'track name=\"{name.capitalize()} PacBio Protein\" itemRgb=On\\n')\n bed.to_csv(ofile, sep='\\t', index=None, header=None)\n \ndef add_rgb_shading(name, bed_file):\n bed_names = ['chrom','chromStart','chromStop','acc_full','score','strand','thickStart','thickEnd','itemRGB','blockCount','blockSizes','blockStarts']\n bed = pd.read_table(bed_file, names=bed_names)\n split_size=len(bed.loc[0,'acc_full'].split('|'))\n if split_size==3:\n bed[['gene', 'pb_acc', 'cpm']] = bed['acc_full'].str.split('|', expand=True)\n if split_size==4:\n bed[['gene', 'pb_acc','pclass', 'cpm']] = bed['acc_full'].str.split('|', expand=True)\n bed = bed[bed.gene != '-']\n\n add_rgb_shading_cpm(name, bed.copy(), split_size)\n if split_size==4:\n add_rgb_shading_pclass(name,bed)\n\n\ndef main():\n parser = argparse.ArgumentParser(\"IO file locations for making region bed\")\n parser.add_argument(\"--name\", action=\"store\", dest=\"name\", help=\"name of sample - used for output file name\")\n parser.add_argument(\"--bed_file\", action=\"store\", dest = \"bed_file\", help=\"sample bed with cds\")\n results = parser.parse_args()\n add_rgb_shading(results.name, results.bed_file)\n\n\n\n\nif __name__ == \"__main__\":\n main()\n# %%\n" ]
[ [ "pandas.DataFrame", "pandas.read_table", "pandas.merge" ] ]
Stick-To/Deep_Conv_Backone_tensorflow
[ "c22016f7a41bc7b0caf85ff3db63d9bdab867401" ]
[ "testdpn.py" ]
[ "import tensorflow as tf\nimport keras\nfrom keras.datasets import cifar10\nfrom keras.datasets import cifar100\nimport DPN as net\nimport numpy as np\nimport sys\nfrom keras.preprocessing.image import ImageDataGenerator\nimport os\nimport math\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\ndevice_name = tf.test.gpu_device_name()\nif device_name is not '':\n print('Found GPU Device!')\nelse:\n print('Found GPU Device Failed!')\n\nconfig = {\n # dpn92 [96, 96, 256], dpb98 [160, 160, 256]\n 'first_dpn_block_filters': [96, 96, 256],\n # dpn92 [3, 4, 20, 3], dpb98 [3, 6, 20, 3]\n 'dpn_block_list': [3, 4, 20, 3],\n\n # parameters for conv and pool before dense block\n 'init_conv_filters': [16],\n 'init_conv_kernel_size': [3],\n 'init_conv_strides': [1],\n 'init_pooling_pool_size': 3,\n 'init_pooling_strides': 2,\n\n # dpn92 [16, 32, 24, 128], dpb98 [16, 32, 32, 128]\n 'k': [16, 32, 24, 128],\n # dpn92 32, dpb98 40\n 'G': 32\n}\n\nmean = np.array([123.68, 116.779, 103.979]).reshape((1, 1, 1, 3))\ndata_shape = (32, 32, 3)\nnum_train = 50000\nnum_test = 10000\nnum_classes = 10\ntrain_batch_size = 128\ntest_batch_size = 200\nepochs = 300\nweight_decay = 1e-4\nkeep_prob = 0.8\nlr = math.sqrt(0.1)\n\n(x_train, y_train) , (x_test, y_test) = cifar10.load_data()\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\ntrain_gen = ImageDataGenerator(\n horizontal_flip=True,\n width_shift_range=0.1,\n height_shift_range=0.1,\n shear_range=0.1,\n zoom_range=0.1,\n).flow(x_train, y_train, batch_size=train_batch_size)\ntest_gen = ImageDataGenerator().flow(x_test, y_test, batch_size=test_batch_size)\n\nreduce_lr_epoch = [epochs//2, 3*epochs//4]\ntestnet = net.DPN(config, data_shape, num_classes, weight_decay, keep_prob, 'channels_last')\nfor epoch in range(epochs):\n print('-'*20, 'epoch', epoch, '-'*20)\n train_acc = []\n train_loss = []\n test_acc = []\n # reduce learning rate\n if epoch in reduce_lr_epoch:\n lr = lr * 0.1\n print('reduce learning rate =', lr, 'now')\n # train one epoch\n for iter in range(num_train//train_batch_size):\n # get and preprocess image\n images, labels = train_gen.next()\n images = images - mean\n # train_one_batch also can accept your own session\n loss, acc = testnet.train_one_batch(images, labels, lr)\n train_acc.append(acc)\n train_loss.append(loss)\n sys.stdout.write(\"\\r>> train \"+str(iter+1)+'/'+str(num_train//train_batch_size)+' loss '+str(loss)+' acc '+str(acc))\n mean_train_loss = np.mean(train_loss)\n mean_train_acc = np.mean(train_acc)\n sys.stdout.write(\"\\n\")\n print('>> epoch', epoch, 'train mean loss', mean_train_acc, 'train mean acc', mean_train_acc)\n\n # validate one epoch\n for iter in range(num_test//test_batch_size):\n # get and preprocess image\n images, labels = test_gen.next()\n images = images - mean\n # validate_one_batch also can accept your own session\n logit, acc = testnet.validate_one_batch(images, labels)\n test_acc.append(acc)\n sys.stdout.write(\"\\r>> test \"+str(iter+1)+'/'+str(num_test//test_batch_size)+' acc '+str(acc))\n mean_val_acc = np.mean(test_acc)\n sys.stdout.write(\"\\n\")\n print('>> epoch', epoch, ' test mean acc', mean_val_acc)\n\n # logit = testnet.test(images)\n # testnet.save_weight(self, mode, path, sess=None)\n # testnet.load_weight(self, mode, path, sess=None)\n\n\n" ]
[ [ "numpy.array", "tensorflow.test.gpu_device_name", "numpy.mean" ] ]
yixuan/almond
[ "b37de1054a415c5a42ea97a88d935fad1ae3c148" ]
[ "package/almond/langevin.py" ]
[ "import math\nimport numpy as np\nimport mxnet as mx\nimport mxnet.ndarray as nd\n\n\n# Prototype of a probility model\nclass ProbModel:\n def log_pdf(self, x, args):\n f = 0.0\n return f\n\n def log_pdf_grad(self, x, args):\n grad = nd.zeros_like(x)\n return grad\n\n\nclass Langevin:\n # Constructor\n def __init__(self, shape, ctx=mx.cpu()):\n # Device\n self.ctx = ctx\n # Shape of particles\n # In Langevin, the same as shape\n # In LangevinMultichain, add a \"chain\" dimension to shape\n self.shape = shape\n # Current position vector\n self.current = None\n\n # Generate a normal random vector\n def normal_noise(self, sd):\n return nd.random.normal(scale=sd, shape=self.shape, ctx=self.ctx)\n\n # Langevin diffusion\n def sample(self, model, start, step_size, num, burnin=10, args=None):\n # Initial position\n self.current = start\n # Result array\n res = nd.zeros(shape=(num - burnin, ) + self.shape, ctx=self.ctx)\n # Standard deviation of noise\n sd = math.sqrt(2.0 * step_size)\n # Diffusion\n for i in range(num):\n self.current = self.current + \\\n step_size * model.log_pdf_grad(self.current, args).detach() + \\\n self.normal_noise(sd)\n if i >= burnin:\n res[i - burnin] = self.current\n return res\n\n\n\n\n\n# Prototype of a probility model\nclass ProbModelBatch:\n # If the parameter of interst has shape (d1, d2, ...), then\n # x is of shape (nbatch, d1, d2, ...)\n # Returns an array of length [nbatch]\n def log_pdf(self, x, args):\n f = nd.zeros(x.shape[0])\n return f\n\n def log_pdf_grad(self, x, args):\n grad = nd.zeros_like(x)\n return grad\n\n\nclass LangevinMultiChain(Langevin):\n # Constructor\n def __init__(self, shape, nchain, ctx=mx.cpu()):\n super(LangevinMultiChain, self).__init__(shape, ctx)\n\n # Number of independent chains\n self.nchain = nchain\n # Shape of particles\n self.shape = (nchain, ) + self.shape\n\n # Langevin diffusion\n def sample(self, model, start, step_size, burnin=10, args=None):\n # Check shape\n if start.shape != self.shape:\n raise ValueError(\"shape of 'start' is inconsistent with the sampler\")\n # Initial position\n self.current = start\n # Standard deviation of noise\n sd = math.sqrt(2.0 * step_size)\n # Diffusion\n for i in range(burnin):\n self.current = self.current + \\\n step_size * model.log_pdf_grad(self.current, args).detach() + \\\n self.normal_noise(sd)\n return self.current\n\n\n\n\n\nclass Model1:\n def __init__(self, a, b, ctx=mx.cpu()):\n self.a = a\n self.b = b\n self.ctx = ctx\n\n def log_pdf(self, x, args):\n x1 = x[0].asscalar()\n x2 = x[1].asscalar()\n f = -(self.a - x1) ** 2 - self.b * (x2 - x1 * x1) ** 2\n return f\n\n def log_pdf_grad(self, x, args):\n x1 = x[0].asscalar()\n x2 = x[1].asscalar()\n dx1 = -2.0 * (x1 - self.a) - 4.0 * self.b * (x1 * x1 - x2) * x1\n dx2 = -2.0 * self.b * (x2 - x1 * x1)\n return nd.array([dx1, dx2], ctx=self.ctx)\n\n\n# Multi-chain version\nclass Model2:\n def __init__(self, a, b, ctx=mx.cpu()):\n self.a = a\n self.b = b\n self.ctx = ctx\n\n # x: [nchain x 2]\n def log_pdf(self, x, args):\n x1 = x[:, 0]\n x2 = x[:, 1]\n f = -nd.square(self.a - x1) - self.b * nd.square(x2 - x1 * x1)\n return f\n\n def log_pdf_grad(self, x, args):\n x1 = x[:, 0]\n x2 = x[:, 1]\n dx1 = -2.0 * (x1 - self.a) - 4.0 * self.b * (x1 * x1 - x2) * x1\n dx2 = -2.0 * self.b * (x2 - x1 * x1)\n return nd.stack(dx1, dx2, axis=1)\n\n\nif __name__ == \"__main__\":\n import seaborn as sns\n import matplotlib.pyplot as plt\n\n np.random.seed(123)\n mx.random.seed(123)\n ctx = mx.cpu()\n\n model = Model1(0.3, 0.3, ctx=ctx)\n start = 
nd.array([0.0, 0.0], ctx=ctx)\n sampler = Langevin(start.shape, ctx=ctx)\n res = sampler.sample(model, start, step_size=0.1, num=1000, burnin=200)\n\n plt.scatter(res[:, 0].asnumpy(), res[:, 1].asnumpy())\n sns.jointplot(res[:, 0].asnumpy(), res[:, 1].asnumpy(), stat_func=None)\n\n np.random.seed(123)\n mx.random.seed(123)\n\n model = Model2(0.3, 0.3, ctx=ctx)\n nchain = 1000\n start = nd.random_normal(shape=(nchain, 2), ctx=ctx)\n sampler = LangevinMultiChain(start.shape[1:], nchain, ctx=ctx)\n res = sampler.sample(model, start, step_size=0.1, burnin=200)\n\n plt.scatter(res[:, 0].asnumpy(), res[:, 1].asnumpy())\n sns.jointplot(res[:, 0].asnumpy(), res[:, 1].asnumpy(), stat_func=None)\n" ]
[ [ "numpy.random.seed" ] ]
bgotthold-usgs/batdetect
[ "0d4a70f1cda9f6104f6f785f0d953f802fddf0f1" ]
[ "bat_eval/myskimage.py" ]
[ "\"\"\"\nThis file contains code copied from skimage package. \nSpecifically, this file is a standalone implementation of \nskimage.filters's \"gaussian\" function.\nThe \"image_as_float\" and \"guess_spatial_dimensions\" functions\nwere also copied to as dependencies of \"gaussian\" function.\n\"\"\"\nfrom __future__ import division\nimport numbers\nimport collections as coll\nimport numpy as np\nfrom scipy import ndimage as ndi\n\n__all__ = ['gaussian']\n\ndtype_range = {np.bool_: (False, True),\n np.bool8: (False, True),\n np.uint8: (0, 255),\n np.uint16: (0, 65535),\n np.int8: (-128, 127),\n np.int16: (-32768, 32767),\n np.int64: (-2**63, 2**63 - 1),\n np.uint64: (0, 2**64 - 1),\n np.int32: (-2**31, 2**31 - 1),\n np.uint32: (0, 2**32 - 1),\n np.float32: (-1, 1),\n np.float64: (-1, 1)}\n\ninteger_types = (np.uint8, np.uint16, np.int8, np.int16)\n\n_supported_types = (np.bool_, np.bool8,\n np.uint8, np.uint16, np.uint32, np.uint64,\n np.int8, np.int16, np.int32, np.int64,\n np.float32, np.float64)\n\ndtype_range[np.float16] = (-1, 1)\n_supported_types += (np.float16, )\n\ndef warn(msg):\n print(msg)\n\n\ndef img_as_float(image):\n dtype=np.float32\n force_copy = False\n\n \"\"\"\n Convert an image to the requested data-type.\n Warnings are issued in case of precision loss, or when negative values\n are clipped during conversion to unsigned integer types (sign loss).\n Floating point values are expected to be normalized and will be clipped\n to the range [0.0, 1.0] or [-1.0, 1.0] when converting to unsigned or\n signed integers respectively.\n Numbers are not shifted to the negative side when converting from\n unsigned to signed integer types. Negative values will be clipped when\n converting to unsigned integers.\n Parameters\n ----------\n image : ndarray\n Input image.\n dtype : dtype\n Target data-type.\n force_copy : bool, optional\n Force a copy of the data, irrespective of its current dtype.\n uniform : bool, optional\n Uniformly quantize the floating point range to the integer range.\n By default (uniform=False) floating point values are scaled and\n rounded to the nearest integers, which minimizes back and forth\n conversion errors.\n References\n ----------\n .. [1] DirectX data conversion rules.\n http://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx\n .. [2] Data Conversions. In \"OpenGL ES 2.0 Specification v2.0.25\",\n pp 7-8. Khronos Group, 2010.\n .. [3] Proper treatment of pixels as integers. A.W. Paeth.\n In \"Graphics Gems I\", pp 249-256. Morgan Kaufmann, 1990.\n .. [4] Dirty Pixels. J. Blinn. In \"Jim Blinn's corner: Dirty Pixels\",\n pp 47-57. 
Morgan Kaufmann, 1998.\n \"\"\"\n image = np.asarray(image)\n dtypeobj = np.dtype(dtype)\n dtypeobj_in = image.dtype\n dtype = dtypeobj.type\n dtype_in = dtypeobj_in.type\n\n if dtype_in == dtype:\n if force_copy:\n image = image.copy()\n return image\n\n if not (dtype_in in _supported_types and dtype in _supported_types):\n raise ValueError(\"can not convert %s to %s.\" % (dtypeobj_in, dtypeobj))\n\n def sign_loss():\n warn(\"Possible sign loss when converting negative image of type \"\n \"%s to positive image of type %s.\" % (dtypeobj_in, dtypeobj))\n\n def prec_loss():\n warn(\"Possible precision loss when converting from \"\n \"%s to %s\" % (dtypeobj_in, dtypeobj))\n\n def _dtype(itemsize, *dtypes):\n # Return first of `dtypes` with itemsize greater than `itemsize`\n return next(dt for dt in dtypes if itemsize < np.dtype(dt).itemsize)\n\n def _dtype2(kind, bits, itemsize=1):\n # Return dtype of `kind` that can store a `bits` wide unsigned int\n def compare(x, y, kind='u'):\n if kind == 'u':\n return x <= y\n else:\n return x < y\n\n s = next(i for i in (itemsize, ) + (2, 4, 8) if compare(bits, i * 8,\n kind=kind))\n return np.dtype(kind + str(s))\n\n\n def _scale(a, n, m, copy=True):\n # Scale unsigned/positive integers from n to m bits\n # Numbers can be represented exactly only if m is a multiple of n\n # Output array is of same kind as input.\n kind = a.dtype.kind\n if n > m and a.max() < 2 ** m:\n mnew = int(np.ceil(m / 2) * 2)\n if mnew > m:\n dtype = \"int%s\" % mnew\n else:\n dtype = \"uint%s\" % mnew\n n = int(np.ceil(n / 2) * 2)\n msg = (\"Downcasting %s to %s without scaling because max \"\n \"value %s fits in %s\" % (a.dtype, dtype, a.max(), dtype))\n warn(msg)\n return a.astype(_dtype2(kind, m))\n elif n == m:\n return a.copy() if copy else a\n elif n > m:\n # downscale with precision loss\n prec_loss()\n if copy:\n b = np.empty(a.shape, _dtype2(kind, m))\n np.floor_divide(a, 2**(n - m), out=b, dtype=a.dtype,\n casting='unsafe')\n return b\n else:\n a //= 2**(n - m)\n return a\n elif m % n == 0:\n # exact upscale to a multiple of n bits\n if copy:\n b = np.empty(a.shape, _dtype2(kind, m))\n np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype)\n return b\n else:\n a = np.array(a, _dtype2(kind, m, a.dtype.itemsize), copy=False)\n a *= (2**m - 1) // (2**n - 1)\n return a\n else:\n # upscale to a multiple of n bits,\n # then downscale with precision loss\n prec_loss()\n o = (m // n + 1) * n\n if copy:\n b = np.empty(a.shape, _dtype2(kind, o))\n np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype)\n b //= 2**(o - m)\n return b\n else:\n a = np.array(a, _dtype2(kind, o, a.dtype.itemsize), copy=False)\n a *= (2**o - 1) // (2**n - 1)\n a //= 2**(o - m)\n return a\n\n kind = dtypeobj.kind\n kind_in = dtypeobj_in.kind\n itemsize = dtypeobj.itemsize\n itemsize_in = dtypeobj_in.itemsize\n\n if kind == 'b':\n # to binary image\n if kind_in in \"fi\":\n sign_loss()\n prec_loss()\n return image > dtype_in(dtype_range[dtype_in][1] / 2)\n\n if kind_in == 'b':\n # from binary image, to float and to integer\n result = image.astype(dtype)\n if kind != 'f':\n result *= dtype(dtype_range[dtype][1])\n return result\n\n if kind in 'ui':\n imin = np.iinfo(dtype).min\n imax = np.iinfo(dtype).max\n if kind_in in 'ui':\n imin_in = np.iinfo(dtype_in).min\n imax_in = np.iinfo(dtype_in).max\n\n if kind_in == 'f':\n if np.min(image) < -1.0 or np.max(image) > 1.0:\n raise ValueError(\"Images of type float must be between -1 and 1.\")\n if kind == 'f':\n # floating point -> floating 
point\n if itemsize_in > itemsize:\n prec_loss()\n return image.astype(dtype)\n\n # floating point -> integer\n prec_loss()\n # use float type that can represent output integer type\n image = np.array(image, _dtype(itemsize, dtype_in,\n np.float32, np.float64))\n if not uniform:\n if kind == 'u':\n image *= imax\n else:\n image *= imax - imin\n image -= 1.0\n image /= 2.0\n np.rint(image, out=image)\n np.clip(image, imin, imax, out=image)\n elif kind == 'u':\n image *= imax + 1\n np.clip(image, 0, imax, out=image)\n else:\n image *= (imax - imin + 1.0) / 2.0\n np.floor(image, out=image)\n np.clip(image, imin, imax, out=image)\n return image.astype(dtype)\n\n if kind == 'f':\n # integer -> floating point\n if itemsize_in >= itemsize:\n prec_loss()\n # use float type that can exactly represent input integers\n image = np.array(image, _dtype(itemsize_in, dtype,\n np.float32, np.float64))\n if kind_in == 'u':\n image /= imax_in\n # DirectX uses this conversion also for signed ints\n #if imin_in:\n # np.maximum(image, -1.0, out=image)\n else:\n image *= 2.0\n image += 1.0\n image /= imax_in - imin_in\n return image.astype(dtype)\n\n if kind_in == 'u':\n if kind == 'i':\n # unsigned integer -> signed integer\n image = _scale(image, 8 * itemsize_in, 8 * itemsize - 1)\n return image.view(dtype)\n else:\n # unsigned integer -> unsigned integer\n return _scale(image, 8 * itemsize_in, 8 * itemsize)\n\n if kind == 'u':\n # signed integer -> unsigned integer\n sign_loss()\n image = _scale(image, 8 * itemsize_in - 1, 8 * itemsize)\n result = np.empty(image.shape, dtype)\n np.maximum(image, 0, out=result, dtype=image.dtype, casting='unsafe')\n return result\n\n # signed integer -> signed integer\n if itemsize_in > itemsize:\n return _scale(image, 8 * itemsize_in - 1, 8 * itemsize - 1)\n image = image.astype(_dtype2('i', itemsize * 8))\n image -= imin_in\n image = _scale(image, 8 * itemsize_in, 8 * itemsize, copy=False)\n image += imin\n\n return image.astype(dtype)\n\n\n\ndef guess_spatial_dimensions(image):\n \"\"\"Make an educated guess about whether an image has a channels dimension.\n Parameters\n ----------\n image : ndarray\n The input image.\n Returns\n -------\n spatial_dims : int or None\n The number of spatial dimensions of `image`. If ambiguous, the value\n is ``None``.\n Raises\n ------\n ValueError\n If the image array has less than two or more than four dimensions.\n \"\"\"\n if image.ndim == 2:\n return 2\n if image.ndim == 3 and image.shape[-1] != 3:\n return 3\n if image.ndim == 3 and image.shape[-1] == 3:\n return None\n if image.ndim == 4 and image.shape[-1] == 3:\n return 3\n else:\n raise ValueError(\"Expected 2D, 3D, or 4D array, got %iD.\" % image.ndim)\n\n\ndef gaussian(image, sigma=1, output=None, mode='nearest', cval=0,\n multichannel=None):\n \"\"\"Multi-dimensional Gaussian filter\n\n Parameters\n ----------\n image : array-like\n Input image (grayscale or color) to filter.\n sigma : scalar or sequence of scalars, optional\n Standard deviation for Gaussian kernel. The standard\n deviations of the Gaussian filter are given for each axis as a\n sequence, or as a single number, in which case it is equal for\n all axes.\n output : array, optional\n The ``output`` parameter passes an array in which to store the\n filter output.\n mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional\n The `mode` parameter determines how the array borders are\n handled, where `cval` is the value when mode is equal to\n 'constant'. 
Default is 'nearest'.\n cval : scalar, optional\n Value to fill past edges of input if `mode` is 'constant'. Default\n is 0.0\n multichannel : bool, optional (default: None)\n Whether the last axis of the image is to be interpreted as multiple\n channels. If True, each channel is filtered separately (channels are\n not mixed together). Only 3 channels are supported. If `None`,\n the function will attempt to guess this, and raise a warning if\n ambiguous, when the array has shape (M, N, 3).\n\n Returns\n -------\n filtered_image : ndarray\n the filtered array\n\n Notes\n -----\n This function is a wrapper around :func:`scipy.ndi.gaussian_filter`.\n\n Integer arrays are converted to float.\n\n The multi-dimensional filter is implemented as a sequence of\n one-dimensional convolution filters. The intermediate arrays are\n stored in the same data type as the output. Therefore, for output\n types with a limited precision, the results may be imprecise\n because intermediate results may be stored with insufficient\n precision.\n\n Examples\n --------\n\n >>> a = np.zeros((3, 3))\n >>> a[1, 1] = 1\n >>> a\n array([[ 0., 0., 0.],\n [ 0., 1., 0.],\n [ 0., 0., 0.]])\n >>> gaussian(a, sigma=0.4) # mild smoothing\n array([[ 0.00163116, 0.03712502, 0.00163116],\n [ 0.03712502, 0.84496158, 0.03712502],\n [ 0.00163116, 0.03712502, 0.00163116]])\n >>> gaussian(a, sigma=1) # more smooting\n array([[ 0.05855018, 0.09653293, 0.05855018],\n [ 0.09653293, 0.15915589, 0.09653293],\n [ 0.05855018, 0.09653293, 0.05855018]])\n >>> # Several modes are possible for handling boundaries\n >>> gaussian(a, sigma=1, mode='reflect')\n array([[ 0.08767308, 0.12075024, 0.08767308],\n [ 0.12075024, 0.16630671, 0.12075024],\n [ 0.08767308, 0.12075024, 0.08767308]])\n >>> # For RGB images, each is filtered separately\n >>> from skimage.data import astronaut\n >>> image = astronaut()\n >>> filtered_img = gaussian(image, sigma=1, multichannel=True)\n\n \"\"\"\n\n spatial_dims = guess_spatial_dimensions(image)\n if spatial_dims is None and multichannel is None:\n msg = (\"Images with dimensions (M, N, 3) are interpreted as 2D+RGB \"\n \"by default. Use `multichannel=False` to interpret as \"\n \"3D image with last dimension of length 3.\")\n warn(RuntimeWarning(msg))\n multichannel = True\n if np.any(np.asarray(sigma) < 0.0):\n raise ValueError(\"Sigma values less than zero are not valid\")\n if multichannel:\n # do not filter across channels\n if not isinstance(sigma, coll.Iterable):\n sigma = [sigma] * (image.ndim - 1)\n if len(sigma) != image.ndim:\n sigma = np.concatenate((np.asarray(sigma), [0]))\n #image = img_as_float(image)\n return ndi.gaussian_filter(image, sigma, mode=mode, cval=cval)\n" ]
[ [ "numpy.max", "numpy.ceil", "numpy.empty", "numpy.asarray", "numpy.rint", "scipy.ndimage.gaussian_filter", "numpy.min", "numpy.multiply", "numpy.floor_divide", "numpy.floor", "numpy.clip", "numpy.iinfo", "numpy.dtype", "numpy.maximum" ] ]
forkunited/ltprg
[ "4e40d3571d229023df0f845c68643024e04bc202" ]
[ "src/test/py/ltprg/game/synthetic_nom_ref/network_components.py" ]
[ "import torch\nimport torch.nn as nn\n\nclass MLP(nn.Module):\n\tdef __init__(self, in_sz, h_szs, out_sz, \n\t\thiddens_nonlinearity, final_nonlinearity):\n\t\t# in_sz \t\t\t\t\t (int, input sz)\n\t\t# h_szs\t\t\t\t\t\t (list of hidden layer szs)\n\t\t# out_sz\t\t\t\t\t (int, output sz)\n\t\t# hiddens_nonlinearity ('relu', 'tanh')\n\t\t# final_nonlinearity \t\t ('logSoftmax', 'sigmoid')\n\t\tsuper(MLP, self).__init__()\n\n\t\tassert hiddens_nonlinearity in ['relu', 'tanh']\n\t\tassert final_nonlinearity in ['logSoftmax', 'sigmoid']\n\n\t\tif hiddens_nonlinearity == 'relu':\n\t\t\tself.hiddens_nonlinearity = nn.ReLU()\n\t\telif hiddens_nonlinearity == 'tanh':\n\t\t\tself.hiddens_nonlinearity = nn.Tanh()\n\n\t\tif final_nonlinearity == 'logSoftmax':\n\t\t\tself.final_nonlinearity = nn.LogSoftmax()\n\t\telif final_nonlinearity == 'sigmoid':\n\t\t\tself.final_nonlinearity = nn.Sigmoid()\n\n\t\tlayer_szs = [in_sz]\n\t\tlayer_szs.extend(h_szs)\n\t\tlayer_szs.append(out_sz)\n\t\tlayers = []\n\t\tfor i in range(1,len(layer_szs)):\n\t\t\tlayers.append(nn.Linear(layer_szs[i-1], layer_szs[i]))\n\t\tself.layers = nn.ModuleList(layers)\n\n\n\t\t# hidden_layers = [nn.Linear(in_sz, h_szs[0])]\n\t\t# for i in range(1, len(h_szs)):\n\t\t# \thidden_layers.append(nn.Linear(h_szs[i-1], h_szs[i]))\n\t\t# self.layers = nn.ModuleList(hidden_layers)\n\t\t# self.final = nn.Linear(h_szs[-1], out_sz)\n\n\tdef forward(self, x):\n\t\t# for l in self.layers:\n\t\t# \tx = self.hiddens_nonlinearity(l(x))\n\t\t# x = self.final(x)\n\t\t# x = self.final_nonlinearity(x)\n\n\t\tfor i, l in enumerate(self.layers):\n\t\t\tx = l(x)\n\t\t\tif i<len(self.layers)-1:\n\t\t\t\tx = self.hiddens_nonlinearity(x)\n\t\tx = self.final_nonlinearity(x)\n\n\t\treturn x\n\n\tdef get_embedding(self):\n\t\t'''\n\t\tpull out the first layer of weights, which corresponds to \n\t\tan embedding of input 1-hot vector.\n\t\t'''\n\t\tfirst_layer = self.layers[0]\n\t\tparams = list(first_layer.parameters())\n\t\tweights = params[0].data.numpy().transpose() #transpose or no?\n\t\t#first element in params is multiplicative (FC), second is additive (bias)\n\t\treturn weights\n" ]
[ [ "torch.nn.LogSoftmax", "torch.nn.Linear", "torch.nn.ModuleList", "torch.nn.Sigmoid", "torch.nn.Tanh", "torch.nn.ReLU" ] ]
Algue-Rythme/GAT-Skim-Gram
[ "e6e9db5a936e87a2adfdf81a1f00d952d800d1c8" ]
[ "dataset.py" ]
[ "import itertools\nfrom itertools import chain\nfrom itertools import product\nimport os\nfrom collections import defaultdict\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_datasets.public_api as tfds\n\n\ndef standardize_features(features):\n mean = tf.math.reduce_mean(features, axis=0, keepdims=True)\n std = tf.math.reduce_std(features, axis=0, keepdims=True)\n features = (features - mean) / std\n return features\n\ndef node_features_from_attribute_file(prefix, standardize):\n attribute_file = os.path.join(prefix, '%s_node_attributes.txt'%prefix)\n try:\n with open(attribute_file, 'r') as f:\n tokens = f.readline()\n attributes = []\n while tokens:\n line = list(map(float, tokens.split(',')))\n attributes.append(line)\n tokens = f.readline()\n if standardize:\n attributes = tf.constant(attributes, dtype=tf.float32)\n attributes = standardize_features(attributes)\n else:\n attributes = tf.constant(attributes, dtype=tf.float32)\n return attributes\n except IOError:\n return None\n\ndef read_label_file(label_file):\n with open(label_file, 'r') as f:\n tokens = f.readline()\n labels = []\n label_set = set()\n while tokens:\n label = int(tokens)\n label_set.add(label)\n labels.append(label)\n tokens = f.readline()\n return labels, label_set\n\ndef node_features_from_label_file(prefix):\n label_file = os.path.join(prefix, '%s_node_labels.txt'%prefix)\n try:\n labels, label_set = read_label_file(label_file)\n except IOError:\n return None\n labels = tf.keras.backend.one_hot(labels, len(label_set))\n return tf.constant(labels, dtype=tf.float32)\n\ndef get_normalized_degree(adj, normalized=False):\n degree = tf.math.reduce_sum(adj, axis=-1, keepdims=True)\n if not normalized:\n return degree\n log_degree = tf.math.log(1 + degree)\n return log_degree\n\ndef get_node_features(prefix, with_node_features, standardize, graph_ids, new_node_ids, graph_adj):\n node_attributes = node_features_from_attribute_file(prefix, standardize) if with_node_features else None\n labels = node_features_from_label_file(prefix)\n node_features = node_attributes if node_attributes is not None else labels\n if node_attributes is not None and labels is not None:\n node_features = tf.concat([node_features, labels], axis=1)\n if node_attributes is None and labels is None:\n node_features = [get_normalized_degree(adj, normalized=False) for adj in graph_adj]\n return node_features\n del node_attributes, labels\n num_node_features = int(node_features.shape[1])\n graph_node_features = [np.zeros(shape=(len(nodes), num_node_features), dtype=np.float32) for nodes in graph_adj]\n for node_id, graph_id in enumerate(graph_ids):\n new_node_id = new_node_ids[node_id]\n graph_node_features[graph_id][new_node_id,:] = node_features[node_id,:]\n graph_node_features = [tf.constant(graph, dtype=tf.float32) for graph in graph_node_features]\n return graph_node_features\n\ndef get_graph_node_ids(prefix):\n graph_file = os.path.join(prefix, '%s_graph_indicator.txt'%prefix)\n graph_ids = []\n graph_nodes = defaultdict(list)\n new_node_ids = dict()\n with open(graph_file, 'r') as f:\n tokens = f.readline()\n node_id = 0\n while tokens:\n graph_id = int(tokens)-1\n graph_ids.append(graph_id)\n new_node_ids[node_id] = len(graph_nodes[graph_id])\n graph_nodes[graph_id].append(node_id)\n node_id += 1\n tokens = f.readline()\n return graph_ids, graph_nodes, new_node_ids\n\ndef get_graph_adj(prefix, graph_nodes, graph_ids, new_node_ids):\n graph_adj = [np.zeros(shape=(len(nodes),len(nodes)), dtype=np.float32) for _, nodes in 
graph_nodes.items()]\n adj_file = os.path.join(prefix, '%s_A.txt'%prefix)\n adj_lst = []\n with open(adj_file, 'r') as f:\n tokens = f.readline()\n while tokens:\n tokens = tokens.split(',')\n node_a, node_b = int(tokens[0])-1, int(tokens[1])-1\n adj_lst.append((node_a, node_b))\n graph_a, graph_b = graph_ids[node_a], graph_ids[node_b]\n assert graph_a == graph_b\n node_a, node_b = new_node_ids[node_a], new_node_ids[node_b]\n graph_adj[graph_a][node_a, node_b] = 1.\n tokens = f.readline()\n graph_adj = [tf.constant(adj, dtype=tf.float32) for adj in graph_adj]\n return graph_adj, adj_lst\n\ndef get_edge_features(prefix, _, graph_nodes, graph_ids, new_node_ids, adj_lst):\n label_file = os.path.join(prefix, '%s_edge_labels.txt'%prefix)\n try:\n labels, label_set = read_label_file(label_file)\n except IOError:\n return None\n graph_edge_labels = [np.zeros(shape=(len(nodes),len(nodes)), dtype=np.int32) for _, nodes in graph_nodes.items()]\n for label, (node_a, node_b) in zip(labels, adj_lst):\n graph_a = graph_ids[node_a]\n new_node_a, new_node_b = new_node_ids[node_a], new_node_ids[node_b]\n graph_edge_labels[graph_a][new_node_a, new_node_b] = label\n graph_edge_labels = [tf.keras.backend.one_hot(graph, len(label_set)) for graph in graph_edge_labels]\n return graph_edge_labels\n\ndef print_statistics(node_features, graph_adj, edge_features=None):\n num_nodes = [int(graph.shape[0]) for graph in graph_adj]\n num_edges = [int(tf.reduce_sum(graph)/2.) for graph in graph_adj]\n print('num_graphs: %d'%len(graph_adj))\n print('num_nodes: %d'%sum(num_nodes))\n print('num_edges: %d'%sum(num_edges))\n print('avg_nodes: %.2f'%np.array(num_nodes).mean())\n print('avg_edges: %.2f'%np.array(num_edges).mean())\n print('num_node_features: %d'%int(node_features[0].shape[1]))\n if edge_features is not None:\n print('num_edge_features: %d'%int(edge_features[0].shape[2]))\n else:\n print('no edge features')\n\ndef read_dortmund(prefix, with_node_features, with_edge_features, standardize):\n graph_ids, graph_nodes, new_node_ids = get_graph_node_ids(prefix)\n graph_adj, adj_lst = get_graph_adj(prefix, graph_nodes, graph_ids, new_node_ids)\n node_features = get_node_features(prefix, with_node_features, standardize, graph_ids, new_node_ids, graph_adj)\n edge_features = None\n if with_edge_features:\n edge_features = get_edge_features(prefix, standardize, graph_nodes, graph_ids, new_node_ids, adj_lst)\n if edge_features is None:\n return node_features, graph_adj\n return node_features, graph_adj, edge_features\n\ndef read_dataset(name, with_node_features=True, with_edge_features=False, standardize=True):\n print('opening %s...'%name, flush=True)\n if name == 'FRANKENSTEIN':\n standardize = False\n graph_inputs = read_dortmund(name, with_node_features, with_edge_features, standardize)\n print('%s opened with success !'%name, flush=True)\n print_statistics(*graph_inputs)\n return graph_inputs\n\ndef generate_json(name):\n _, adjs = read_dataset(name, with_edge_features=False, standardize=False)\n output_dir = os.path.join('../graph2vec/', name)\n try:\n os.makedirs(output_dir)\n except OSError as e:\n pass\n for num_graph, adj in enumerate(adjs):\n file_name = os.path.join(output_dir, str(num_graph)+'.json')\n adj = adj.numpy()\n num_nodes = adj.shape[0]\n edges = []\n for i in range(num_nodes):\n for j in range(i):\n if adj[i,j] != 0:\n edges.append([i,j])\n with open(file_name, 'w') as file:\n file.write('{\"edges\": '+str(edges)+'}')\n\ndef read_graph_labels(dataset_name):\n labels_filename = 
os.path.join(dataset_name, '%s_graph_labels.txt'%dataset_name)\n    labels, label_set = read_label_file(labels_filename)\n    class_indexes_remapper = dict(zip(label_set, range(len(label_set))))\n    labels = [class_indexes_remapper[label] for label in labels]\n    return labels, len(label_set)\n\ndef read_image(image):\n    image = tf.squeeze(image) if len(image.shape) == 2 else tf.math.reduce_mean(image, axis=-1)\n    mask = tf.not_equal(image, tf.zeros(shape=image.shape, dtype=image.dtype))\n    indices = tf.where(mask)\n    luminosity = tf.gather_nd(image, indices)\n    luminosity = tf.dtypes.cast(luminosity, dtype=tf.float32) / tf.constant(255., dtype=tf.float32)\n    luminosity = tf.expand_dims(luminosity, axis=-1)\n    indices_features = tf.dtypes.cast(indices, dtype=tf.float32)\n    features = tf.concat([indices_features, luminosity], axis=1)\n    features = features.numpy()\n    indices = indices.numpy()\n    indices_right = indices + np.array([0, 1], dtype=indices.dtype)\n    indices_down = indices + np.array([1, 0], dtype=indices.dtype)\n    iter_indices_right = product(enumerate(indices), enumerate(indices_right))\n    iter_indices_down = product(enumerate(indices), enumerate(indices_down))\n    adj_lst = []\n    for (i, idi), (j, idj) in chain(iter_indices_right, iter_indices_down):\n        if np.array_equal(idi, idj):\n            adj_lst.append([i, j])\n            adj_lst.append([j, i])\n    return adj_lst, features\n\ndef init_data(data_type, parts):\n    data = tfds.load(data_type, with_info=False)\n    if parts == 'all':\n        data = itertools.chain(data['train'], data['test'])\n        num_data = 70 * 1000\n    elif parts in ['train', 'test']:\n        data = data[parts]\n        num_data = 10 * 1000 if parts == 'test' else 60 * 1000\n    else:\n        raise ValueError(f'unknown parts value: {parts!r}')\n    progbar = tf.keras.utils.Progbar(num_data,\n                                     stateful_metrics=['num_nodes', 'num_edges'])\n    prefix = data_type.upper() + '_' + parts\n    try:\n        os.mkdir(prefix)\n    except FileExistsError:\n        pass\n    return data, progbar, prefix\n\ndef produce_data_images(data_type, parts):\n    data, progbar, prefix = init_data(data_type, parts)\n    map_name = lambda name: os.path.join(prefix, name)\n    with open(map_name(prefix+'_graph_indicator.txt'), 'w') as indicator_file, \\\n         open(map_name(prefix+'_A.txt'), 'w') as adj_file, \\\n         open(map_name(prefix+'_node_attributes.txt'), 'w') as features_file:\n        num_nodes, num_edges = 1, 0\n        for step, image_label in enumerate(data):\n            adj, features = read_image(image_label['image'])\n            for a, b in adj:\n                adj_file.write(str(a+num_nodes)+', '+str(b+num_nodes)+'\\n')\n            for feature in features:\n                indicator_file.write(str(step+1)+'\\n')\n                features_file.write(', '.join(map(str, feature.tolist()))+'\\n')\n            num_nodes += int(features.shape[0])\n            num_edges += len(adj)\n            progbar.update(step+1, [('num_nodes', num_nodes+1), ('num_edges', num_edges)])\n\ndef produce_data_labels(data_type, parts):\n    data, progbar, prefix = init_data(data_type, parts)\n    with open(os.path.join(prefix, prefix+'_graph_labels.txt'), 'w') as labels_file:\n        for step, image_label in enumerate(data):\n            label = int(image_label['label'].numpy())\n            labels_file.write(str(label)+'\\n')\n            progbar.update(step+1)\n\ndef available_tasks():\n    tasks = ['ENZYMES', 'PROTEINS', 'PROTEINS_full', 'MUTAG',\n             'PTC_MR', 'NCI1', 'NCI109', 'PTC_FR', 'DD',\n             'PTC_MR', 'PTC_FM', 'FRANKENSTEIN',\n             'REDDIT-BINARY', 'REDDIT-MULTI-5K',\n             'DLA', 'DLA2', 'MNIST_test',\n             'IMDB-BINARY', 'IMDB-MULTI',\n             'FASHION_MNIST_test', 'CIFAR10_test']\n    return tasks\n\n\nif __name__ == '__main__':\n    with tf.device('/cpu'):\n        what = input('What do you want to do ? 
Generate JSON (tape json) or images (tape images). ')\n if what == 'images':\n confirm = input('Are you sure to generate ? This is long. Tape \"yes\" or exit. ')\n if confirm == 'yes':\n data_name = input('Name of the dataset: \\'mnist\\', \\'fashion_mnist\\' or \\'cifar10\\'. ')\n part = input('Choose between: train, test, all. ')\n category = input('What do you want to generate ? labels or graphs. ')\n if category == 'graphs':\n produce_data_images(data_name, part)\n elif category == 'labels':\n produce_data_labels(data_name, part)\n else:\n raise ValueError\n else:\n print('Exit procedure.')\n elif what == 'json':\n dataset_name = input('Dataset name ? ')\n generate_json(dataset_name)\n" ]
[ [ "tensorflow.dtypes.cast", "numpy.array", "tensorflow.zeros", "numpy.array_equal", "tensorflow.concat", "tensorflow.math.reduce_sum", "tensorflow.math.reduce_std", "tensorflow.where", "tensorflow.gather_nd", "tensorflow.expand_dims", "tensorflow.math.log", "tensorflow.math.reduce_mean", "tensorflow.constant", "tensorflow.squeeze", "tensorflow.reduce_sum", "tensorflow.device", "tensorflow.keras.utils.Progbar" ] ]
vandalt/species
[ "527dd900a60c4d691bd490569cd3b2007f9beead" ]
[ "species/util/data_util.py" ]
[ "\"\"\"\nUtility functions for data processing.\n\"\"\"\n\nfrom typing import Optional, List\n\nimport h5py\nimport numpy as np\n\nfrom typeguard import typechecked\n\nfrom scipy.interpolate import griddata\n\n\ndef update_sptype(sptypes):\n \"\"\"\n Function to update a list with spectral types to two characters (e.g., M8, L3, or T1).\n\n Parameters\n ----------\n sptypes : np.ndarray\n Input spectral types.\n\n Returns\n -------\n np.ndarray\n Updated spectral types.\n \"\"\"\n\n sptype_list = ['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'T', 'Y']\n\n for i, spt_item in enumerate(sptypes):\n if spt_item == 'None':\n pass\n\n elif spt_item == 'null':\n sptypes[i] = 'None'\n\n else:\n for list_item in sptype_list:\n try:\n sp_index = spt_item.index(list_item)\n sptypes[i] = spt_item[sp_index:sp_index+2]\n\n except ValueError:\n pass\n\n return sptypes\n\n\ndef update_filter(filter_in):\n \"\"\"\n Function to update a filter ID from the Vizier Photometry viewer VOTable to the filter ID from\n the SVO Filter Profile Service.\n\n Parameters\n ----------\n filter_in : str\n Filter ID in the format of the Vizier Photometry viewer.\n\n Returns\n -------\n str\n Filter ID in the format of the SVO Filter Profile Service.\n \"\"\"\n\n if filter_in[0:5] == b'2MASS':\n filter_out = str(b'2MASS/2MASS.'+filter_in[6:])\n\n elif filter_in[0:4] == b'WISE':\n filter_out = str(b'WISE/WISE.'+filter_in[5:])\n\n elif filter_in[0:10] == b'GAIA/GAIA2':\n filter_out = str(filter_in[0:9]+b'0'+filter_in[10:])\n\n else:\n filter_out = None\n\n return filter_out\n\n\n@typechecked\ndef sort_data(param_teff: np.ndarray,\n param_logg: Optional[np.ndarray],\n param_feh: Optional[np.ndarray],\n param_co: Optional[np.ndarray],\n param_fsed: Optional[np.ndarray],\n wavelength: np.ndarray,\n flux: np.ndarray) -> List[np.ndarray]:\n \"\"\"\n Parameters\n ----------\n param_teff : np.ndarray\n Array with the effective temperature (K) of each spectrum.\n param_logg : np.ndarray, None\n Array with the log10 surface gravity (cgs) of each spectrum.\n param_feh : np.ndarray, None\n Array with the metallicity of each spectrum. Not used if set to ``None``.\n param_co : np.ndarray, None\n Array with the carbon-to-oxygen ratio of each spectrum. Not used if set to ``None``.\n param_fsed : np.ndarray, None\n Array with the sedimentation parameter of each spectrum. 
Not used if set to ``None``.\n wavelength : np.ndarray\n Array with the wavelengths (um).\n flux : np.ndarray\n Array with the spectra with dimensions ``(n_spectra, n_wavelengths)``.\n\n Returns\n -------\n list(np.ndarray, )\n List with the unique values of the atmosphere parameters (each in a separate array), an\n array with the wavelengths, and a multidimensional array with the sorted spectra.\n \"\"\"\n\n n_spectra = param_teff.shape[0]\n\n teff_unique = np.unique(param_teff)\n spec_shape = [teff_unique.shape[0]]\n\n print('Grid points stored in the database:')\n print(f' - Teff = {teff_unique}')\n\n if param_logg is not None:\n logg_unique = np.unique(param_logg)\n spec_shape.append(logg_unique.shape[0])\n print(f' - log(g) = {logg_unique}')\n\n if param_feh is not None:\n feh_unique = np.unique(param_feh)\n spec_shape.append(feh_unique.shape[0])\n print(f' - [Fe/H] = {feh_unique}')\n\n if param_co is not None:\n co_unique = np.unique(param_co)\n spec_shape.append(co_unique.shape[0])\n print(f' - C/O = {co_unique}')\n\n if param_fsed is not None:\n fsed_unique = np.unique(param_fsed)\n spec_shape.append(fsed_unique.shape[0])\n print(f' - f_sed = {fsed_unique}')\n\n spec_shape.append(wavelength.shape[0])\n\n spectrum = np.zeros(spec_shape)\n\n for i in range(n_spectra):\n # The parameter order is: Teff, log(g), [Fe/H], C/O, f_sed\n # Not all parameters have to be included but the order matters\n\n index_teff = np.argwhere(teff_unique == param_teff[i])[0][0]\n spec_select = [index_teff]\n\n if param_logg is not None:\n index_logg = np.argwhere(logg_unique == param_logg[i])[0][0]\n spec_select.append(index_logg)\n\n if param_feh is not None:\n index_feh = np.argwhere(feh_unique == param_feh[i])[0][0]\n spec_select.append(index_feh)\n\n if param_co is not None:\n index_co = np.argwhere(co_unique == param_co[i])[0][0]\n spec_select.append(index_co)\n\n if param_fsed is not None:\n index_fsed = np.argwhere(fsed_unique == param_fsed[i])[0][0]\n spec_select.append(index_fsed)\n\n spec_select.append(...)\n\n spectrum[tuple(spec_select)] = flux[i]\n\n sorted_data = [teff_unique]\n\n if param_logg is not None:\n sorted_data.append(logg_unique)\n\n if param_feh is not None:\n sorted_data.append(feh_unique)\n\n if param_co is not None:\n sorted_data.append(co_unique)\n\n if param_fsed is not None:\n sorted_data.append(fsed_unique)\n\n sorted_data.append(wavelength)\n sorted_data.append(spectrum)\n\n return sorted_data\n\n\n@typechecked\ndef write_data(model: str,\n parameters: List[str],\n database: h5py._hl.files.File,\n data_sorted: List[np.ndarray]) -> None:\n \"\"\"\n Function for writing the model spectra and parameters to the database.\n\n Parameters\n ----------\n model : str\n Atmosphere model.\n parameters : list(str, )\n Model parameters.\n database: h5py._hl.files.File\n Database.\n data_sorted : list(np.ndarray, )\n Sorted model data with the parameter values, wavelength points (um), and flux\n densities (W m-2 um-1).\n\n Returns\n -------\n NoneType\n None\n \"\"\"\n\n n_param = len(parameters)\n\n if f'models/{model}' in database:\n del database[f'models/{model}']\n\n dset = database.create_group(f'models/{model}')\n\n dset.attrs['n_param'] = n_param\n\n for i, item in enumerate(parameters):\n dset.attrs[f'parameter{i}'] = item\n\n database.create_dataset(f'models/{model}/{item}',\n data=data_sorted[i])\n\n database.create_dataset(f'models/{model}/wavelength',\n data=data_sorted[n_param])\n\n database.create_dataset(f'models/{model}/flux',\n 
data=data_sorted[n_param+1])\n\n\n@typechecked\ndef add_missing(model: str,\n parameters: List[str],\n database: h5py._hl.files.File) -> None:\n \"\"\"\n Function for adding missing grid points with a linear interpolation.\n\n Parameters\n ----------\n model : str\n Atmosphere model.\n parameters : list(str, )\n Model parameters.\n database : h5py._hl.files.File\n Database.\n\n Returns\n -------\n NoneType\n None\n \"\"\"\n\n print('Number of grid points per parameter:')\n\n grid_shape = []\n param_data = []\n\n for i, item in enumerate(parameters):\n grid_shape.append(database[f'models/{model}/{item}'].shape[0])\n param_data.append(np.asarray(database[f'models/{model}/{item}']))\n print(f' - {item}: {grid_shape[i]}')\n\n flux = np.asarray(database[f'models/{model}/flux']) # (W m-1 um-1)\n flux = np.log10(flux)\n\n count_total = 0\n count_interp = 0\n count_missing = 0\n\n if len(parameters) == 1:\n # Blackbody spectra\n pass\n\n elif len(parameters) == 2:\n find_missing = np.zeros(grid_shape, dtype=bool)\n\n values = []\n points = [[], []]\n new_points = [[], []]\n\n print('Fix missing grid points with a linear interpolation:')\n\n for i in range(grid_shape[0]):\n for j in range(grid_shape[1]):\n if np.isinf(np.sum(flux[i, j, ...])):\n print(' - ', end='')\n print(f'{parameters[0]} = {param_data[0][i]}, ', end='')\n print(f'{parameters[1]} = {param_data[1][j]}')\n\n if 0 < i < grid_shape[0]-1:\n check_low = np.isinf(np.sum(flux[i-1, j, ...]))\n check_up = np.isinf(np.sum(flux[i+1, j, ...]))\n\n # Linear scaling of the intermediate Teff point\n scaling = (param_data[0][i] - param_data[0][i-1]) / \\\n (param_data[0][i+1] - param_data[0][i-1])\n\n if not check_low and not check_up:\n flux_low = flux[i-1, j, ...]\n flux_up = flux[i+1, j, ...]\n flux[i, j, ...] = flux_low*(1.-scaling) + flux_up*scaling\n count_interp += 1\n\n else:\n find_missing[i, j] = True\n\n else:\n find_missing[i, j] = True\n\n else:\n points[0].append(param_data[0][i])\n points[1].append(param_data[1][j])\n\n values.append(flux[i, j, ...])\n\n new_points[0].append(param_data[0][i])\n new_points[1].append(param_data[1][j])\n\n count_total += 1\n\n values = np.asarray(values)\n points = np.asarray(points)\n new_points = np.asarray(new_points)\n\n if np.sum(find_missing) > 0:\n flux_int = griddata(points.T, values, new_points.T, method='linear', fill_value=np.nan)\n\n count = 0\n\n for i in range(grid_shape[0]):\n for j in range(grid_shape[1]):\n if np.isnan(np.sum(flux_int[count, :])):\n count_missing += 1\n\n elif np.isinf(np.sum(flux[i, j, ...])):\n flux[i, j, :] = flux_int[count, :]\n count_interp += 1\n\n count += 1\n\n if count_missing > 0:\n print(f'Could not interpolate {count_missing} grid points so storing zeros '\n f'instead. 
[WARNING]\\nThe grid points that are missing:')\n\n for i in range(flux_int.shape[0]):\n if np.isnan(np.sum(flux_int[i, :])):\n print(' - ', end='')\n print(f'{parameters[0]} = {new_points[0][i]}, ', end='')\n print(f'{parameters[1]} = {new_points[1][i]}')\n\n elif len(parameters) == 3:\n find_missing = np.zeros(grid_shape, dtype=bool)\n\n values = []\n points = [[], [], []]\n new_points = [[], [], []]\n\n print('Fix missing grid points with a linear interpolation:')\n\n for i in range(grid_shape[0]):\n for j in range(grid_shape[1]):\n for k in range(grid_shape[2]):\n if np.isinf(np.sum(flux[i, j, k, ...])):\n print(' - ', end='')\n print(f'{parameters[0]} = {param_data[0][i]}, ', end='')\n print(f'{parameters[1]} = {param_data[1][j]}, ', end='')\n print(f'{parameters[2]} = {param_data[2][k]}')\n\n if 0 < i < grid_shape[0]-1:\n check_low = np.isinf(np.sum(flux[i-1, j, k, ...]))\n check_up = np.isinf(np.sum(flux[i+1, j, k, ...]))\n\n # Linear scaling of the intermediate Teff point\n scaling = (param_data[0][i] - param_data[0][i-1]) / \\\n (param_data[0][i+1] - param_data[0][i-1])\n\n if not check_low and not check_up:\n flux_low = flux[i-1, j, k, ...]\n flux_up = flux[i+1, j, k, ...]\n flux[i, j, k, ...] = flux_low*(1.-scaling) + flux_up*scaling\n count_interp += 1\n\n else:\n find_missing[i, j, k] = True\n\n else:\n find_missing[i, j, k] = True\n\n else:\n points[0].append(param_data[0][i])\n points[1].append(param_data[1][j])\n points[2].append(param_data[2][k])\n\n values.append(flux[i, j, k, ...])\n\n new_points[0].append(param_data[0][i])\n new_points[1].append(param_data[1][j])\n new_points[2].append(param_data[2][k])\n\n count_total += 1\n\n values = np.asarray(values)\n points = np.asarray(points)\n new_points = np.asarray(new_points)\n\n if np.sum(find_missing) > 0:\n flux_int = griddata(points.T, values, new_points.T, method='linear', fill_value=np.nan)\n\n count = 0\n\n for i in range(grid_shape[0]):\n for j in range(grid_shape[1]):\n for k in range(grid_shape[2]):\n if np.isnan(np.sum(flux_int[count, :])):\n count_missing += 1\n\n elif np.isinf(np.sum(flux[i, j, k, ...])):\n flux[i, j, k, :] = flux_int[count, :]\n count_interp += 1\n\n count += 1\n\n if count_missing > 0:\n print(f'Could not interpolate {count_missing} grid points so storing zeros '\n f'instead. 
[WARNING]\\nThe grid points that are missing:')\n\n for i in range(flux_int.shape[0]):\n if np.isnan(np.sum(flux_int[i, :])):\n print(' - ', end='')\n print(f'{parameters[0]} = {new_points[0][i]}, ', end='')\n print(f'{parameters[1]} = {new_points[1][i]}, ', end='')\n print(f'{parameters[2]} = {new_points[2][i]}')\n\n elif len(parameters) == 4:\n find_missing = np.zeros(grid_shape, dtype=bool)\n\n values = []\n points = [[], [], [], []]\n new_points = [[], [], [], []]\n\n print('Fix missing grid points with a linear interpolation:')\n\n for i in range(grid_shape[0]):\n for j in range(grid_shape[1]):\n for k in range(grid_shape[2]):\n for m in range(grid_shape[3]):\n if np.isinf(np.sum(flux[i, j, k, m, ...])):\n print(' - ', end='')\n print(f'{parameters[0]} = {param_data[0][i]}, ', end='')\n print(f'{parameters[1]} = {param_data[1][j]}, ', end='')\n print(f'{parameters[2]} = {param_data[2][k]}, ', end='')\n print(f'{parameters[3]} = {param_data[3][m]}')\n\n if 0 < i < grid_shape[0]-1:\n check_low = np.isinf(np.sum(flux[i-1, j, k, m, ...]))\n check_up = np.isinf(np.sum(flux[i+1, j, k, m, ...]))\n\n # Linear scaling of the intermediate Teff point\n scaling = (param_data[0][i] - param_data[0][i-1]) / \\\n (param_data[0][i+1] - param_data[0][i-1])\n\n if not check_low and not check_up:\n flux_low = flux[i-1, j, k, m, ...]\n flux_up = flux[i+1, j, k, m, ...]\n flux[i, j, k, m, ...] = flux_low*(1.-scaling) + flux_up*scaling\n count_interp += 1\n\n else:\n find_missing[i, j, k, m] = True\n\n else:\n find_missing[i, j, k, m] = True\n\n else:\n points[0].append(param_data[0][i])\n points[1].append(param_data[1][j])\n points[2].append(param_data[2][k])\n points[3].append(param_data[3][m])\n\n values.append(flux[i, j, k, m, ...])\n\n new_points[0].append(param_data[0][i])\n new_points[1].append(param_data[1][j])\n new_points[2].append(param_data[2][k])\n new_points[3].append(param_data[3][m])\n\n count_total += 1\n\n values = np.asarray(values)\n points = np.asarray(points)\n new_points = np.asarray(new_points)\n\n if np.sum(find_missing) > 0:\n flux_int = griddata(points.T, values, new_points.T, method='linear', fill_value=np.nan)\n\n count = 0\n\n for i in range(grid_shape[0]):\n for j in range(grid_shape[1]):\n for k in range(grid_shape[2]):\n for m in range(grid_shape[3]):\n if np.isnan(np.sum(flux_int[count, :])):\n count_missing += 1\n\n elif np.isinf(np.sum(flux[i, j, k, m, ...])):\n flux[i, j, k, m, :] = flux_int[count, :]\n count_interp += 1\n\n count += 1\n\n if count_missing > 0:\n print(f'Could not interpolate {count_missing} grid points so storing zeros '\n f'instead. 
[WARNING]\\nThe grid points that are missing:')\n\n for i in range(flux_int.shape[0]):\n if np.isnan(np.sum(flux_int[i, :])):\n print(' - ', end='')\n print(f'{parameters[0]} = {new_points[0][i]}, ', end='')\n print(f'{parameters[1]} = {new_points[1][i]}, ', end='')\n print(f'{parameters[2]} = {new_points[2][i]}, ', end='')\n print(f'{parameters[3]} = {new_points[3][i]}')\n\n # ran_par_0 = np.random.randint(grid_shape[0], size=1000)\n # ran_par_1 = np.random.randint(grid_shape[1], size=1000)\n # ran_par_2 = np.random.randint(grid_shape[2], size=1000)\n # ran_par_3 = np.random.randint(grid_shape[3], size=1000)\n #\n # for z in range(ran_par_0.shape[0]):\n # i = ran_par_0[z]\n # j = ran_par_1[z]\n # k = ran_par_2[z]\n # m = ran_par_3[z]\n #\n # if 0 < i < grid_shape[0]-1:\n # check_low = np.isinf(np.sum(flux[i-1, j, k, m, ...]))\n # check_up = np.isinf(np.sum(flux[i+1, j, k, m, ...]))\n #\n # # Linear scaling of the intermediate Teff point\n # scaling = (param_data[0][i] - param_data[0][i-1]) / \\\n # (param_data[0][i+1] - param_data[0][i-1])\n #\n # if not check_low and not check_up:\n # flux_low = flux[i-1, j, k, m, ...]\n # flux_up = flux[i+1, j, k, m, ...]\n # flux[i, j, k, m, ...] = flux_low*(1.-scaling) + flux_up*scaling\n\n elif len(parameters) == 5:\n find_missing = np.zeros(grid_shape, dtype=bool)\n\n values = []\n points = [[], [], [], [], []]\n new_points = [[], [], [], [], []]\n\n print('Fix missing grid points with a linear interpolation:')\n\n for i in range(grid_shape[0]):\n for j in range(grid_shape[1]):\n for k in range(grid_shape[2]):\n for m in range(grid_shape[3]):\n for n in range(grid_shape[4]):\n if np.isinf(np.sum(flux[i, j, k, m, n, ...])):\n print(' - ', end='')\n print(f'{parameters[0]} = {param_data[0][i]}, ', end='')\n print(f'{parameters[1]} = {param_data[1][j]}, ', end='')\n print(f'{parameters[2]} = {param_data[2][k]}, ', end='')\n print(f'{parameters[3]} = {param_data[3][m]}, ', end='')\n print(f'{parameters[4]} = {param_data[4][n]}')\n\n if 0 < i < grid_shape[0]-1:\n check_low = np.isinf(np.sum(flux[i-1, j, k, m, n, ...]))\n check_up = np.isinf(np.sum(flux[i+1, j, k, m, n, ...]))\n\n # Linear scaling of the intermediate Teff point\n scaling = (param_data[0][i] - param_data[0][i-1]) / \\\n (param_data[0][i+1] - param_data[0][i-1])\n\n if not check_low and not check_up:\n flux_low = flux[i-1, j, k, m, n, ...]\n flux_up = flux[i+1, j, k, m, n, ...]\n flux[i, j, k, m, n, ...] 
= flux_low*(1.-scaling) + \\\n flux_up*scaling\n count_interp += 1\n\n else:\n find_missing[i, j, k, m, n] = True\n\n else:\n find_missing[i, j, k, m, n] = True\n\n else:\n points[0].append(param_data[0][i])\n points[1].append(param_data[1][j])\n points[2].append(param_data[2][k])\n points[3].append(param_data[3][m])\n points[4].append(param_data[4][n])\n\n values.append(flux[i, j, k, m, n, ...])\n\n new_points[0].append(param_data[0][i])\n new_points[1].append(param_data[1][j])\n new_points[2].append(param_data[2][k])\n new_points[3].append(param_data[3][m])\n new_points[4].append(param_data[4][n])\n\n count_total += 1\n\n values = np.asarray(values)\n points = np.asarray(points)\n new_points = np.asarray(new_points)\n\n if np.sum(find_missing) > 0:\n flux_int = griddata(points.T, values, new_points.T, method='linear', fill_value=np.nan)\n\n count = 0\n\n for i in range(grid_shape[0]):\n for j in range(grid_shape[1]):\n for k in range(grid_shape[2]):\n for m in range(grid_shape[3]):\n for n in range(grid_shape[4]):\n if np.isnan(np.sum(flux_int[count, :])):\n count_missing += 1\n\n elif np.isinf(np.sum(flux[i, j, k, m, n, ...])):\n flux[i, j, k, m, n, :] = flux_int[count, :]\n count_interp += 1\n\n count += 1\n\n if count_missing > 0:\n print(f'Could not interpolate {count_missing} grid points so storing zeros '\n f'instead. [WARNING]\\nThe grid points that are missing:')\n\n for i in range(flux_int.shape[0]):\n if np.isnan(np.sum(flux_int[i, :])):\n print(' - ', end='')\n print(f'{parameters[0]} = {new_points[0][i]}, ', end='')\n print(f'{parameters[1]} = {new_points[1][i]}, ', end='')\n print(f'{parameters[2]} = {new_points[2][i]}, ', end='')\n print(f'{parameters[3]} = {new_points[3][i]}, ', end='')\n print(f'{parameters[4]} = {new_points[4][i]}')\n\n else:\n raise ValueError('The add_missing function is currently not compatible with more than 5 '\n 'model parameters.')\n\n print(f'Number of stored grid points: {count_total}')\n print(f'Number of interpolated grid points: {count_interp}')\n print(f'Number of missing grid points: {count_missing}')\n\n del database[f'models/{model}/flux']\n database.create_dataset(f'models/{model}/flux', data=10.**flux)\n\n\ndef correlation_to_covariance(cor_matrix,\n spec_sigma):\n \"\"\"\n Parameters\n ----------\n cor_matrix : np.ndarray\n Correlation matrix of the spectrum.\n spec_sigma : np.ndarray\n Uncertainties (W m-2 um-1).\n\n Returns\n -------\n np.ndarrays\n Covariance matrix of the spectrum.\n \"\"\"\n\n cov_matrix = np.zeros(cor_matrix.shape)\n\n for i in range(cor_matrix.shape[0]):\n for j in range(cor_matrix.shape[1]):\n cov_matrix[i, j] = cor_matrix[i, j]*spec_sigma[i]*spec_sigma[j]\n\n if i == j:\n assert cor_matrix[i, j] == 1.\n\n return cov_matrix\n" ]
[ [ "numpy.asarray", "numpy.zeros", "numpy.sum", "numpy.argwhere", "scipy.interpolate.griddata", "numpy.log10", "numpy.unique" ] ]
fgitmichael/SelfSupevisedSkillDiscovery
[ "60eee11cfd67046190dd2784bf40e97bdbed9d40" ]
[ "seqwise_cont_highdimusingvae/main_seqwise_cont_skillspace_2d_nav_highdimusingvae_mcar_single_dims.py" ]
[ "import argparse\nimport torch\nimport numpy as np\nimport copy\n#from gym.envs.mujoco import HalfCheetahEnv\n\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.launchers.launcher_util import setup_logger\n\nfrom self_supervised.env_wrapper.rlkit_wrapper import NormalizedBoxEnvWrapper\nfrom self_supervised.utils.writer import MyWriterWithActivation\nfrom self_supervised.network.flatten_mlp import FlattenMlp as \\\n MyFlattenMlp\nfrom self_sup_combined.base.writer.diagnostics_writer import DiagnosticsWriter\n\nfrom self_supervised.memory.self_sup_replay_buffer import \\\n SelfSupervisedEnvSequenceReplayBuffer\n\nfrom diayn_seq_code_revised.policies.skill_policy import \\\n SkillTanhGaussianPolicyRevised, MakeDeterministicRevised\nfrom diayn_seq_code_revised.networks.my_gaussian import \\\n ConstantGaussianMultiDim\nfrom diayn_orig_cont_highdimusingvae.data_collector.\\\n skill_selector_cont_highdimusingvae import SkillSelectorContinousHighdimusingvae\n\nfrom seqwise_cont_skillspace.utils.info_loss import InfoLoss\nfrom seqwise_cont_skillspace.data_collector.seq_collector_optional_skill_id import \\\n SeqCollectorRevisedOptionalSkillId\n\nfrom two_d_navigation_demo.env.navigation_env import TwoDimNavigationEnv\n\nfrom seqwise_cont_highdimusingvae.algo.seqwise_cont_algo_highdimusingvae import \\\n SeqwiseAlgoRevisedContSkillsHighdimusingvae\nfrom seqwise_cont_highdimusingvae.networks.df_highdimusingvae_single_dims \\\n import RnnStepwiseSeqwiseClassifierHduvaeSingleDims\nfrom seqwise_cont_highdimusingvae.trainer.seqwise_cont_hduvae_single_dims_trainer \\\n import ContSkillTrainerSeqwiseStepwiseHighdimusingvaeSingleDims\n\n\ndef experiment(variant, args):\n expl_env = NormalizedBoxEnvWrapper(gym_id=str(args.env))\n eval_env = copy.deepcopy(expl_env)\n obs_dim = expl_env.observation_space.low.size\n action_dim = eval_env.action_space.low.size\n\n step_training_repeat = 1\n seq_len = 30\n skill_dim = args.skill_dim\n hidden_size_rnn = 20\n latent_dim = 2\n feature_size = 20\n hidden_sizes_classifier_seq = [200, 200]\n hidden_sizes_feature_dim_matcher = [200, 200]\n hidden_sizes_feature_to_latent_encoder = [200, 200]\n hidden_sizes_latent_to_skill_decoder = [200, 200]\n variant['algorithm_kwargs']['batch_size'] //= seq_len\n\n test_script_path_name = \"/home/michael/EIT/Github_Repos/24_SelfSupervisedDevel/\" \\\n \"cont_skillspace_test/main_test_hduvae_normal_render.py\"\n\n sep_str = \" | \"\n run_comment = sep_str\n run_comment += \"seq_len: {}\".format(seq_len) + sep_str\n run_comment += \"continous skill space\" + sep_str\n run_comment += \"hidden rnn_dim: {}{}\".format(hidden_size_rnn, sep_str)\n run_comment += \"step training repeat: {}\".format(step_training_repeat)\n\n seed = 0\n torch.manual_seed = seed\n expl_env.seed(seed)\n eval_env.seed(seed)\n np.random.seed(seed)\n\n M = variant['layer_size']\n qf1 = MyFlattenMlp(\n input_size=obs_dim + action_dim + skill_dim,\n output_size=1,\n hidden_sizes=[M, M],\n )\n qf2 = MyFlattenMlp(\n input_size=obs_dim + action_dim + skill_dim,\n output_size=1,\n hidden_sizes=[M, M],\n )\n target_qf1 = MyFlattenMlp(\n input_size=obs_dim + action_dim + skill_dim,\n output_size=1,\n hidden_sizes=[M, M],\n )\n target_qf2 = MyFlattenMlp(\n input_size=obs_dim + action_dim + skill_dim,\n output_size=1,\n hidden_sizes=[M, M],\n )\n df = RnnStepwiseSeqwiseClassifierHduvaeSingleDims(\n input_size=obs_dim,\n hidden_size_rnn=hidden_size_rnn,\n feature_size=feature_size,\n skill_dim=skill_dim,\n hidden_sizes_classifier_seq=hidden_sizes_classifier_seq,\n 
hidden_sizes_classifier_step=None,\n hidden_size_feature_dim_matcher=hidden_sizes_feature_dim_matcher,\n seq_len=seq_len,\n pos_encoder_variant='transformer',\n dropout=0.2,\n hidden_sizes_feature_to_latent_encoder=hidden_sizes_feature_to_latent_encoder,\n hidden_sizes_latent_to_skill_decoder=hidden_sizes_latent_to_skill_decoder,\n )\n policy = SkillTanhGaussianPolicyRevised(\n obs_dim=obs_dim,\n action_dim=action_dim,\n skill_dim=skill_dim,\n hidden_sizes=[M, M],\n )\n eval_policy = MakeDeterministicRevised(policy)\n skill_prior = ConstantGaussianMultiDim(\n output_dim=latent_dim\n )\n skill_selector = SkillSelectorContinousHighdimusingvae(\n prior_skill_dist=skill_prior,\n df_vae_regressor=df.classifier,\n skill_dim=skill_dim,\n )\n eval_path_collector = SeqCollectorRevisedOptionalSkillId(\n eval_env,\n eval_policy,\n max_seqs=50,\n skill_selector=skill_selector\n )\n expl_step_collector = SeqCollectorRevisedOptionalSkillId(\n expl_env,\n policy,\n max_seqs=50,\n skill_selector=skill_selector\n )\n seq_eval_collector = SeqCollectorRevisedOptionalSkillId(\n env=eval_env,\n policy=eval_policy,\n max_seqs=50,\n skill_selector=skill_selector\n )\n replay_buffer = SelfSupervisedEnvSequenceReplayBuffer(\n max_replay_buffer_size=variant['replay_buffer_size'],\n seq_len=seq_len,\n mode_dim=skill_dim,\n env=expl_env,\n )\n info_loss_fun = InfoLoss(\n alpha=0.97,\n lamda=0.3,\n ).loss\n trainer = ContSkillTrainerSeqwiseStepwiseHighdimusingvaeSingleDims(\n skill_prior_dist=skill_prior,\n loss_fun=info_loss_fun,\n env=eval_env,\n policy=policy,\n qf1=qf1,\n qf2=qf2,\n df=df,\n target_qf1=target_qf1,\n target_qf2=target_qf2,\n **variant['trainer_kwargs']\n )\n\n writer = MyWriterWithActivation(\n seed=seed,\n log_dir='mcar_singledims',\n run_comment=run_comment\n )\n diagno_writer = DiagnosticsWriter(\n writer=writer,\n log_interval=1,\n scripts_to_copy=test_script_path_name,\n )\n\n algorithm = SeqwiseAlgoRevisedContSkillsHighdimusingvae(\n trainer=trainer,\n exploration_env=expl_env,\n evaluation_env=eval_env,\n exploration_data_collector=expl_step_collector,\n evaluation_data_collector=eval_path_collector,\n replay_buffer=replay_buffer,\n\n seq_len=seq_len,\n\n diagnostic_writer=diagno_writer,\n seq_eval_collector=seq_eval_collector,\n\n **variant['algorithm_kwargs']\n )\n algorithm.to(ptu.device)\n algorithm.train()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--env',\n type=str,\n default=\"MountainCarContinuous-v0\",\n help='environment'\n )\n parser.add_argument('--skill_dim',\n type=int,\n default=3,\n help='skill dimension'\n )\n args = parser.parse_args()\n\n # noinspection PyTypeChecker\n variant = dict(\n algorithm=\"DIAYN\",\n version=\"normal\",\n layer_size=256,\n replay_buffer_size=int(1E6),\n algorithm_kwargs=dict(\n num_epochs=1000,\n num_eval_steps_per_epoch=5000,\n num_trains_per_train_loop=10,\n num_expl_steps_per_train_loop=10,\n min_num_steps_before_training=1000,\n max_path_length=1000,\n batch_size=500,\n ),\n trainer_kwargs=dict(\n discount=0.99,\n soft_target_tau=5e-3,\n target_update_period=1,\n policy_lr=3E-4,\n qf_lr=3E-4,\n reward_scale=1,\n use_automatic_entropy_tuning=True,\n df_lr_step=9E-4,\n df_lr_seq=8E-4,\n ),\n )\n setup_logger('DIAYN_' + str(args.skill_dim) + '_' + args.env, variant=variant)\n ptu.set_gpu_mode(True) # optionally set the GPU (default=False)\n experiment(variant, args)\n" ]
[ [ "numpy.random.seed" ] ]
tianyunchn/web-crawler
[ "5e8d95a2f6885b5970d3f4b010145442af341433" ]
[ "selenium-support.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 6 15:04:11 2019\n\n@author: Administrator\n\"\"\"\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport re\nimport time\nimport pandas as pd\nfrom pandas import ExcelWriter\n\n\ncat_dir = 'C:/Users/Administrator/Desktop/็ซžๅ“ID.xlsx'\ndf = pd.read_excel(cat_dir,header = None)\nchanpin = []\nfor i in range(1,df[1].size):\n chanpin.append([df[1][i],df[2][i]])\n\nurl = 'https://detail.tmall.hk/hk/item.htm?id='\n\ndef getHtml(url, loadmore = False, waittime = 1):\n # browser = webdriver.Firefox()\n browser = webdriver.Chrome('chromedriver')\n browser.get(url)\n time.sleep(waittime)\n html = browser.page_source\n browser.quit()\n return html\n\n \n\nif __name__ == \"__main__\":\n collection = []\n for product in chanpin:\n url_temp= url+str(product[0])\n html = getHtml(url_temp, loadmore = False, waittime = 1)\n result = BeautifulSoup(html,features=\"lxml\").text\n if product[1] == 'ๅคฉ็Œซ':\n collection.append(re.findall(r\"ๆœˆ้”€้‡(.+?)\\n\",result))\n else:\n collection.append(re.findall(r\"\\n(.+?)\\nไบคๆ˜“ๆˆๅŠŸ\",result))\n\n \n \n for i in range(1,df[1].size):\n df[1][i] = str(df[1][i])\n df[3][i] = str(collection[i-1][0])\n \n with ExcelWriter('็ซžๅ“้”€้‡.xlsx') as writer:\n df.to_excel(writer, sheet_name='sheet2',index = None,header = None)\n \n" ]
[ [ "pandas.read_excel", "pandas.ExcelWriter" ] ]
USTCEarthDefense/SNBTD
[ "b862230b7c7f36578ac94ee228083f7d2e9c6dd9" ]
[ "code/SSGP_classifier.py" ]
[ "import os\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\nimport numpy as np\nimport tensorflow as tf\nimport common_funcs\nfrom common_funcs import FLOAT_TYPE\nimport data_loader\nfrom sklearn.cluster import KMeans\n\nimport time\nimport joblib as jb\n\n# Streaming Sparse Gaussian Tensor Decomposition\n# By ZMP\n\nimport sys\n#run as\nprint(\"usage : python *.py rank=3 batch_size=256 dataset=dblp\")\n\nprint('start')\nprint( sys.argv)\n#parse args\npy_name = sys.argv[0]\n\nargs = sys.argv[1:]\nargs_dict = {}\nfor arg_pair in args:\n arg, val_str = arg_pair.split( '=')\n args_dict[ arg] = val_str.strip()\n\narg_rank = int( args_dict['rank'])\narg_data_name = args_dict['dataset']\narg_batch_size = int( args_dict['batch_size'])\n\n\n\nclass SSGP_CLF:\n\n def __init__(self, init_config):\n\n #Model configuration parameters\n self.num_pseudo_points = init_config['num_pseudo_points']\n self.rank = init_config['rank']\n self.init_method = init_config['init_method']\n self.elem_sizes = init_config['elem_sizes'] # list, number of elements( users, items, ...)\n self.learning_rate = init_config['learning_rate']\n self.N_data_points = init_config['N_data_points']\n\n if 'saved_model' in init_config:\n saved_model = init_config['saved_model']\n self.init_mu_U = saved_model['mu_U']\n self.init_std_vec_U = saved_model['std_vec_U']\n self.fix_U = True\n else:\n self.fix_U = False\n\n self.num_mods = len( self.elem_sizes)\n self.num_factors = np.sum( self.elem_sizes)\n self.rank_psd_input = self.num_mods * self.rank # Will be different if use neural kernel\n self.tf_initializer = common_funcs.get_initializer(self.init_method, args = None)\n\n #Parameters\n self.PARAS_SCOPE_NAME = 'PARAS'\n with tf.variable_scope( self.PARAS_SCOPE_NAME):\n\n if self.fix_U:\n self.tf_mu_U = [ tf.constant( self.init_mu_U[i], dtype = FLOAT_TYPE) for i in range( self.num_mods)]\n self.tf_std_vec_U = [ tf.constant( self.init_std_vec_U[i], dtype=FLOAT_TYPE) for i in range( self.num_mods)]\n else:\n #Embeddings initialized by default initlizer\n self.tf_mu_U = [tf.Variable(np.random.randn( num_elem, self.rank) * 1.0, dtype=FLOAT_TYPE) for num_elem in self.elem_sizes]\n self.tf_std_vec_U = [ tf.Variable( np.ones( shape = [ num_elem, self.rank]) * 0.1, dtype=FLOAT_TYPE) for num_elem in self.elem_sizes] #var = diag( std * std )\n\n self.B_init_holder = tf.placeholder( dtype=FLOAT_TYPE, shape=[ self.num_pseudo_points, self.rank_psd_input])\n self.tf_B = tf.Variable( initial_value=self.B_init_holder)\n\n self.tf_post_mu_b = tf.Variable(tf.random.normal( shape = [self.num_pseudo_points, 1], dtype=FLOAT_TYPE), dtype=FLOAT_TYPE)\n self.tf_post_Ltrig_b = tf.linalg.band_part(tf.Variable(np.eye( self.num_pseudo_points), dtype=FLOAT_TYPE), -1, 0)\n\n #Kernel parameters. ARD\n self.tf_log_lengthscale = tf.Variable(np.zeros(shape = [self.rank_psd_input, 1]), dtype=FLOAT_TYPE)\n self.tf_log_amp = tf.Variable(0.0, dtype=FLOAT_TYPE)\n\n #Place holders\n self.batch_inds = tf.placeholder(dtype=tf.int32, shape=[None, self.num_mods])\n self.batch_rates = tf.placeholder(dtype=FLOAT_TYPE, shape=[None, ])\n self.batch_uniq_fac_inds = [tf.placeholder( dtype=tf.int32,shape= [None,] ) for _ in range( self.num_mods)]\n\n #Old values. Be aware, Complicated logic here. 
Becareful to modify.\n self.mu_b_old_ori = tf.Variable( np.zeros( shape=[self.num_pseudo_points,1]), dtype=FLOAT_TYPE)\n self.mu_b_old = tf.stop_gradient(self.mu_b_old_ori )\n\n self.Ltrig_b_old_ori_init_holder = tf.placeholder( dtype=FLOAT_TYPE, shape=[ self.num_pseudo_points, self.num_pseudo_points])\n self.Ltrig_b_old_ori = tf.Variable(self.Ltrig_b_old_ori_init_holder , dtype=FLOAT_TYPE)\n self.Ltrig_b_old = tf.stop_gradient( self.Ltrig_b_old_ori)\n\n self.Kmm_old_ori = tf.Variable( np.zeros( shape = [ self.num_pseudo_points, self.num_pseudo_points]), dtype=FLOAT_TYPE)\n self.Kmm_old = tf.stop_gradient( self.Kmm_old_ori)\n\n self.mu_U_old_ori = [ tf.Variable( np.zeros( shape = [ num_elem, self.rank]), dtype=FLOAT_TYPE) for num_elem in self.elem_sizes]\n self.mu_U_old = [ tf.stop_gradient( self.mu_U_old_ori[k]) for k in range( self.num_mods)]\n\n self.std_vec_U_old_ori = [tf.Variable(np.zeros(shape = [num_elem, self.rank]), dtype=FLOAT_TYPE) for num_elem in self.elem_sizes]\n self.std_vec_U_old = [tf.stop_gradient( self.std_vec_U_old_ori[k]) for k in range( self.num_mods)]\n\n self.var_normal_params_old_ori = tf.Variable(np.array([0, 1]), dtype=FLOAT_TYPE)\n self.var_normal_params_old = tf.stop_gradient(self.var_normal_params_old_ori)\n\n\n self.assign_old_values_op = [tf.assign( self.mu_b_old_ori, self.tf_post_mu_b), tf.assign( self.Ltrig_b_old_ori, self.tf_post_Ltrig_b)]\n\n self.assign_old_values_op = self.assign_old_values_op + [ tf.assign( self.mu_U_old_ori[k], self.tf_mu_U[k]) for k in range( self.num_mods)] + \\\n [tf.assign(self.std_vec_U_old_ori[k], self.tf_std_vec_U[k]) for k in range( self.num_mods)]\n\n self.sub_batch_size = self.N_data_points\n\n #sample posterior embeddings\n sampled_embeddings, self.batch_mean, self.batch_std_vec = common_funcs.sample_embeddings( self.tf_mu_U, self.tf_std_vec_U, self.batch_inds, return_batch_info= True)\n self.sampled_X = tf.concat( sampled_embeddings, axis=1)\n '''\n Some neural kernel transform here if using neural kernel\n '''\n self.Kmm = common_funcs.kernel_cross_tf(self.tf_B, self.tf_B, self.tf_log_amp, self.tf_log_lengthscale)# + MATRIX_JITTER * tf.eye( self.num_pseudo_points)\n self.Knm = common_funcs.kernel_cross_tf(self.sampled_X, self.tf_B, self.tf_log_amp, self.tf_log_lengthscale)\n self.assign_old_values_op.append( tf.assign( self.Kmm_old_ori, self.Kmm))\n\n post_sample_f, f_std = common_funcs.sample_sparse_f( self.tf_post_mu_b, self.tf_post_Ltrig_b, self.Kmm, self.Knm, self.tf_log_amp, return_std=True) #[batch_size, 1]\n self.post_sample_f = tf.reshape(post_sample_f, shape=[-1]) # [ batch_size,]\n\n #MLE sample of f. 
Used in prediction\n self.f_mle = tf.reshape( self.Knm @ tf.linalg.solve( self.Kmm, self.tf_post_mu_b), shape=[-1])\n self.f_std = tf.reshape( f_std, shape = [-1])\n\n self.data_fidelity = self.sub_batch_size * tf.reduce_mean(- tf.nn.sigmoid_cross_entropy_with_logits( labels=self.batch_rates, logits=self.post_sample_f))\n\n # KL U\n # Note this is biased, because uniformly sampling from rating is not equivalent to uniformly sampling from factors\n uniq_mu_U = common_funcs.get_uniq_factors(self.tf_mu_U, self.batch_uniq_fac_inds)\n uniq_std_vec_U = common_funcs.get_uniq_factors(self.tf_std_vec_U, self.batch_uniq_fac_inds)\n\n uniq_mu_U_old = common_funcs.get_uniq_factors( self.mu_U_old, self.batch_uniq_fac_inds)\n uniq_std_vec_U_old = common_funcs.get_uniq_factors( self.std_vec_U_old, self.batch_uniq_fac_inds)\n\n self.batch_KL_U = common_funcs.KL_Gaussian_std_vec_tf(tf.concat(uniq_mu_U, axis=0),\n tf.concat(uniq_std_vec_U, axis=0),\n tf.concat(uniq_mu_U_old, axis=0),\n tf.concat(uniq_std_vec_U_old, axis=0), self.rank)\n self.KL_U = self.batch_KL_U\n\n # KL( q(b)|| p(b))\n self.KL_q_pb_new = common_funcs.KL_pseudo_output(self.Kmm, self.tf_post_Ltrig_b, self.tf_post_mu_b,\n self.num_pseudo_points)\n # KL( q(b) || q(b)_old)\n self.KL_q_qb_old = common_funcs.KL_Gaussian_Ltrig_tf( self.tf_post_mu_b, self.tf_post_Ltrig_b, self.mu_b_old, self.Ltrig_b_old, self.num_pseudo_points)\n\n # KL ( q(b) || p(b)_old)\n self.KL_q_pb_old = common_funcs.KL_pseudo_output( self.Kmm_old, self.tf_post_Ltrig_b, self.tf_post_mu_b,self.num_pseudo_points)\n\n self.KL_b = self.KL_q_qb_old + self.KL_q_pb_new - self.KL_q_pb_old\n\n # Loss functions\n self.ELBO = self.data_fidelity - self.KL_b - self.KL_U\n\n #Session settings\n self.min_opt = tf.train.AdamOptimizer(self.learning_rate)\n self.min_step = self.min_opt.minimize(- self.ELBO)\n\n self.train_hist = []\n # GPU settings\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=config)\n self.run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)\n\n #Pre-initialize pseudo input\n self.sess.run( tf.global_variables_initializer(), feed_dict={ self.B_init_holder : np.random.randn(self.num_pseudo_points, self.rank_psd_input),\n self.Ltrig_b_old_ori_init_holder : np.random.randn( self.num_pseudo_points, self.num_pseudo_points)} )\n self.is_B_initialized = False\n\n def _get_init_pseudo_input(self, inds):\n max_num_input_points = self.num_pseudo_points * 100\n if len( inds) > max_num_input_points:\n arg_random = np.random.permutation( len( inds))\n inds = inds[ arg_random[ : max_num_input_points]]\n\n X = self.sess.run( self.sampled_X, feed_dict={ self.batch_inds : inds})\n\n kmeans = KMeans( n_clusters = self.num_pseudo_points, n_jobs=-1)\n _ = kmeans.fit(X)\n\n return kmeans.cluster_centers_\n\n def _fit(self, inds, rates, batch_size, num_iters_per_batch, print_every_by_iters):\n\n num_batches = int( len( inds / batch_size))\n self.batch_X_y_gnrt = common_funcs.DataGenerator(inds, rates, shuffle=True)\n\n for n_batch in range( 1, num_batches + 1):\n batch_inds, batch_rates = self.batch_X_y_gnrt.draw_next(batch_size)\n\n self.fit_batch( batch_inds, batch_rates, num_iters_per_batch, print_every = print_every_by_iters)\n\n def fit_batch(self, inds, rates, steps, print_every = 100, clean_hist = True, verbose = True ):\n start_time = time.time()\n if not self.is_B_initialized:\n # Initialized model using\n print('Re-initializing B using Kmeans')\n\n cluster_centers = self._get_init_pseudo_input( inds)\n 
self.sess.run( self.tf_B.initializer, feed_dict = { self.B_init_holder : cluster_centers})\n self.is_B_initialized = True\n\n # update old posteriors and hyper-parameters\n _ = self.sess.run(self.assign_old_values_op)\n\n init_Kmm = self.sess.run(self.Kmm)\n L = np.linalg.cholesky(init_Kmm)\n self.sess.run(self.Ltrig_b_old_ori, feed_dict={self.Ltrig_b_old_ori_init_holder: L})\n\n print(\"Re-initializing Done\")\n\n\n if clean_hist:\n self.train_hist = []\n\n # Get unique inds\n uniq_inds = [np.unique(inds[:, k]) for k in range(self.num_mods)]\n\n for step in range( 1, steps + 1):\n # Get unique inds\n\n train_feed = {self.batch_inds: inds, self.batch_rates: rates}\n for k in range( self.num_mods):\n train_feed[ self.batch_uniq_fac_inds[k]] = uniq_inds[k]\n\n ELBO, sampled_f, data_fidelity,KL_U, KL_b, batch_U_mean, batch_U_std_vec, _ = self.sess.run( [\n self.ELBO, self.post_sample_f, self.data_fidelity,self.KL_U,self.KL_b, self.batch_mean, self.batch_std_vec, self.min_step], feed_dict= train_feed, options= self.run_options)\n\n self.train_hist.append( ELBO)\n if step % print_every == 0 and verbose:\n print('true_rates: ', rates[:5])\n print('sampled logits: ', sampled_f[:5])\n print('sampled rates: ', (sampled_f[:5] >= 0).astype(np.float32))\n train_auc = common_funcs.metrics_auc( rates, sampled_f )\n print( '\\nstep = %d, ELBO = %g, data_fidelity = %g, -KL_U = %g, -KL_b = %g, train_auc = %g' % ( step, ELBO,data_fidelity, -KL_U, -KL_b,train_auc))\n\n # update old posteriors and hyper-parameters\n _ = self.sess.run(self.assign_old_values_op)\n end_time = time.time()\n\n if verbose:\n print( 'secs_per_entry = %e' % (( end_time - start_time)/ len( inds)))\n\n return self\n\n def predict_log_llk(self, inds, y, batch_size):\n N = len( inds)\n\n test_llk = []\n start_idx = 0\n end_idx = start_idx + batch_size\n while( start_idx < N):\n end_idx = min( end_idx, N)\n batch_inds = inds[ start_idx : end_idx]\n batch_y = y[ start_idx : end_idx]\n test_feed = { self.batch_inds : batch_inds}\n\n batch_f, batch_f_std = self.sess.run( [ self.f_mle, self.f_std], feed_dict=test_feed)\n f_var = batch_f_std ** 2\n\n kappa = np.sqrt( 1 + np.pi * f_var/ 8)\n p = common_funcs.sigmoid( kappa * batch_f)\n llk = batch_y * np.log( p) + ( 1 - batch_y) * np.log( 1 - p)\n test_llk.append(llk)\n\n start_idx += batch_size\n end_idx = start_idx + batch_size\n\n test_llk = np.concatenate( test_llk)\n assert len( test_llk) == N, \"Dims not match\"\n return test_llk\n\n\n\n def _batch_wise_predict(self, inds, batch_size, return_logits):\n y_pred_logits = []\n\n N = len(inds)\n start_idx = 0\n end_idx = start_idx + batch_size\n while (start_idx < N):\n end_idx = min(end_idx, N)\n batch_inds = inds[start_idx:end_idx]\n test_feed = {self.batch_inds: batch_inds}\n\n batch_y = self.sess.run(self.f_mle, feed_dict=test_feed)\n y_pred_logits.append(batch_y)\n\n start_idx += batch_size\n end_idx = start_idx + batch_size\n\n y_pred_logits = np.concatenate(y_pred_logits)\n y_pred = ( y_pred_logits >= 0).astype( np.float32)\n\n assert len(y_pred_logits) == N, \"prediction length not match\"\n\n if return_logits:\n return y_pred, y_pred_logits\n else:\n return y_pred\n\n def predict(self, inds, batch_size=None, return_logits = False):\n\n if batch_size is not None:\n return self._batch_wise_predict(inds, batch_size, return_logits)\n\n else:\n test_feed = {self.batch_inds: inds}\n y_pred_logits = self.sess.run(self.f_mle, feed_dict=test_feed)\n y_pred = (y_pred_logits >= 0).astype( np.float32)\n\n if return_logits:\n return y_pred, 
y_pred_logits\n else:\n return y_pred\n\n\ndef main():\n\n assert arg_data_name in ['dblp','anime'], 'Wrong data name %s' % (arg_data_name)\n if arg_data_name == 'dblp':\n data = data_loader.load_dblp()\n elif arg_data_name == 'anime':\n data = data_loader.load_anime_binary( )\n else:\n raise NameError('wrong data set: %s' % arg_data_name)\n\n train_inds = data['train_X']\n train_rates = data['train_y']\n test_inds = data['test_X']\n test_rates = data['test_y']\n data_name = data['name']\n elem_sizes = data['elem_sizes']\n\n\n\n N_train = len( train_rates)\n N_test = len(test_rates)\n print('elem size:', elem_sizes)\n print('pseudo N train = %d, true N train = %d' % (N_train, len(train_rates)))\n\n print(\"N train = %d, N test = %d\" % (N_train, N_test))\n print('mods = ', elem_sizes)\n\n # np.random.seed(47)\n # tf.random.set_random_seed( 47)\n\n #parameters settings--------------\n batch_size = arg_batch_size\n num_iters_per_batch = 100\n # init U\n init_config = {\n 'elem_sizes': elem_sizes,\n 'learning_rate': 1e-3,\n 'init_method': 'he_normal',\n 'rank': arg_rank,\n 'num_pseudo_points': 128,\n 'batch_size': batch_size,\n 'num_iters_per_batch': num_iters_per_batch,\n 'N_data_points': N_train,\n 'init_batch_size' : 2048\n }\n #end parameters settings----------\n\n\n if 'USER' in os.environ:\n user_name = os.environ['USER']\n else:\n user_name = os.environ['USERNAME']\n\n log_file = common_funcs.init_log_file('ssgp_classifier_by_%s.txt' % user_name, data_name, init_config)\n init_config['log_file'] = log_file\n model = SSGP_CLF(init_config)\n\n\n num_batches = int(len(train_inds) / batch_size)\n print(\"num train = %d, num test = %d, batch_size = %d, num batches = %d\" % (\n len(train_inds), len(test_inds), batch_size, num_batches))\n batch_X_y_gnrt = common_funcs.DataGenerator(train_inds, train_rates, shuffle=True)\n batch_inds, batch_rates = batch_X_y_gnrt.draw_next(init_config['init_batch_size'])\n model.fit_batch(batch_inds, batch_rates, num_iters_per_batch, print_every=20, verbose=True)\n for n_batch in range(1, num_batches + 1):\n\n batch_inds, batch_rates = batch_X_y_gnrt.draw_next(batch_size)\n\n verbose = n_batch % int(num_batches / 20) == 0\n\n model.fit_batch(batch_inds, batch_rates, steps=num_iters_per_batch, verbose=verbose,print_every=50)\n\n if verbose:\n y_pred, logtis_pred = model.predict(test_inds, return_logits=True, batch_size = 1024)\n acc = common_funcs.metrics_accuracy(test_rates, y_pred)\n auc = common_funcs.metrics_auc(test_rates, logtis_pred)\n\n test_llk = model.predict_log_llk( test_inds, test_rates, batch_size=1024)\n ave_test_llk = np.average( test_llk)\n\n print(\"\\nbatch = %d, progress = %4.3g, test_acc = %g, test_auc = %g, ave_llk = %g\\n\" % ( n_batch, n_batch / num_batches * 100, acc, auc, ave_test_llk))\n log_file.write(\"batch = %d, progress = %4.3g, test_acc = %g, test_auc = %g, ave_llk = %g\\n\" % ( n_batch, n_batch / num_batches * 100, acc, auc, ave_test_llk))\n log_file.flush()\n os.fsync(log_file.fileno())\n\n y_pred, logtis_pred = model.predict(test_inds, return_logits=True, batch_size = 1024)\n acc = common_funcs.metrics_accuracy(test_rates, y_pred)\n auc = common_funcs.metrics_auc(test_rates, logtis_pred)\n\n test_llk = model.predict_log_llk(test_inds, test_rates, batch_size=1024)\n ave_test_llk = np.average(test_llk)\n\n print(\"\\nbatch = %d, progress = %4.3g, test_acc = %g, test_auc = %g, ave_llk = %g\\n\" % ( n_batch, n_batch / num_batches * 100, acc, auc, ave_test_llk))\n log_file.write(\"batch = %d, progress = %4.3g, test_acc = %g, 
test_auc = %g, ave_llk = %g\\n\" % ( n_batch, n_batch / num_batches * 100, acc, auc, ave_test_llk))\n log_file.close()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "tensorflow.reshape", "tensorflow.global_variables_initializer", "numpy.concatenate", "tensorflow.concat", "numpy.log", "tensorflow.Variable", "numpy.eye", "tensorflow.ConfigProto", "tensorflow.variable_scope", "tensorflow.linalg.solve", "tensorflow.constant", "numpy.sqrt", "numpy.linalg.cholesky", "numpy.array", "tensorflow.train.AdamOptimizer", "numpy.zeros", "tensorflow.Session", "numpy.random.randn", "tensorflow.placeholder", "tensorflow.RunOptions", "tensorflow.assign", "tensorflow.random.normal", "numpy.sum", "sklearn.cluster.KMeans", "numpy.ones", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "numpy.average", "tensorflow.stop_gradient", "numpy.unique" ] ]
Yannick947/deep_bau
[ "878cbc452cee8f1e7832ccdd54e5c3ef598702e1" ]
[ "machine_learning/optimize_architecture.py" ]
[ "import datetime\n\nfrom machine_learning.classfication_models import create_bayesian_classifier, create_bayesian_dummy_classifier\nimport numpy as np\nimport pandas as pd\nimport kerastuner as kt\nfrom machine_learning.utils import get_datagen_split\nimport machine_learning.models\nimport machine_learning.classfication_models\nfrom machine_learning.models import create_hyperband_model\nfrom machine_learning.data_generator import BauGenerator\n\nBATCH_SIZE = 256\nLOOK_AHEAD_SIZE = 1\nLOOK_BACK_WINDOW_SIZE = 10\n\n\ndef dummy_classification(df: pd.DataFrame):\n df_train, df_val = get_datagen_split(df)\n\n datagen_train = BauGenerator(df=df_train, batch_size=BATCH_SIZE,\n window_size=LOOK_BACK_WINDOW_SIZE,\n look_ahead_steps=LOOK_AHEAD_SIZE)\n\n datagen_val = BauGenerator(df=df_val, batch_size=BATCH_SIZE,\n window_size=LOOK_BACK_WINDOW_SIZE,\n look_ahead_steps=LOOK_AHEAD_SIZE)\n\n machine_learning.classfication_models.HYPER_NUM_ROWS_DF = datagen_train.X_batches.shape[2]\n machine_learning.classfication_models.HYPER_NUM_OUTPUT_FIELDS = datagen_train.Y_batches.shape[\n 1]\n machine_learning.classfication_models.HYPER_WINDOW_SIZE = LOOK_BACK_WINDOW_SIZE\n machine_learning.classfication_models.HYPER_LOOK_AHEAD_SIZE = LOOK_AHEAD_SIZE\n\n tuner = kt.BayesianOptimization(create_bayesian_dummy_classifier,\n objective='val_accuracy',\n max_trials=100,\n project_name=\"arch_opt_\")\n\n tuner.search(datagen_train,\n validation_data=datagen_val,\n epochs=60,\n callbacks=[],\n workers=16)\n best_model = tuner.get_best_models(1)[0]\n best_hyperparameters = tuner.get_best_hyperparameters(1)[0]\n print(best_hyperparameters)\n\n\ndef bayesian_classification_optimization(df: pd.DataFrame):\n df_train, df_val = get_datagen_split(df)\n\n datagen_train = BauGenerator(df=df_train, batch_size=BATCH_SIZE,\n window_size=LOOK_BACK_WINDOW_SIZE,\n look_ahead_steps=LOOK_AHEAD_SIZE)\n\n datagen_val = BauGenerator(df=df_val, batch_size=BATCH_SIZE,\n window_size=LOOK_BACK_WINDOW_SIZE,\n look_ahead_steps=LOOK_AHEAD_SIZE)\n\n machine_learning.classfication_models.HYPER_NUM_ROWS_DF = datagen_train.X_batches.shape[2]\n machine_learning.classfication_models.HYPER_NUM_OUTPUT_FIELDS = datagen_train.Y_batches.shape[\n 1]\n machine_learning.classfication_models.HYPER_WINDOW_SIZE = LOOK_BACK_WINDOW_SIZE\n machine_learning.classfication_models.HYPER_LOOK_AHEAD_SIZE = LOOK_AHEAD_SIZE\n\n tuner = kt.BayesianOptimization(create_bayesian_classifier,\n objective='val_loss',\n max_trials=200)\n\n tuner.search(datagen_train,\n validation_data=datagen_val,\n epochs=150,\n callbacks=[],\n workers=16)\n best_model = tuner.get_best_models(1)[0]\n best_hyperparameters = tuner.get_best_hyperparameters(1)[0]\n print(best_hyperparameters)\n\n\ndef hyperband_optimization(df: pd.DataFrame):\n df_train, df_val = get_datagen_split(df)\n\n datagen_train = BauGenerator(df=df_train, binarize_activity_hours=False, batch_size=BATCH_SIZE,\n window_size=LOOK_BACK_WINDOW_SIZE,\n look_ahead_steps=LOOK_AHEAD_SIZE)\n\n datagen_val = BauGenerator(df=df_val, binarize_activity_hours=False, batch_size=BATCH_SIZE,\n window_size=LOOK_BACK_WINDOW_SIZE,\n look_ahead_steps=LOOK_AHEAD_SIZE)\n\n machine_learning.models.HYPER_NUM_ROWS_DF = datagen_train.X_batches.shape[2]\n machine_learning.models.HYPER_NUM_OUTPUT_FIELDS = datagen_train.Y_batches.shape[2]\n machine_learning.models.HYPER_WINDOW_SIZE = LOOK_BACK_WINDOW_SIZE\n machine_learning.models.HYPER_LOOK_AHEAD_SIZE = LOOK_AHEAD_SIZE\n\n tuner = kt.BayesianOptimization(create_hyperband_model,\n 
objective='val_binary_accuracy',\n max_trials=200)\n\n tuner.search(datagen_train,\n validation_data=datagen_val,\n epochs=70,\n callbacks=[],\n workers=16)\n best_model = tuner.get_best_models(1)[0]\n best_hyperparameters = tuner.get_best_hyperparameters(1)[0]\n print(best_hyperparameters)\n\n\nif __name__ == '__main__':\n\n PERCENTAGE_USED_DATA = 0.7\n\n working_hours = pd.read_csv(\n \"./data/preprocessed/df_deep_bau.csv\", error_bad_lines=False, sep=',', index_col=False)\n\n start_index = int((1 - PERCENTAGE_USED_DATA) * working_hours.shape[0])\n\n working_hours = working_hours[start_index:]\n\n df = working_hours.select_dtypes([np.number])\n\n dummy_classification(df)\n" ]
[ [ "pandas.read_csv" ] ]
Merterm/-Modeling-Intensification-for-SLG
[ "800fff3d3c7bacc86c1db8382f7c2e68d2f0c074" ]
[ "code/generation-model/dynamic_selection/decoders.py" ]
[ "from codecs import EncodedFile\nimport torch.nn as nn\nfrom torch import Tensor\nimport torch\nfrom helpers import freeze_params, ConfigurationError, subsequent_mask, uneven_subsequent_mask\nfrom transformer_layers import PositionalEncoding, \\\n TransformerDecoderLayer, TransformerEncoderLayer\nimport random\n\n\nclass Decoder(nn.Module):\n \"\"\"\n Base decoder class\n \"\"\"\n\n @property\n def output_size(self):\n \"\"\"\n Return the output size (size of the target vocabulary)\n\n :return:\n \"\"\"\n return self._output_size\n\n\nclass TransformerDecoder(Decoder):\n \"\"\"\n A transformer decoder with N masked layers.\n Decoder layers are masked so that an attention head cannot see the future.\n \"\"\"\n\n def __init__(self,\n num_layers: int = 4,\n num_heads: int = 8,\n hidden_size: int = 512,\n ff_size: int = 2048,\n dropout: float = 0.1,\n emb_dropout: float = 0.1,\n vocab_size: int = 1,\n freeze: bool = False,\n trg_size: int = 97,\n decoder_trg_trg_: bool = True,\n **kwargs):\n \"\"\"\n Initialize a Transformer decoder.\n\n :param num_layers: number of Transformer layers\n :param num_heads: number of heads for each layer\n :param hidden_size: hidden size\n :param ff_size: position-wise feed-forward size\n :param dropout: dropout probability (1-keep)\n :param emb_dropout: dropout probability for embeddings\n :param vocab_size: size of the output vocabulary\n :param freeze: set to True keep all decoder parameters fixed\n :param kwargs:\n \"\"\"\n super(TransformerDecoder, self).__init__()\n\n self._hidden_size = hidden_size\n\n # Dynamic output size depending on the target size\n self._output_size = trg_size\n\n # create num_layers decoder layers and put them in a list\n self.layers = nn.ModuleList([TransformerDecoderLayer(\n size=hidden_size, ff_size=ff_size, num_heads=num_heads,\n dropout=dropout, decoder_trg_trg=decoder_trg_trg_) for _ in range(num_layers)])\n\n self.pe = PositionalEncoding(hidden_size, mask_count=True)\n self.layer_norm = nn.LayerNorm(hidden_size, eps=1e-6)\n\n self.emb_dropout = nn.Dropout(p=emb_dropout)\n\n # Output layer to be the size of joints vector + 1 for counter (total is trg_size)\n self.output_layer = nn.Linear(hidden_size, trg_size, bias=False)\n\n if freeze:\n freeze_params(self)\n\n def forward(self,\n trg_embed: Tensor = None,\n encoder_output: Tensor = None,\n src_mask: Tensor = None,\n trg_mask: Tensor = None,\n **kwargs):\n \"\"\"\n Transformer decoder forward pass.\n\n :param trg_embed: embedded targets\n :param encoder_output: source representations\n :param encoder_hidden: unused\n :param src_mask:\n :param unroll_steps: unused\n :param hidden: unused\n :param trg_mask: to mask out target paddings\n Note that a subsequent mask is applied here.\n :param kwargs:\n :return:\n \"\"\"\n assert trg_mask is not None, \"trg_mask required for Transformer\"\n\n # add position encoding to word embedding\n x = self.pe(trg_embed)\n # Dropout if given\n x = self.emb_dropout(x)\n\n padding_mask = trg_mask\n # Create subsequent mask for decoding\n sub_mask = subsequent_mask(\n trg_embed.size(1)).type_as(trg_mask)\n\n # Apply each layer to the input\n for layer in self.layers:\n x = layer(x=x, memory=encoder_output,\n src_mask=src_mask, trg_mask=sub_mask, padding_mask=padding_mask)\n\n # Apply a layer normalisation\n x = self.layer_norm(x)\n # Output layer turns it back into vectors of size trg_size\n output = self.output_layer(x)\n\n return output, x, None, None\n\n def __repr__(self):\n return \"%s(num_layers=%r, num_heads=%r)\" % (\n 
self.__class__.__name__, len(self.layers),\n self.layers[0].trg_trg_att.num_heads)\n\n\nclass DynamicTransformerDecoder(Decoder):\n \"\"\"\n A transformer decoder with N masked layers.\n Decoder layers are masked so that an attention head cannot see the future.\n \"\"\"\n\n def __init__(self,\n num_layers: int = 4,\n num_heads: int = 8,\n hidden_size: int = 512,\n ff_size: int = 2048,\n dropout: float = 0.1,\n emb_dropout: float = 0.1,\n vocab_size: int = 1,\n freeze: bool = False,\n trg_size: int = 97,\n decoder_trg_trg_: bool = True,\n **kwargs):\n \"\"\"\n Initialize a Transformer decoder.\n\n :param num_layers: number of Transformer layers\n :param num_heads: number of heads for each layer\n :param hidden_size: hidden size\n :param ff_size: position-wise feed-forward size\n :param dropout: dropout probability (1-keep)\n :param emb_dropout: dropout probability for embeddings\n :param vocab_size: size of the output vocabulary\n :param freeze: set to True keep all decoder parameters fixed\n :param kwargs:\n \"\"\"\n super(DynamicTransformerDecoder, self).__init__()\n\n self._hidden_size = hidden_size\n\n # Dynamic output size depending on the target size\n self._output_size = trg_size\n\n # create num_layers decoder layers and put them in a list\n self.layers = nn.ModuleList([TransformerDecoderLayer(\n size=hidden_size, ff_size=ff_size, num_heads=num_heads,\n dropout=dropout, decoder_trg_trg=decoder_trg_trg_) for _ in range(num_layers)])\n\n self.pe = PositionalEncoding(hidden_size, mask_count=True)\n self.layer_norm = nn.LayerNorm(hidden_size, eps=1e-6)\n\n self.emb_dropout = nn.Dropout(p=emb_dropout)\n self.softmax = nn.Softmax(dim=2)\n # Output layer to be the size of joints vector + 1 for counter (total is trg_size)\n self.output_layer = nn.Linear(hidden_size, trg_size, bias=False)\n self.mlp = nn.Sequential(nn.Linear(hidden_size, hidden_size // 2),\n nn.ReLU(),\n nn.Linear(hidden_size // 2, 1))\n\n if freeze:\n freeze_params(self)\n\n def forward(self,\n trg_embed: Tensor = None,\n encoder1_output: Tensor = None,\n encoder2_output: Tensor = None,\n src1_mask: Tensor = None,\n src2_mask: Tensor = None,\n trg_mask: Tensor = None,\n T=None,\n balance_weight=None,\n epoch_num=0,\n **kwargs):\n \"\"\"\n Transformer decoder forward pass.\n\n :param trg_embed: embedded targets\n :param encoder_output: source representations\n :param encoder_hidden: unused\n :param src_mask:\n :param unroll_steps: unused\n :param hidden: unused\n :param trg_mask: to mask out target paddings\n Note that a subsequent mask is applied here.\n :param kwargs:\n :return:\n \"\"\"\n assert trg_mask is not None, \"trg_mask required for Transformer\"\n\n self.balance_weight = balance_weight\n self.T = T\n # add position encoding to word embedding\n x = self.pe(trg_embed)\n\n # Dropout if given\n x1 = self.emb_dropout(x)\n x2 = x1.clone()\n \n\n padding_mask = trg_mask\n # Create subsequent mask for decoding\n sub_mask = subsequent_mask(\n trg_embed.size(1)).type_as(trg_mask)\n\n # Apply each layer to the input\n for layer in self.layers:\n\n x1 = layer(x=x1, memory=encoder1_output,\n src_mask=src1_mask, trg_mask=sub_mask, padding_mask=padding_mask)\n x2 = layer(x=x2, memory=encoder2_output,\n src_mask=src2_mask, trg_mask=sub_mask, padding_mask=padding_mask)\n\n # Have three views and at each time step, do a weighted result.\n\n # G step:\n if epoch_num % 1 == 0:\n #print(\"update expert picker\")\n p1 = self.mlp(x1)\n p2 = self.mlp(x2)\n\n Hw = torch.cat([p1, p2], dim=2)\n\n balance_weight = self.softmax(Hw)\n\n # # 
weighted.\n # all_attns = torch.cat(\n # [x1.unsqueeze(2), x2.unsqueeze(2)], dim=2)\n # new_weight = balance_weight.unsqueeze(-1).expand_as(all_attns)\n # combined_x = all_attns.mul(new_weight)\n # combined_x = combined_x.sum(dim=2).squeeze(2)\n\n # # one hot.\n a, new_indexes = torch.max(balance_weight, dim=2)\n mask1 = new_indexes == 0\n mask1 = mask1.unsqueeze(-1).expand_as(x1)\n mask2 = new_indexes == 1\n \n mask2 = mask2.unsqueeze(-1).expand_as(x2)\n \n combined_x = x1 * mask1 + x2*mask2\n else:\n choice = random.choice([0, 1])\n combined_x = [x1, x2][choice]\n\n combined_x = self.layer_norm(combined_x)\n # Output layer turns it back into vectors of size trg_size\n output = self.output_layer(combined_x)\n\n return output, x1, None, None\n\n def __repr__(self):\n return \"%s(num_layers=%r, num_heads=%r)\" % (\n self.__class__.__name__, len(self.layers),\n self.layers[0].trg_trg_att.num_heads)\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.LayerNorm", "torch.cat", "torch.nn.Softmax", "torch.max", "torch.nn.ReLU" ] ]
tls1403/PythonTest
[ "069f23b25ec655aa199d13aef9c14d2e33366861" ]
[ "part4/matplotlib/practice.py" ]
[ "import pandas as pd\nimport matplotlib.pyplot as plt\n#ํ•œ๊ธ€ ํฐํŠธ ์˜ค๋ฅ˜ ์ œ๊ฑฐ\nfrom matplotlib import font_manager,rc\nfont_path =\"D:/5674-833_4th/part4/malgun.ttf\"\nfont_name = font_manager.FontProperties(fname=font_path).get_name()\nrc('font',family = font_name)\n\ndf = pd.read_excel('D:/5674-833_4th/part4/์‹œ๋„๋ณ„ ์ „์ถœ์ž… ์ธ๊ตฌ์ˆ˜.xlsx',engine = 'openpyxl',header =0)\ndf = df.fillna(method='ffill') #๋ˆ„๋ฝ๊ฐ’์„ ์•ž ๋ฐ์ดํ„ฐ๋กœ ์ฑ„์›€\n\n#์„œ์šธ์—์„œ ๋‹ค๋ฅธ ์ง€์—ญ์œผ๋กœ ์ด๋™ํ•œ ๋ฐ์ดํ„ฐ๋งŒ ์ถ”์ถœํ•˜์—ฌ ์ •๋ฆฌ\nmask = (df['์ „์ถœ์ง€๋ณ„'] == '์„œ์šธํŠน๋ณ„์‹œ') & (df['์ „์ž…์ง€๋ณ„'] != '์„œ์šธํŠน๋ณ„์‹œ')\ndf_seoul = df[mask]\ndf_seoul = df_seoul.drop(['์ „์ถœ์ง€๋ณ„'],axis= 1) #์ „์ถœ์ง€๋ณ„ column ์‚ญ์ œ\ndf_seoul.rename({'์ „์ž…์ง€๋ณ„':'์ „์ž…์ง€'},axis=1,inplace=True) #์ „์ž…์ง€๋ณ„ column์„ ์ „์ž…์ง€๋กœ ๋ฐ”๊ฟ”์คŒ\ndf_seoul.set_index('์ „์ž…์ง€',inplace = True)\n\ncol_years = list(map(str,range(2010,2018)))\ndf_4 = df_seoul.loc[['์ถฉ์ฒญ๋‚จ๋„','๊ฒฝ์ƒ๋ถ๋„','๊ฐ•์›๋„','์ „๋ผ๋‚จ๋„'],col_years]\ndf_4['ํ•ฉ๊ณ„'] = df_4.sum(axis = 1) #ํ•ฉ๊ณ„๋ณ€์ˆ˜ ์ถ”๊ฐ€\ndf_total = df_4[['ํ•ฉ๊ณ„']].sort_values(by='ํ•ฉ๊ณ„',ascending= True) #df_4['ํ•ฉ๊ณ„'] ๋Š” datatype์ด Series ์ด๊ณ  df_4[['ํ•ฉ๊ณ„']] ๋Š” datatype ์ด dataFrame ์ด๋‹ค.\n#์Šคํƒ€์ผ ์„œ์‹ ์ง€์ •\nplt.style.use('ggplot')\n#์ˆ˜ํ‰๋ง‰๋Œ€๊ทธ๋ž˜ํ”„ ๊ทธ๋ฆฌ๊ธฐ\ndf_total.plot(kind = 'barh',color='cornflowerblue',width =0.5,figsize = (10,5))\n\nplt.title('์„œ์šธ -> ํƒ€์‹œ๋„ ์ธ๊ตฌ์ด๋™')\nplt.ylabel('์ „์ž…์ง€')\nplt.xlabel('์ด๋™ ์ธ๊ตฌ์ˆ˜')\nplt.show()\n" ]
[ [ "matplotlib.font_manager.FontProperties", "matplotlib.pyplot.xlabel", "pandas.read_excel", "matplotlib.pyplot.title", "matplotlib.rc", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.style.use", "matplotlib.pyplot.show" ] ]
jfsantos/stable-baselines
[ "5bd4ffa98e364b9e8e8b4e64bc2d1be9b6e4897a" ]
[ "stable_baselines/her/experiment/train.py" ]
[ "import os\nimport sys\nfrom subprocess import CalledProcessError\n\nimport click\nimport numpy as np\nimport json\nfrom mpi4py import MPI\n\nfrom stable_baselines import logger\nfrom stable_baselines.common import set_global_seeds, tf_util\nfrom stable_baselines.common.mpi_moments import mpi_moments\nimport stable_baselines.her.experiment.config as config\nfrom stable_baselines.her.rollout import RolloutWorker\nfrom stable_baselines.her.util import mpi_fork\n\n\ndef mpi_average(value):\n \"\"\"\n calculate the average from the array, using MPI\n\n :param value: (numpy Number) the array\n :return: (float) the average\n \"\"\"\n if len(value) == 0:\n value = [0.]\n if not isinstance(value, list):\n value = [value]\n return mpi_moments(np.array(value))[0]\n\n\ndef train(policy, rollout_worker, evaluator, n_epochs, n_test_rollouts, n_cycles, n_batches, policy_save_interval,\n save_policies):\n \"\"\"\n train the given policy\n\n :param policy: (her.DDPG) the policy to train\n :param rollout_worker: (RolloutWorker) Rollout worker generates experience for training.\n :param evaluator: (RolloutWorker) Rollout worker for evalutation\n :param n_epochs: (int) the number of epochs\n :param n_test_rollouts: (int) the number of for the evalutation RolloutWorker\n :param n_cycles: (int) the number of cycles for training per epoch\n :param n_batches: (int) the batch size\n :param policy_save_interval: (int) the interval with which policy pickles are saved.\n If set to 0, only the best and latest policy will be pickled.\n :param save_policies: (bool) whether or not to save the policies\n \"\"\"\n rank = MPI.COMM_WORLD.Get_rank()\n\n latest_policy_path = os.path.join(logger.get_dir(), 'policy_latest.pkl')\n best_policy_path = os.path.join(logger.get_dir(), 'policy_best.pkl')\n periodic_policy_path = os.path.join(logger.get_dir(), 'policy_{}.pkl')\n\n logger.info(\"Training...\")\n best_success_rate = -1\n for epoch in range(n_epochs):\n # train\n rollout_worker.clear_history()\n for _ in range(n_cycles):\n episode = rollout_worker.generate_rollouts()\n policy.store_episode(episode)\n for _ in range(n_batches):\n policy.train_step()\n policy.update_target_net()\n\n # test\n evaluator.clear_history()\n for _ in range(n_test_rollouts):\n evaluator.generate_rollouts()\n\n # record logs\n logger.record_tabular('epoch', epoch)\n for key, val in evaluator.logs('test'):\n logger.record_tabular(key, mpi_average(val))\n for key, val in rollout_worker.logs('train'):\n logger.record_tabular(key, mpi_average(val))\n for key, val in policy.logs():\n logger.record_tabular(key, mpi_average(val))\n\n if rank == 0:\n logger.dump_tabular()\n\n # save the policy if it's better than the previous ones\n success_rate = mpi_average(evaluator.current_success_rate())\n if rank == 0 and success_rate >= best_success_rate and save_policies:\n best_success_rate = success_rate\n logger.info('New best success rate: {}. 
Saving policy to {} ...'\n .format(best_success_rate, best_policy_path))\n evaluator.save_policy(best_policy_path)\n evaluator.save_policy(latest_policy_path)\n if rank == 0 and policy_save_interval > 0 and epoch % policy_save_interval == 0 and save_policies:\n policy_path = periodic_policy_path.format(epoch)\n logger.info('Saving periodic policy to {} ...'.format(policy_path))\n evaluator.save_policy(policy_path)\n\n # make sure that different threads have different seeds\n local_uniform = np.random.uniform(size=(1,))\n root_uniform = local_uniform.copy()\n MPI.COMM_WORLD.Bcast(root_uniform, root=0)\n if rank != 0:\n assert local_uniform[0] != root_uniform[0]\n\n\ndef launch(env, logdir, n_epochs, num_cpu, seed, replay_strategy, policy_save_interval, clip_return,\n override_params=None, save_policies=True):\n \"\"\"\n launch training with mpi\n\n :param env: (str) environment ID\n :param logdir: (str) the log directory\n :param n_epochs: (int) the number of training epochs\n :param num_cpu: (int) the number of CPUs to run on\n :param seed: (int) the initial random seed\n :param replay_strategy: (str) the type of replay strategy ('future' or 'none')\n :param policy_save_interval: (int) the interval with which policy pickles are saved.\n If set to 0, only the best and latest policy will be pickled.\n :param clip_return: (float): clip returns to be in [-clip_return, clip_return]\n :param override_params: (dict) override any parameter for training\n :param save_policies: (bool) whether or not to save the policies\n \"\"\"\n\n if override_params is None:\n override_params = {}\n # Fork for multi-CPU MPI implementation.\n if num_cpu > 1:\n try:\n whoami = mpi_fork(num_cpu, ['--bind-to', 'core'])\n except CalledProcessError:\n # fancy version of mpi call failed, try simple version\n whoami = mpi_fork(num_cpu)\n\n if whoami == 'parent':\n sys.exit(0)\n tf_util.single_threaded_session().__enter__()\n rank = MPI.COMM_WORLD.Get_rank()\n\n # Configure logging\n if rank == 0:\n if logdir or logger.get_dir() is None:\n logger.configure(folder=logdir)\n else:\n logger.configure()\n logdir = logger.get_dir()\n assert logdir is not None\n os.makedirs(logdir, exist_ok=True)\n\n # Seed everything.\n rank_seed = seed + 1000000 * rank\n set_global_seeds(rank_seed)\n\n # Prepare params.\n params = config.DEFAULT_PARAMS\n params['env_name'] = env\n params['replay_strategy'] = replay_strategy\n if env in config.DEFAULT_ENV_PARAMS:\n params.update(config.DEFAULT_ENV_PARAMS[env]) # merge env-specific parameters in\n params.update(**override_params) # makes it possible to override any parameter\n with open(os.path.join(logger.get_dir(), 'params.json'), 'w') as file_handler:\n json.dump(params, file_handler)\n params = config.prepare_params(params)\n config.log_params(params, logger_input=logger)\n\n if num_cpu == 1:\n logger.warn()\n logger.warn('*** Warning ***')\n logger.warn(\n 'You are running HER with just a single MPI worker. This will work, but the ' +\n 'experiments that we report in Plappert et al. (2018, https://arxiv.org/abs/1802.09464) ' +\n 'were obtained with --num_cpu 19. This makes a significant difference and if you ' +\n 'are looking to reproduce those results, be aware of this. 
Please also refer to ' +\n 'https://github.com/openai/stable_baselines/issues/314 for further details.')\n logger.warn('****************')\n logger.warn()\n\n dims = config.configure_dims(params)\n policy = config.configure_ddpg(dims=dims, params=params, clip_return=clip_return)\n\n rollout_params = {\n 'exploit': False,\n 'use_target_net': False,\n # 'use_demo_states': True,\n 'compute_q': False,\n 'time_horizon': params['time_horizon'],\n }\n\n eval_params = {\n 'exploit': True,\n 'use_target_net': params['test_with_polyak'],\n # 'use_demo_states': False,\n 'compute_q': True,\n 'time_horizon': params['time_horizon'],\n }\n\n for name in ['time_horizon', 'rollout_batch_size', 'gamma', 'noise_eps', 'random_eps']:\n rollout_params[name] = params[name]\n eval_params[name] = params[name]\n\n rollout_worker = RolloutWorker(params['make_env'], policy, dims, logger, **rollout_params)\n rollout_worker.seed(rank_seed)\n\n evaluator = RolloutWorker(params['make_env'], policy, dims, logger, **eval_params)\n evaluator.seed(rank_seed)\n\n train(\n policy=policy, rollout_worker=rollout_worker,\n evaluator=evaluator, n_epochs=n_epochs, n_test_rollouts=params['n_test_rollouts'],\n n_cycles=params['n_cycles'], n_batches=params['n_batches'],\n policy_save_interval=policy_save_interval, save_policies=save_policies)\n\n\[email protected]()\[email protected]('--env', type=str, default='FetchReach-v1',\n help='the name of the OpenAI Gym environment that you want to train on')\[email protected]('--logdir', type=str, default=None,\n help='the path to where logs and policy pickles should go. If not specified, creates a folder in /tmp/')\[email protected]('--n_epochs', type=int, default=50, help='the number of training epochs to run')\[email protected]('--num_cpu', type=int, default=1, help='the number of CPU cores to use (using MPI)')\[email protected]('--seed', type=int, default=0,\n help='the random seed used to seed both the environment and the training code')\[email protected]('--policy_save_interval', type=int, default=5,\n help='the interval with which policy pickles are saved. '\n 'If set to 0, only the best and latest policy will be pickled.')\[email protected]('--replay_strategy', type=click.Choice(['future', 'none']), default='future',\n help='the HER replay strategy to be used. \"future\" uses HER, \"none\" disables HER.')\[email protected]('--clip_return', type=int, default=1, help='whether or not returns should be clipped')\ndef main(**kwargs):\n \"\"\"\n run launch for MPI HER DDPG training\n\n :param kwargs: (dict) the launch kwargs\n \"\"\"\n launch(**kwargs)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "numpy.random.uniform" ] ]
shar999/mead-baseline
[ "bd9cd02c0a1d9c0df91aca171774a6967e6ce190" ]
[ "baseline/utils.py" ]
[ "import six\nimport os\nimport re\nimport sys\nimport json\nimport gzip\nimport pickle\nimport hashlib\nimport logging\nimport zipfile\nimport platform\nfrom operator import lt, le, gt, ge\nfrom contextlib import contextmanager\nfrom typing import Dict, List, Set, Optional\nimport numpy as np\nimport collections\nimport eight_mile\nimport importlib\nfrom eight_mile.utils import *\nfrom eight_mile.downloads import *\nimport addons\n\n\n__all__ = []\n__all__.extend(eight_mile.utils.__all__)\n__all__.extend(eight_mile.downloads.__all__)\nlogger = logging.getLogger('baseline')\n# These are inputs to models that shouldn't be saved out\nMAGIC_VARS = ['sess', 'tgt', 'y', 'lengths', 'gpus']\nMAGIC_VARS = ['sess', 'tgt', 'y', 'lengths']\n\nexport = exporter(__all__)\n\n\nexport(str2bool)\n\n\n@export\ndef normalize_backend(name: str) -> str:\n allowed_backends = {'tf', 'pytorch', 'onnx'}\n name = name.lower()\n if name == 'tensorflow':\n name = 'tf'\n elif name == 'torch' or name == 'pyt':\n name = 'pytorch'\n if name not in allowed_backends:\n raise ValueError(\"Supported backends are %s, got %s\" % (allowed_backends, name))\n return name\n\n\n@export\ndef get_console_logger(name, level=None, env_key='LOG_LEVEL'):\n \"\"\"A small default logging setup.\n\n This is a default logging setup to print json formatted logging to\n the console. This is used as a default for when baseline/mead is used\n as an API. This can be overridden with the logging config.\n\n The level defaults to `INFO` but can also be read from an env var\n of you choice with a back off to `LOG_LEVEL`\n\n :param name: `str` The logger to create.\n :param level: `str` The level to look for.\n :param env_key: `str` The env var to look in.\n\n :returns: logging.Logger\n \"\"\"\n if level is None:\n level = os.getenv(env_key, os.getenv('LOG_LEVEL', 'INFO'))\n level = get_logging_level(level)\n logger = logging.getLogger(name)\n logger.setLevel(level)\n ch = logging.StreamHandler()\n ch.setLevel(level)\n formatter = JSONFormatter()\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n logger.propagate = False\n return logger\n\n\n@contextmanager\ndef redirect(from_stream, to_stream):\n original_from = from_stream.fileno()\n saved_from = os.dup(original_from)\n os.dup2(to_stream.fileno(), original_from)\n try:\n yield\n os.dup2(saved_from, original_from)\n except Exception as e:\n os.dup2(saved_from, original_from)\n raise(e)\n\n\n@export\n@contextmanager\ndef suppress_output():\n with open(os.devnull, 'w') as devnull, redirect(sys.stdout, devnull), redirect(sys.stderr, devnull):\n yield\n\n\n@export\nclass Colors:\n GREEN = '\\033[32;1m'\n RED = '\\033[31;1m'\n YELLOW = '\\033[33;1m'\n BLACK = '\\033[30;1m'\n CYAN = '\\033[36;1m'\n RESTORE = '\\033[0m'\n\n\n@export\ndef color(msg: str, color: str) -> str:\n if platform.system() == 'Windows':\n return msg\n return f\"{color}{msg}{Colors.RESTORE}\"\n\n\nclass ColoredFormatter(logging.Formatter):\n COLORS = {\n 'WARNING': Colors.YELLOW,\n 'ERROR': Colors.RED\n }\n\n def format(self, record):\n if record.levelname in self.COLORS:\n return color(super().format(record), self.COLORS[record.levelname])\n return super().format(record)\n\n\nclass JSONFormatter(ColoredFormatter):\n \"\"\"Format message as JSON if possible, log normally otherwise.\"\"\"\n def format(self, record):\n try:\n if isinstance(record.msg, (list, dict)):\n return json.dumps(record.msg)\n except TypeError:\n pass\n return super().format(record)\n\n\nclass MakeFileHandler(logging.FileHandler):\n \"\"\"A File logger that 
will create intermediate dirs if need be.\"\"\"\n def __init__(self, filename, mode='a', encoding=None, delay=0):\n log_dir = os.path.dirname(filename)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n super().__init__(filename, mode, encoding, delay)\n\n\n@export\ndef lowercase(x):\n return x.lower()\n\n\nUNREP_EMOTICONS = (\n ':)',\n ':(((',\n ':D',\n '=)',\n ':-)',\n '=(',\n '(=',\n '=[[',\n)\n\n\n@export\ndef web_cleanup(word):\n if word.startswith('http'): return 'URL'\n if word.startswith('@'): return '@@@@'\n if word.startswith('#'): return '####'\n if word == '\"': return ','\n if word in UNREP_EMOTICONS: return ';)'\n if word == '<3': return '&lt;3'\n return word\n\n\n@export\ndef get_model_file(task, platform, basedir=None):\n \"\"\"Model name file helper to abstract different DL platforms (FWs)\n\n :param dictionary:\n :param task:\n :param platform:\n :return:\n \"\"\"\n basedir = './' if basedir is None else basedir\n base = '{}/{}-model'.format(basedir, task)\n rid = os.getpid()\n if platform.startswith('pyt'):\n name = '%s-%d.pyt' % (base, rid)\n else:\n name = '%s-%s-%d' % (base, platform, rid)\n logger.info('model file [%s]' % name)\n return name\n\n\n@export\ndef lookup_sentence(rlut: Dict[int, str], seq: List[str], reverse: bool = False, padchar: str = '') -> str:\n \"\"\"Lookup a sentence by id and return words\n\n :param rlut: an index -> word lookup table\n :param seq: A temporal sequence\n :param reverse: (``bool``) Should reverse?\n :param padchar: What padding character to use when replacing with words\n :return:\n \"\"\"\n s = seq[::-1] if reverse else seq\n res = []\n for idx in s:\n idx = int(idx)\n char = padchar\n if idx == Offsets.EOS: break\n if idx != Offsets.PAD and idx != Offsets.GO:\n char = rlut[idx]\n res.append(char)\n return (' '.join(res)).strip()\n\n\n@export\ndef topk(k, probs):\n \"\"\"Get a sparse index (dictionary of top values).\"\"\"\n idx = np.argpartition(probs, probs.size-k)[-k:]\n sort = idx[np.argsort(probs[idx])][::-1]\n return dict(zip(sort, probs[sort]))\n\n\n@export\ndef beam_multinomial(k, probs):\n \"\"\"Prune all elements in a large probability distribution below the top K.\n\n Renormalize the distribution with only top K, and then sample n times out of that.\n \"\"\"\n\n tops = topk(k, probs)\n i = 0\n n = len(tops.keys())\n ary = np.zeros((n))\n idx = []\n for abs_idx, v in tops.items():\n ary[i] = v\n idx.append(abs_idx)\n i += 1\n\n ary /= np.sum(ary)\n sample_idx = np.argmax(np.random.multinomial(1, ary))\n return idx[sample_idx]\n\n\n@export\ndef unzip_model(path):\n path = unzip_files(path)\n return os.path.join(path, [x[:-6] for x in os.listdir(path) if 'index' in x][0])\n\n\n@export\ndef save_vectorizers(basedir, vectorizers, name='vectorizers'):\n save_md_file = os.path.join(basedir, '{}-{}.pkl'.format(name, os.getpid()))\n with open(save_md_file, 'wb') as f:\n pickle.dump(vectorizers, f)\n # Save out the vectorizer module names so we can automatically import them\n # when reloading without going all the way to a pure json save\n vectorizer_modules = [v.__class__.__module__ for v in vectorizers.values()]\n module_file = os.path.join(basedir, '{}-{}.json'.format(name, os.getpid()))\n write_json(vectorizer_modules, module_file)\n\n\n@export\ndef save_vocabs(basedir, embeds_or_vocabs, name='vocabs'):\n for k, embeds_or_vocabs in embeds_or_vocabs.items():\n save_md = '{}/{}-{}-{}.json'.format(basedir, name, k, os.getpid())\n # Its a vocab\n if isinstance(embeds_or_vocabs, collections.Mapping):\n 
write_json(embeds_or_vocabs, save_md)\n # Type is embeds\n else:\n write_json(embeds_or_vocabs.vocab, save_md)\n\n\n@export\ndef load_vocabs(directory: str, suffix: Optional[str] = None):\n vocab_fnames = find_files_with_prefix(directory, 'vocabs', suffix)\n vocabs = {}\n for f in vocab_fnames:\n logger.info(f)\n k = f.split('-')[-2]\n vocab = read_json(f)\n vocabs[k] = vocab\n return vocabs\n\n\n@export\ndef load_vectorizers(directory: str, data_download_cache: Optional[str] = None):\n vectorizers_fname = find_files_with_prefix(directory, 'vectorizers')\n # Find the module list for the vectorizer so we can import them without\n # needing to bother the user with providing them\n vectorizers_modules = [x for x in vectorizers_fname if 'json' in x][0]\n modules = read_json(vectorizers_modules)\n for module in modules:\n import_user_module(module, data_download_cache)\n vectorizers_pickle = [x for x in vectorizers_fname if 'pkl' in x][0]\n with open(vectorizers_pickle, \"rb\") as f:\n vectorizers = pickle.load(f)\n return vectorizers\n\n\n@export\ndef unzip_files(zip_path):\n if os.path.isdir(zip_path):\n return zip_path\n from eight_mile.utils import mime_type\n if mime_type(zip_path) == 'application/zip':\n with open(zip_path, 'rb') as f:\n sha1 = hashlib.sha1(f.read()).hexdigest()\n temp_dir = os.path.join(\"/tmp/\", sha1)\n if not os.path.exists(temp_dir):\n logger.info(\"unzipping model\")\n with zipfile.ZipFile(zip_path, \"r\") as zip_ref:\n zip_ref.extractall(temp_dir)\n if len(os.listdir(temp_dir)) == 1: # a directory was zipped v files\n temp_dir = os.path.join(temp_dir, os.listdir(temp_dir)[0])\n return temp_dir\n return zip_path\n\n\n@export\ndef find_model_basename(directory, basename=None):\n if not basename:\n basename = [x for x in os.listdir(directory) if 'model' in x and '-md' not in x and 'wgt' not in x and '.assets' not in x][0]\n else:\n globname = os.path.join(directory, basename)\n if not os.path.isfile(globname):\n import glob\n out = glob.glob(f'{globname}*')\n out = [x for x in out if 'model' in x and '-md' not in x and 'wgt' not in x and '.assets' not in x][0]\n basename = out\n path = os.path.join(directory, basename)\n logger.info(path)\n path = path.split('.')[:-1]\n return '.'.join(path)\n\n\n@export\ndef find_files_with_prefix(directory, prefix, suffix=None):\n\n files_with_prefix = [os.path.join(directory, x) for x in os.listdir(directory) if x.startswith(prefix)]\n if suffix:\n files_with_prefix = [f for f in files_with_prefix if f.endswith(suffix)]\n return files_with_prefix\n\n\n@export\ndef zip_files(basedir, limit_to_pid=True):\n pid = str(os.getpid())\n tgt_zip_base = os.path.abspath(basedir)\n zip_name = os.path.basename(tgt_zip_base)\n if limit_to_pid:\n model_files = [x for x in os.listdir(basedir) if pid in x and os.path.isfile(os.path.join(basedir, x))]\n else:\n model_files = [x for x in os.listdir(basedir) if os.path.isfile(os.path.join(basedir, x))]\n with zipfile.ZipFile(\"{}-{}.zip\".format(tgt_zip_base, pid), \"w\") as z:\n for f in model_files:\n abs_f = os.path.join(basedir, f)\n z.write(abs_f, os.path.join(zip_name, f))\n os.remove(abs_f)\n\n\n@export\ndef zip_model(path):\n \"\"\"zips the model files\"\"\"\n logger.info(\"zipping model files\")\n model_files = [x for x in os.listdir(\".\") if path[2:] in x]\n z = zipfile.ZipFile(\"{}.zip\".format(path), \"w\")\n for f in model_files:\n z.write(f)\n os.remove(f)\n z.close()\n\n\n@export\ndef verbose_output(verbose, confusion_matrix):\n if verbose is None:\n return\n do_print = 
bool(verbose.get(\"console\", False))\n outfile = verbose.get(\"file\", None)\n if do_print:\n logger.info(confusion_matrix)\n if outfile is not None:\n confusion_matrix.save(outfile)\n\n\nLESS_THAN_METRICS = {\"avg_loss\", \"loss\", \"perplexity\", \"ppl\"}\n\n\n@export\ndef get_metric_cmp(metric, user_cmp=None, less_than_metrics=LESS_THAN_METRICS):\n if user_cmp is not None:\n return _try_user_cmp(user_cmp)\n if metric in less_than_metrics:\n return lt, six.MAXSIZE\n return gt, -six.MAXSIZE - 1\n\n\ndef _try_user_cmp(user_cmp):\n user_cmp = user_cmp.lower()\n if user_cmp in {\"lt\", \"less\", \"less than\", \"<\", \"less_than\"}:\n return lt, six.MAXSIZE\n if user_cmp in {\"le\", \"lte\", \"<=\"}:\n return le, six.MAXSIZE\n if user_cmp in {\"ge\", \"gte\", \">=\"}:\n return ge, -six.MAXSIZE - 1\n return gt, -six.MAXSIZE - 1\n\n\n@export\ndef show_examples(model, es, rlut1, rlut2, vocab, mxlen, sample, prob_clip, max_examples, reverse):\n \"\"\"Expects model.predict to return (preds [B, K, T], scores [B, K]).\"\"\"\n try:\n si = np.random.randint(0, len(es))\n batch_dict = es[si]\n except:\n batch_dict = next(iter(es))\n\n lengths_key = model.src_lengths_key\n src_field = lengths_key.split('_')[0]\n src_array = batch_dict[src_field]\n if max_examples > 0:\n max_examples = min(max_examples, src_array.shape[0])\n\n for i in range(max_examples):\n example = {}\n # Batch first, so this gets a single example at once\n for k, v in batch_dict.items():\n example[k] = v[i, np.newaxis]\n\n logger.info('========================================================================')\n sent = lookup_sentence(rlut1, example[src_field].squeeze(), reverse=reverse)\n logger.info('[OP] %s' % sent)\n sent = lookup_sentence(rlut2, example['tgt'].squeeze())\n logger.info('[Actual] %s' % sent)\n dst_i = model.predict(example)[0][0][0]\n sent = lookup_sentence(rlut2, dst_i)\n logger.info('Guess: %s' % sent)\n logger.info('------------------------------------------------------------------------')\n\n\n@export\ndef convert_seq2seq_golds(indices, lengths, rlut, subword_fix=lambda x: x):\n \"\"\"Convert indices to words and format like a bleu reference corpus.\n\n :param indices: The indices of the gold sentence. Should be in the shape\n `[B, T]`. Iterating though axis=1 should yield ints.\n :param lengths: The length of the gold sentences.\n :param rlut: `dict[int] -> str` A lookup table from indices to words.\n\n :returns: List[List[List[str]]] Shape is [B, 1, T] where T is the number of\n words in that gold sentence\n \"\"\"\n golds = []\n for idx, l in zip(indices, lengths):\n gold = idx[:l]\n gold_str = lookup_sentence(rlut, gold)\n gold = subword_fix(gold_str).split()\n golds.append([gold])\n return golds\n\n\n@export\ndef convert_seq2seq_preds(indices, rlut, subword_fix=lambda x: x):\n \"\"\"Convert indices to words and format like a bleu hypothesis corpus.\n\n :param indices: The indices of the predicted sentence. Should be in the\n shape `[B, T]`. 
Iterating though axis=1 should yield ints.\n :param rlut: `dict[int] -> str` A lookup table from indices to words.\n\n :returns: List[List[str]] Shape is [B, T] where T is the number of\n words in that predicted sentence\n \"\"\"\n preds = []\n for idx in indices:\n pred_str = lookup_sentence(rlut, idx)\n pred = subword_fix(pred_str).split()\n preds.append(pred)\n return preds\n\n\n\nMEAD_HUB_MODULES = []\nDEFAULT_DATA_CACHE = os.path.expanduser('~/.bl-data')\n\n\n@export\ndef import_user_module(module_name: str, data_download_cache: Optional[str] = None):\n \"\"\"Load a module that is in the python path\n :param model_name: (``str``) - the name of the module\n :return:\n \"\"\"\n if not data_download_cache and os.path.exists(DEFAULT_DATA_CACHE):\n data_download_cache = DEFAULT_DATA_CACHE\n if data_download_cache:\n if module_name.startswith(\"hub:\") or module_name.startswith(\"http\"):\n if module_name.startswith(\"hub:\"):\n vec = module_name.split(\":\")\n version = vec[1]\n addons_literal = vec[2]\n rest = \":\".join(vec[3:])\n if not rest.endswith(\".py\"):\n rest += \".py\"\n if addons_literal != \"addons\":\n raise Exception(\"We only support downloading addons right now\")\n module_name = f\"http://raw.githubusercontent.com/mead-ml/hub/master/{version}/addons/{rest}\"\n if module_name in MEAD_HUB_MODULES:\n logger.warning(f\"Skipping previously downloaded module: {module_name}\")\n return None\n MEAD_HUB_MODULES.append(module_name)\n module_name = AddonDownloader(module_name, data_download_cache, cache_ignore=True).download()\n\n # TODO: get rid of this!\n addon_path = os.path.dirname(os.path.realpath(addons.__file__))\n idempotent_append(addon_path, sys.path)\n if any(module_name.endswith(suffix) for suffix in importlib.machinery.SOURCE_SUFFIXES):\n module_path = module_name\n module_name, _ = parse_module_as_path(module_path)\n # File based import from here https://docs.python.org/3.6/library/importlib.html#importing-a-source-file-directly\n spec = importlib.util.spec_from_file_location(module_name, module_path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n # Set this module in sys.modules so later we can import the module by name when pickling things.\n sys.modules[module_name] = mod\n return mod\n mod = importlib.import_module(module_name)\n return mod\n\n" ]
[ [ "numpy.zeros", "numpy.sum", "numpy.argpartition", "numpy.argsort", "numpy.random.multinomial" ] ]
JoaquinAmatRodrigo/skforecaster
[ "5ee79a51960a27db9e169706014528eae403e1c2" ]
[ "skforecast/ForecasterAutoregCustom/tests/test_predict.py" ]
[ "from pytest import approx\nimport numpy as np\nimport pandas as pd\nfrom skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom\nfrom sklearn.linear_model import LinearRegression\n\n\ndef create_predictors(y):\n '''\n Create first 5 lags of a time series.\n '''\n \n lags = y[-1:-6:-1]\n \n return lags \n\n\ndef test_predict_output_when_regressor_is_LinearRegression():\n '''\n Test predict output when using LinearRegression as regressor.\n '''\n forecaster = ForecasterAutoregCustom(\n regressor = LinearRegression(),\n fun_predictors = create_predictors,\n window_size = 5\n )\n forecaster.fit(y=pd.Series(np.arange(50)))\n results = forecaster.predict(steps=5)\n expected = pd.Series(\n data = np.array([50., 51., 52., 53., 54.]),\n index = pd.RangeIndex(start=50, stop=55, step=1),\n name = 'pred'\n )\n pd.testing.assert_series_equal(results, expected)" ]
[ [ "numpy.array", "sklearn.linear_model.LinearRegression", "pandas.RangeIndex", "pandas.testing.assert_series_equal", "numpy.arange" ] ]
bastienboutonnet/sheetwork
[ "7aa757ed12375ddd2c56502b721d91146d22b7ea" ]
[ "tests/mockers.py" ]
[ "import pandas\nfrom pandas import Timestamp\n\nEXPECTED_CONFIG = {\n \"sheet_name\": \"df_dropper\",\n \"sheet_key\": \"sample\",\n \"target_schema\": \"sand\",\n \"target_table\": \"bb_test_sheetwork\",\n \"columns\": [\n {\"name\": \"col_a\", \"datatype\": \"int\"},\n {\"name\": \"col_b\", \"datatype\": \"varchar\"},\n {\"name\": \"col_one\", \"datatype\": \"varchar\"},\n {\"name\": \"renamed_col\", \"identifier\": \"long ass name\", \"datatype\": \"varchar\"},\n ],\n \"excluded_columns\": [\"to_exclude\"],\n}\n\nEXPECTED_DEV_TEST_PROFILE = {\n \"db_type\": \"snowflake\",\n \"account\": \"a\",\n \"user\": \"b\",\n \"password\": \"c\",\n \"role\": \"d\",\n \"database\": \"e\",\n \"warehouse\": \"f\",\n \"schema\": \"g\",\n \"guser\": \"[email protected]\",\n}\n\nNO_COLS_EXPECTED_CONFIG = {\n \"sheet_name\": \"no_cols\",\n \"sheet_key\": \"sample\",\n \"target_schema\": \"sand\",\n \"target_table\": \"bb_test_sheetwork\",\n}\n\nEXPECTED_SHEETWORK_PROJECT = {\n \"name\": \"sheetwork_test\",\n \"target_schema\": \"sand\",\n \"always_create_table\": True,\n \"always_create_schema\": True,\n \"destructive_create_table\": True,\n}\n\n\nEXPECTED_SHEETWORK_PROJECT_ALL_CREATE = {\n \"name\": \"sheetwork_test\",\n \"target_schema\": \"sand\",\n \"always_create_objects\": True,\n \"destructive_create_table\": True,\n}\n\nEXPECTED_SHEETWORK_PROJECT_DEPRECATED = {\n \"name\": \"sheetwork_test\",\n \"target_schema\": \"sand\",\n \"always_create\": True,\n}\n\nDIRTY_DF = {\n \"col_a\": [1, 2, 32],\n \"col b\": [\"as . \", \"b\", \" c\"],\n \"1. ??col_one\": [\"aa\", \"bb\", \"cc\"],\n \"\": [\"q\", \"q\", \"q\"],\n \"col_1\": [1, 2, 33],\n \"long ass name\": [\"foo\", \"bar\", \"fizz\"],\n \"col_with_empty_string\": [\"1\", \"\", \"2\"],\n}\n\nTO_CAST_DF = {\n \"col_int\": [\"1\", \"2\", \"32\"],\n \"col_varchar\": [\"foo\", \"bar\", \"fizz\"],\n \"created_date\": [\"2019/01/01\", \"2019/01/02\", \"2019/01/03\"],\n \"col_bool\": [\"false\", \"False\", \"true\"],\n \"col_numeric\": [\"1.2\", \"1.3\", \"1\"],\n}\n\nCAST_DF = {\n # this non conversion to int is intentional until we have a better fix see #205, #204\n \"col_int\": {0: \"1\", 1: \"2\", 2: \"32\"},\n \"col_varchar\": {0: \"foo\", 1: \"bar\", 2: \"fizz\"},\n \"created_date\": {\n 0: Timestamp(\"2019-01-01 00:00:00\"),\n 1: Timestamp(\"2019-01-02 00:00:00\"),\n 2: Timestamp(\"2019-01-03 00:00:00\"),\n },\n \"col_bool\": {0: False, 1: False, 2: True},\n \"col_numeric\": {0: 1.2, 1: 1.3, 2: 1},\n}\n\nCASING_DF = {\n \"CamelCasedCol\": [1, 2, 3],\n \"snake_cased_col\": [1, 2, 3],\n}\n\nSNAKE_CASED_COLS = [\"camel_cased_col\", \"snake_cased_col\"]\n\nCAMEL_CASED_COLS = [\"CamelCasedCol\", \"SnakeCasedCol\"]\n\nCLEAN_DF = {\n \"col_a\": {0: 1, 1: 2, 2: 32},\n \"col_b\": {0: \"as .\", 1: \"b\", 2: \"c\"},\n \"1_col_one\": {0: \"aa\", 1: \"bb\", 2: \"cc\"},\n \"col_1\": {0: 1, 1: 2, 2: 33},\n \"long_ass_name\": {0: \"foo\", 1: \"bar\", 2: \"fizz\"},\n \"col_with_empty_string\": {0: \"1\", 1: \"\", 2: \"2\"},\n}\n\nRENAMED_DF = {\n \"col_a\": {0: 1, 1: 2, 2: 32},\n \"col_b\": {0: \"as .\", 1: \"b\", 2: \"c\"},\n \"1_col_one\": {0: \"aa\", 1: \"bb\", 2: \"cc\"},\n \"col_1\": {0: 1, 1: 2, 2: 33},\n \"renamed_col\": {0: \"foo\", 1: \"bar\", 2: \"fizz\"},\n}\n\nDROP_COL_DF = {\n \"col_a\": [1, 2, 32],\n \"col b\": [\"as . \", \"b\", \" c\"],\n \"1. 
col_one\": [\"aa\", \"bb\", \"cc\"],\n \"\": [\"q\", \"q\", \"q\"],\n \"long ass name\": [\"foo\", \"bar\", \"fizz\"],\n \"to_exclude\": [\"garbage1\", \"garbage2\", \"garbage3\"],\n}\n\nRENAMED_COLS = [\n \"col_a\",\n \"col b\",\n \"1. ??col_one\",\n \"\",\n \"col_1\",\n \"renamed_col\",\n \"col_with_empty_string\",\n]\n\nEXCLUDED_DF_COLS = [\"col_a\", \"col b\", \"1. col_one\", \"\", \"long ass name\"]\n\nEMPTY_HEADER_COLUMNS_DF = {\n \"col_ a \": [1, 2, 32],\n \" \": [\"as . \", \"b\", \" c\"],\n \"1. col_one\": [\"aa\", \"bb\", \"cc\"],\n \"\": [\"q\", \"q\", \"q\"],\n \" col_1\": [1, 2, 33],\n}\n\nNON_EMPTY_HEADER = {\n \"col_a\": [1, 2, 32],\n \"col b\": [\"as . \", \"b\", \" c\"],\n \"1. col_one\": [\"aa\", \"bb\", \"cc\"],\n \"col_1\": [1, 2, 33],\n \"long ass name\": [\"foo\", \"bar\", \"fizz\"],\n \"col_with_empty_string\": [\"1\", \"\", \"2\"],\n}\n\n\ndef generate_test_df(df):\n test_df = pandas.DataFrame.from_dict(df)\n return test_df\n" ]
[ [ "pandas.DataFrame.from_dict", "pandas.Timestamp" ] ]
exemuel/indiegogo-scrapper
[ "b46f79f19ecf32d7b2bb98c0102f1e398eda08a9" ]
[ "src/main.py" ]
[ "# load standard libraries\nimport sys\nimport os\n\n# load data analysis library\nimport pandas as pd\n\n# load modules in scraper\nfrom scraper import *\nimport multiprocessing\n\ndef main():\n args = sys.argv[1:]\n\n if os.path.exists(\"chromedriver\\chromedriver.exe\") is False:\n print(\"put chromedriver.exe into chromedriver directory.\")\n else:\n if os.path.exists(\"data\\Indiegogo.csv\") is False:\n print(\"put Indiegogo.csv into data directory.\")\n else:\n if len(args) < 1:\n print(\"define the json filename.\")\n elif args[0].find(\".json\")!=-1:\n dir_path_data = \"data\"\n dir_path_output = \"out/\" + args[0]\n\n filenames = next(os.walk(dir_path_data), (None, None, []))[2]\n\n list_project_site = []\n for ele in filenames:\n df_indiegogo = pd.read_csv(dir_path_data + \"\\\\\" + ele)\n list_project_site.extend(extract_project_url(df_indiegogo))\n\n list_project_site = [[i, e] for i, e in enumerate(list_project_site)]\n \n try:\n f = open(dir_path_output, \"r\")\n data = json.loads(f.read())\n f.close()\n except Exception as e:\n data = {}\n \n list_processed = [e for e in list_project_site if e[1] \\\n not in [data[key][\"site\"] for key in data]]\n \n # process-based parallelism\n # use one third of the available processors\n processor = int(-1 * (multiprocessing.cpu_count()/3) // 1 * -1)\n # use one fourth of the available processors\n # processor = int(multiprocessing.cpu_count()/4)\n pool = multiprocessing.Pool(processes=processor)\n\n print(\"*** start ***\")\n\n for b in [list_processed[i:i + processor] for i in range(0, len(list_processed), processor)]:\n dict_tmp = {}\n list_bres = pool.map(scrapes, b)\n \n for i in list_bres:\n dict_tmp.update(i)\n\n if len(data) < 1:\n with open(dir_path_output, 'w') as file:\n json.dump(dict_tmp, file, indent = 4)\n else:\n with open(dir_path_output, \"r+\") as file:\n old_data = json.load(file)\n old_data.update(dict_tmp)\n file.seek(0)\n json.dump(old_data, file, indent = 4)\n print(\"scraped\", str(b[-1][0]+1), \"of\", str(len(list_project_site)-1))\n break\n else:\n print(\"wrong output file extension. use json extension.\")\n print(\"*** end ***\")\n\nif __name__ == '__main__':\n main()" ]
[ [ "pandas.read_csv" ] ]
Chase1325/IFUTR
[ "f1e9bc241ff5878d32bc6420b8f7d8d28ee93292" ]
[ "ifutr/scripts/localization/reports/generate_Table.py" ]
[ "#Generate the data tables\nimport matplotlib as m\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef dataTable(samples):\n\n celltext_100 = []\n celltext_1225 = []\n celltext_2025 = []\n\n columns = ('X [mm]', 'Y [mm]', r'$\\bar{x}$', r'$\\bar{y}$',r'$\\bar{e_{xy}}$',\n r'$\\bar{\\sigma_{xy}}$', r'$\\bar{\\sigma_{xy}^2}$')\n\n hcell,wcell=0.5, 1\n hpad,wpad= 1, 0\n\n for s in samples:\n true = s.getTrue()\n mean = s.getMean()\n var = s.getVar()\n std = s.getStd()\n err = s.getErr()\n\n if((true[2]==10)or(true[2]==100)):\n celltext_100.append([true[0], true[1], round(mean[0],2), round(mean[1],2),\n round(np.average([err[0], err[1]]),2), round(np.average([std[0],std[1]]),2),\n round(np.average([var[0],var[1]]),2)])\n\n if(true[2]==1225):\n celltext_1225.append([true[0], true[1], round(mean[0],2), round(mean[1],2),\n round(np.average([err[0], err[1]]),2), round(np.average([std[0],std[1]]),2),\n round(np.average([var[0],var[1]]),2)])\n\n if(true[2]==2025):\n celltext_2025.append([true[0], true[1], round(mean[0],2), round(mean[1],2),\n round(np.average([err[0], err[1]]),2), round(np.average([std[0],std[1]]),2),\n round(np.average([var[0],var[1]]),2)])\n\n nrows,ncols = len(celltext_100)+1, len(columns)\n\n plt.figure(13)\n plt.title('Data Table for Z=100mm')\n ax = plt.gca()\n ax.axis('off')\n ax.table(cellText=celltext_100,colLabels=columns,loc='center')\n\n plt.figure(14)\n plt.title('Data Table for Z=1225mm')\n ax = plt.gca()\n ax.axis('off')\n ax.table(cellText=celltext_1225,colLabels=columns,loc='center')\n\n plt.figure(15)\n plt.title('Data Table for Z=2025mm')\n ax = plt.gca()\n ax.axis('off')\n ax.table(cellText=celltext_2025,colLabels=columns,loc='center')\n\n\n fig1 = plt.figure(13)\n fig2 = plt.figure(14)\n fig3 = plt.figure(15)\n\n return fig1, fig2, fig3\n" ]
[ [ "numpy.average", "matplotlib.pyplot.gca", "matplotlib.pyplot.title", "matplotlib.pyplot.figure" ] ]
MaryamHoss/BESD
[ "294e9b417cc5866e76be6faad2357ba8d26e61a9" ]
[ "data_processing/eeg_preprocessing.py" ]
[ "\"\"\"\npaper of interest:\nprobably best tutorial: https://mne.tools/dev/auto_examples/decoding/plot_receptive_field_mtrf.html#sphx-glr-auto-examples-decoding-plot-receptive-field-mtrf-py\n\n\"\"\"\n\nfrom mne.externals.pymatreader import read_mat\nimport os\nimport mne\nfrom mne.preprocessing import ICA\nimport numpy as np\nfrom GenericTools.StayOrganizedTools.VeryCustomSacred import CustomExperiment\n\nCDIR = os.path.dirname(os.path.realpath(__file__))\nsuperCDIR = os.path.join(*[CDIR, '..'])\nex = CustomExperiment('re', base_dir=superCDIR, GPU=0, seed=14)\n\n\[email protected]\ndef main():\n config_dir = os.path.join(*[CDIR, ex.observers[0].basedir, '1'])\n images_dir = os.path.join(*[CDIR, ex.observers[0].basedir, 'images'])\n models_dir = os.path.join(*[CDIR, ex.observers[0].basedir, 'trained_models'])\n\n file_name = '../data/Subject1_Run1.mat'\n mat_data = read_mat(file_name)\n data = mat_data['eegData'].T[:, :22000] * 1e-6\n #n_timesteps = data.shape[1]\n print(data)\n\n # remove DC\n mean_data = np.mean(data, axis=1)[:, np.newaxis]\n data = data - mean_data\n\n fs = mat_data['fs']\n montage = mne.channels.make_standard_montage('biosemi128')\n info = mne.create_info(ch_names=montage.ch_names, sfreq=fs, ch_types='eeg').set_montage(montage)\n\n raw = mne.io.RawArray(data, info)\n raw.info['bads'].append('A22')\n\n # cropping the raw object to just three seconds for easier plotting\n #raw.crop(tmin=0, tmax=3).load_data()\n raw.plot()\n\n # Preprocessing following mne tutorial\n # https://mne.tools/dev/auto_tutorials/preprocessing/plot_40_artifact_correction_ica.html#tut-artifact-ica\n\n # Filtering to remove slow drifts\n filt_raw = raw.copy()\n filt_raw.load_data().filter(l_freq=1., h_freq=None)\n\n # Fitting and plotting the ICA solution\n ica = ICA(n_components=15, random_state=0)\n ica.fit(filt_raw)\n\n raw.load_data()\n fig = ica.plot_sources(raw)\n fig.show()\n ica.plot_components()\n\n # blinks\n exclusion_list = [0, 1, 2]\n ica.plot_overlay(raw, exclude=exclusion_list, picks='eeg')\n ica.plot_properties(raw, picks=exclusion_list)\n\n # Selecting ICA components manually\n\n ica.exclude = exclusion_list\n\n # ica.apply() changes the Raw object in-place, so let's make a copy first:\n reconst_raw = raw.copy()\n ica.apply(reconst_raw)\n ica.apply(filt_raw)\n eeg_data_interp = filt_raw.copy().interpolate_bads(reset_bads=False)\n\n reconst_raw.plot()\n filt_raw.plot()\n raw.plot()\n eeg_data_interp.plot()\n ica.plot_components()\n" ]
[ [ "numpy.mean" ] ]
howards11/agents
[ "8d5627d9b9c3680468a63564c25a4d82fa1befb0" ]
[ "tf_agents/agents/cql/cql_sac_agent_test.py" ]
[ "# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.agents.cql.cql_sac_agent.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom tf_agents.agents.cql import cql_sac_agent\nfrom tf_agents.agents.ddpg import critic_network\nfrom tf_agents.agents.ddpg import critic_rnn_network\nfrom tf_agents.agents.sac import tanh_normal_projection_network\nfrom tf_agents.networks import actor_distribution_network\nfrom tf_agents.networks import actor_distribution_rnn_network\nfrom tf_agents.networks import network\nfrom tf_agents.networks import utils as network_utils\nfrom tf_agents.specs import distribution_spec\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.trajectories import trajectory\nfrom tf_agents.utils import common\nfrom tf_agents.utils import nest_utils\nfrom tf_agents.utils import test_utils\n\n\nclass _MockDistribution(object):\n\n def __init__(self, action):\n self._action = action\n\n def sample(self, num_samples=None, seed=None):\n del seed\n if not num_samples:\n return self._action\n\n actions = tf.tile(self._action, tf.constant([num_samples, 1]))\n actions = tf.reshape(\n actions, [num_samples, self._action.shape[0], self._action.shape[1]])\n return actions\n\n def log_prob(self, sample):\n return tf.constant(10., shape=sample.shape)\n\n\nclass DummyActorPolicy(object):\n\n def __init__(self,\n time_step_spec,\n action_spec,\n actor_network,\n training=False):\n del time_step_spec\n del actor_network\n del training\n single_action_spec = tf.nest.flatten(action_spec)[0]\n # Action is maximum of action range.\n self._action = single_action_spec.maximum\n self._action_spec = action_spec\n self.info_spec = ()\n\n def action(self, time_step):\n observation = time_step.observation\n batch_size = observation.shape[0]\n action = tf.constant(self._action, dtype=tf.float32, shape=[batch_size, 1])\n return policy_step.PolicyStep(action=action)\n\n def distribution(self, time_step, policy_state=()):\n del policy_state\n action = self.action(time_step).action\n return policy_step.PolicyStep(action=_MockDistribution(action))\n\n def get_initial_state(self, batch_size):\n del batch_size\n return ()\n\n\nclass DummyCriticNet(network.Network):\n\n def __init__(self, l2_regularization_weight=0.0, shared_layer=None):\n super(DummyCriticNet, self).__init__(\n input_tensor_spec=(tensor_spec.TensorSpec([2], tf.float32),\n tensor_spec.TensorSpec([1], tf.float32)),\n state_spec=(),\n name=None)\n self._l2_regularization_weight = l2_regularization_weight\n self._value_layer = tf.keras.layers.Dense(\n 1,\n kernel_regularizer=tf.keras.regularizers.l2(l2_regularization_weight),\n kernel_initializer=tf.constant_initializer([[0], [1]]),\n 
bias_initializer=tf.constant_initializer([[0]]))\n self._shared_layer = shared_layer\n self._action_layer = tf.keras.layers.Dense(\n 1,\n kernel_regularizer=tf.keras.regularizers.l2(l2_regularization_weight),\n kernel_initializer=tf.constant_initializer([[1]]),\n bias_initializer=tf.constant_initializer([[0]]))\n\n def copy(self, name=''):\n del name\n return DummyCriticNet(\n l2_regularization_weight=self._l2_regularization_weight,\n shared_layer=self._shared_layer)\n\n def call(self, inputs, step_type, network_state=()):\n del step_type\n observation, actions = inputs\n actions = tf.cast(tf.nest.flatten(actions)[0], tf.float32)\n\n states = tf.cast(tf.nest.flatten(observation)[0], tf.float32)\n\n s_value = self._value_layer(states)\n if self._shared_layer:\n s_value = self._shared_layer(s_value)\n a_value = self._action_layer(actions)\n # Biggest state is best state.\n q_value = tf.reshape(s_value + a_value, [-1])\n return q_value, network_state\n\n\nclass DummyActorNet(network.DistributionNetwork):\n\n def __init__(self,\n input_spec,\n action_spec,\n preprocessing_layers=None,\n name=None):\n output_spec = self._get_normal_distribution_spec(action_spec)\n super(DummyActorNet, self).__init__(\n input_spec, (), output_spec=output_spec, name='DummyActorNet')\n self._action_spec = action_spec\n self._flat_action_spec = tf.nest.flatten(self._action_spec)[0]\n\n self._dummy_layers = (preprocessing_layers or []) + [\n tf.keras.layers.Dense(\n self._flat_action_spec.shape.num_elements() * 2,\n kernel_initializer=tf.constant_initializer([[2.0, 1.0], [1.0, 1.0]\n ]),\n bias_initializer=tf.constant_initializer([5.0, 5.0]),\n activation=None,\n )\n ]\n\n def _get_normal_distribution_spec(self, sample_spec):\n is_multivariate = sample_spec.shape.ndims > 0\n param_properties = tfp.distributions.Normal.parameter_properties()\n input_param_spec = { # pylint: disable=g-complex-comprehension\n name: tensor_spec.TensorSpec(\n shape=properties.shape_fn(sample_spec.shape),\n dtype=sample_spec.dtype)\n for name, properties in param_properties.items()\n }\n\n def distribution_builder(*args, **kwargs):\n if is_multivariate:\n # For backwards compatibility, and because MVNDiag does not support\n # `param_static_shapes`, even when using MVNDiag the spec\n # continues to use the terms 'loc' and 'scale'. Here we have to massage\n # the construction to use 'scale' for kwarg 'scale_diag'. Since they\n # have the same shape and dtype expectationts, this is okay.\n kwargs = kwargs.copy()\n kwargs['scale_diag'] = kwargs['scale']\n del kwargs['scale']\n return tfp.distributions.MultivariateNormalDiag(*args, **kwargs)\n else:\n return tfp.distributions.Normal(*args, **kwargs)\n\n return distribution_spec.DistributionSpec(\n distribution_builder, input_param_spec, sample_spec=sample_spec)\n\n def call(self, inputs, step_type=None, network_state=()):\n del step_type\n hidden_state = tf.cast(tf.nest.flatten(inputs), tf.float32)[0]\n\n # Calls coming from agent.train() has a time dimension. Direct loss calls\n # may not have a time dimension. 
It order to make BatchSquash work, we need\n # to specify the outer dimension properly.\n has_time_dim = nest_utils.get_outer_rank(inputs,\n self.input_tensor_spec) == 2\n outer_rank = 2 if has_time_dim else 1\n batch_squash = network_utils.BatchSquash(outer_rank)\n hidden_state = batch_squash.flatten(hidden_state)\n\n for layer in self._dummy_layers:\n hidden_state = layer(hidden_state)\n\n actions, stdevs = tf.split(hidden_state, 2, axis=1)\n actions = batch_squash.unflatten(actions)\n stdevs = batch_squash.unflatten(stdevs)\n actions = tf.nest.pack_sequence_as(self._action_spec, [actions])\n stdevs = tf.nest.pack_sequence_as(self._action_spec, [stdevs])\n\n return self.output_spec.build_distribution(\n loc=actions, scale=stdevs), network_state\n\n\nclass CqlSacAgentTest(test_utils.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(CqlSacAgentTest, self).setUp()\n self._obs_spec = tensor_spec.BoundedTensorSpec([2],\n tf.float32,\n minimum=0,\n maximum=1)\n self._time_step_spec = ts.time_step_spec(self._obs_spec)\n self._action_spec = tensor_spec.BoundedTensorSpec([1], tf.float32, -1, 1)\n self._random_seed = 0\n\n @parameterized.parameters((1.0, 10, -2.307371), (10.0, 10, -23.073713))\n def testCqlLoss(self, cql_alpha, num_cql_samples, expected_loss):\n agent = cql_sac_agent.CqlSacAgent(\n self._time_step_spec,\n self._action_spec,\n critic_network=DummyCriticNet(),\n actor_network=None,\n actor_optimizer=None,\n critic_optimizer=None,\n alpha_optimizer=None,\n cql_alpha=cql_alpha,\n num_cql_samples=num_cql_samples,\n include_critic_entropy_term=False,\n use_lagrange_cql_alpha=False,\n random_seed=self._random_seed,\n actor_policy_ctor=DummyActorPolicy)\n\n observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n time_steps = ts.restart(observations, batch_size=2)\n actions = tf.constant([[5], [6]], dtype=tf.float32)\n\n loss = agent._cql_loss(\n time_steps, actions, training=False) * agent._get_cql_alpha()\n\n self.initialize_v1_variables()\n loss_ = self.evaluate(loss)\n\n self.assertAllClose(loss_, expected_loss)\n\n def testAgentTrajectoryTrain(self):\n actor_net = actor_distribution_network.ActorDistributionNetwork(\n self._obs_spec,\n self._action_spec,\n fc_layer_params=(10,),\n continuous_projection_net=tanh_normal_projection_network\n .TanhNormalProjectionNetwork)\n\n agent = cql_sac_agent.CqlSacAgent(\n self._time_step_spec,\n self._action_spec,\n critic_network=DummyCriticNet(),\n actor_network=actor_net,\n actor_optimizer=tf.compat.v1.train.AdamOptimizer(0.001),\n critic_optimizer=tf.compat.v1.train.AdamOptimizer(0.001),\n alpha_optimizer=tf.compat.v1.train.AdamOptimizer(0.001),\n cql_alpha=5.0,\n num_cql_samples=1,\n include_critic_entropy_term=False,\n use_lagrange_cql_alpha=False)\n\n trajectory_spec = trajectory.Trajectory(\n step_type=self._time_step_spec.step_type,\n observation=self._time_step_spec.observation,\n action=self._action_spec,\n policy_info=(),\n next_step_type=self._time_step_spec.step_type,\n reward=tensor_spec.BoundedTensorSpec(\n [], tf.float32, minimum=0.0, maximum=1.0, name='reward'),\n discount=self._time_step_spec.discount)\n\n sample_trajectory_experience = tensor_spec.sample_spec_nest(\n trajectory_spec, outer_dims=(3, 2))\n agent.train(sample_trajectory_experience)\n\n def testAgentTransitionTrain(self):\n actor_net = actor_distribution_network.ActorDistributionNetwork(\n self._obs_spec,\n self._action_spec,\n fc_layer_params=(10,),\n continuous_projection_net=tanh_normal_projection_network\n .TanhNormalProjectionNetwork)\n\n 
agent = cql_sac_agent.CqlSacAgent(\n self._time_step_spec,\n self._action_spec,\n critic_network=DummyCriticNet(),\n actor_network=actor_net,\n actor_optimizer=tf.compat.v1.train.AdamOptimizer(0.001),\n critic_optimizer=tf.compat.v1.train.AdamOptimizer(0.001),\n alpha_optimizer=tf.compat.v1.train.AdamOptimizer(0.001),\n cql_alpha=5.0,\n num_cql_samples=1,\n include_critic_entropy_term=False,\n use_lagrange_cql_alpha=False)\n\n time_step_spec = self._time_step_spec._replace(\n reward=tensor_spec.BoundedTensorSpec(\n [], tf.float32, minimum=0.0, maximum=1.0, name='reward'))\n\n transition_spec = trajectory.Transition(\n time_step=time_step_spec,\n action_step=policy_step.PolicyStep(action=self._action_spec,\n state=(),\n info=()),\n next_time_step=time_step_spec)\n\n sample_trajectory_experience = tensor_spec.sample_spec_nest(\n transition_spec, outer_dims=(3,))\n agent.train(sample_trajectory_experience)\n\n @parameterized.parameters((False, 0., False, [16.3, 28.1]),\n (True, 0., True, [7.3, 19.1]),\n (False, 0.1, False, [16.269377, 28.07928]),\n (False, 0.1, True, [16.269377, 28.07928]))\n def testCriticLoss(self, include_critic_entropy_term, reward_noise_variance,\n use_tf_variable, td_targets):\n if use_tf_variable:\n reward_noise_variance = tf.Variable(reward_noise_variance)\n agent = cql_sac_agent.CqlSacAgent(\n self._time_step_spec,\n self._action_spec,\n critic_network=DummyCriticNet(),\n actor_network=None,\n actor_optimizer=None,\n critic_optimizer=None,\n alpha_optimizer=None,\n cql_alpha=1.0,\n num_cql_samples=1,\n include_critic_entropy_term=include_critic_entropy_term,\n use_lagrange_cql_alpha=False,\n reward_noise_variance=reward_noise_variance,\n actor_policy_ctor=DummyActorPolicy)\n\n observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n time_steps = ts.restart(observations, batch_size=2)\n actions = tf.constant([[5], [6]], dtype=tf.float32)\n\n rewards = tf.constant([10, 20], dtype=tf.float32)\n discounts = tf.constant([0.9, 0.9], dtype=tf.float32)\n next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32)\n next_time_steps = ts.transition(next_observations, rewards, discounts)\n\n pred_td_targets = [7., 10.]\n self.evaluate(tf.compat.v1.global_variables_initializer())\n\n # Expected critic loss has factor of 2, for the two TD3 critics.\n expected_loss = self.evaluate(2 * tf.compat.v1.losses.mean_squared_error(\n tf.constant(td_targets), tf.constant(pred_td_targets)))\n\n loss = agent._critic_loss_with_optional_entropy_term(\n time_steps,\n actions,\n next_time_steps,\n td_errors_loss_fn=tf.math.squared_difference)\n\n self.evaluate(tf.compat.v1.global_variables_initializer())\n loss_ = self.evaluate(loss)\n self.assertAllClose(loss_, expected_loss)\n\n @parameterized.parameters((0, 6), (1, 13.404237))\n def testActorLoss(self, num_bc_steps, expected_loss):\n agent = cql_sac_agent.CqlSacAgent(\n self._time_step_spec,\n self._action_spec,\n critic_network=DummyCriticNet(),\n actor_network=DummyActorNet(self._obs_spec, self._action_spec),\n actor_optimizer=None,\n critic_optimizer=None,\n alpha_optimizer=None,\n cql_alpha=1.0,\n num_cql_samples=1,\n include_critic_entropy_term=False,\n use_lagrange_cql_alpha=False,\n num_bc_steps=num_bc_steps,\n actor_policy_ctor=DummyActorPolicy)\n\n observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n time_steps = ts.restart(observations, batch_size=2)\n actions = tf.constant([[5], [6]], dtype=tf.float32)\n\n loss = agent.actor_loss(time_steps, actions)\n\n 
self.evaluate(tf.compat.v1.global_variables_initializer())\n loss_ = self.evaluate(loss)\n self.assertAllClose(loss_, expected_loss)\n\n @parameterized.parameters(\n (0.0, 10, False, False, 0.847626),\n (1.0, 10, False, True, 4.568825),\n (10.0, 10, False, False, 38.059581),\n (10.0, 10, True, False, 46.149696))\n def testTrainWithRnn(self, cql_alpha, num_cql_samples,\n include_critic_entropy_term, use_lagrange_cql_alpha,\n expected_loss):\n actor_net = actor_distribution_rnn_network.ActorDistributionRnnNetwork(\n self._obs_spec,\n self._action_spec,\n input_fc_layer_params=None,\n output_fc_layer_params=None,\n conv_layer_params=None,\n lstm_size=(40,),\n )\n\n critic_net = critic_rnn_network.CriticRnnNetwork(\n (self._obs_spec, self._action_spec),\n observation_fc_layer_params=(16,),\n action_fc_layer_params=(16,),\n joint_fc_layer_params=(16,),\n lstm_size=(16,),\n output_fc_layer_params=None,\n )\n\n counter = common.create_variable('test_train_counter')\n\n optimizer_fn = tf.compat.v1.train.AdamOptimizer\n\n agent = cql_sac_agent.CqlSacAgent(\n self._time_step_spec,\n self._action_spec,\n critic_network=critic_net,\n actor_network=actor_net,\n actor_optimizer=optimizer_fn(1e-3),\n critic_optimizer=optimizer_fn(1e-3),\n alpha_optimizer=optimizer_fn(1e-3),\n cql_alpha=cql_alpha,\n num_cql_samples=num_cql_samples,\n include_critic_entropy_term=include_critic_entropy_term,\n use_lagrange_cql_alpha=use_lagrange_cql_alpha,\n random_seed=self._random_seed,\n train_step_counter=counter,\n )\n\n batch_size = 5\n observations = tf.constant(\n [[[1, 2], [3, 4], [5, 6]]] * batch_size, dtype=tf.float32)\n actions = tf.constant([[[0], [1], [1]]] * batch_size, dtype=tf.float32)\n time_steps = ts.TimeStep(\n step_type=tf.constant([[1] * 3] * batch_size, dtype=tf.int32),\n reward=tf.constant([[1] * 3] * batch_size, dtype=tf.float32),\n discount=tf.constant([[1] * 3] * batch_size, dtype=tf.float32),\n observation=observations)\n\n experience = trajectory.Trajectory(time_steps.step_type, observations,\n actions, (), time_steps.step_type,\n time_steps.reward, time_steps.discount)\n\n # Force variable creation.\n agent.policy.variables()\n\n if not tf.executing_eagerly():\n # Get experience first to make sure optimizer variables are created and\n # can be initialized.\n experience = agent.train(experience)\n with self.cached_session() as sess:\n common.initialize_uninitialized_variables(sess)\n self.assertEqual(self.evaluate(counter), 0)\n self.evaluate(experience)\n self.assertEqual(self.evaluate(counter), 1)\n else:\n self.assertEqual(self.evaluate(counter), 0)\n loss = self.evaluate(agent.train(experience))\n self.assertAllClose(loss.loss, expected_loss)\n self.assertEqual(self.evaluate(counter), 1)\n\n @parameterized.parameters(\n (True, False, (-1, 10.0), 5.0, 3.032653, 2.895160, 3.130782),\n (False, False, (-1, 0), 5.0, 5.0, 2.895160, 3.130782),\n (False, True, (-1, 0), 5.0, 6.0, 2.895160, 3.130782))\n def testTrainWithLagrange(self, use_lagrange_cql_alpha,\n use_variable_for_cql_alpha,\n log_cql_alpha_clipping,\n expected_cql_alpha_step_one,\n expected_cql_alpha_step_two,\n expected_cql_loss_step_one,\n expected_cql_loss_step_two):\n if use_variable_for_cql_alpha:\n cql_alpha = tf.Variable(5.0)\n cql_alpha_var = cql_alpha # Getting around type checking.\n else:\n cql_alpha = 5.0\n cql_alpha_learning_rate = 0.5\n cql_tau = 10\n num_cql_samples = 5\n\n actor_net = actor_distribution_network.ActorDistributionNetwork(\n self._obs_spec, self._action_spec, fc_layer_params=None)\n critic_net = 
critic_network.CriticNetwork(\n (self._obs_spec, self._action_spec),\n observation_fc_layer_params=(16,),\n action_fc_layer_params=(16,),\n joint_fc_layer_params=(16,),\n kernel_initializer='glorot_uniform',\n last_kernel_initializer='glorot_uniform')\n\n counter = common.create_variable('test_train_counter')\n optimizer_fn = tf.compat.v1.train.AdamOptimizer\n agent = cql_sac_agent.CqlSacAgent(\n self._time_step_spec,\n self._action_spec,\n critic_network=critic_net,\n actor_network=actor_net,\n actor_optimizer=optimizer_fn(1e-3),\n critic_optimizer=optimizer_fn(1e-3),\n alpha_optimizer=optimizer_fn(1e-3),\n cql_alpha=cql_alpha,\n num_cql_samples=num_cql_samples,\n include_critic_entropy_term=False,\n use_lagrange_cql_alpha=use_lagrange_cql_alpha,\n cql_alpha_learning_rate=cql_alpha_learning_rate,\n cql_tau=cql_tau,\n random_seed=self._random_seed,\n log_cql_alpha_clipping=log_cql_alpha_clipping,\n train_step_counter=counter)\n\n batch_size = 5\n observations = tf.constant(\n [[[1, 2], [3, 4]]] * batch_size, dtype=tf.float32)\n actions = tf.constant([[[0], [1]]] * batch_size, dtype=tf.float32)\n time_steps = ts.TimeStep(\n step_type=tf.constant([[1] * 2] * batch_size, dtype=tf.int32),\n reward=tf.constant([[1] * 2] * batch_size, dtype=tf.float32),\n discount=tf.constant([[1] * 2] * batch_size, dtype=tf.float32),\n observation=observations)\n\n experience = trajectory.Trajectory(time_steps.step_type, observations,\n actions, (), time_steps.step_type,\n time_steps.reward, time_steps.discount)\n\n # Force variable creation.\n agent.policy.variables()\n\n if not tf.executing_eagerly():\n # Get experience first to make sure optimizer variables are created and\n # can be initialized.\n experience = agent.train(experience)\n with self.cached_session() as sess:\n common.initialize_uninitialized_variables(sess)\n self.assertEqual(self.evaluate(counter), 0)\n self.evaluate(experience)\n self.assertEqual(self.evaluate(counter), 1)\n else:\n # Training step one.\n self.assertEqual(self.evaluate(counter), 0)\n loss = self.evaluate(agent.train(experience))\n self.assertEqual(self.evaluate(counter), 1)\n self.assertAllClose(loss.extra.cql_loss, expected_cql_loss_step_one)\n self.assertAllClose(loss.extra.cql_alpha, expected_cql_alpha_step_one)\n if use_lagrange_cql_alpha:\n self.assertGreater(loss.extra.cql_alpha_loss, 0)\n else:\n self.assertEqual(loss.extra.cql_alpha_loss, 0)\n\n # Training step two.\n if use_variable_for_cql_alpha:\n cql_alpha_var.assign_add(1)\n loss = self.evaluate(agent.train(experience))\n self.assertEqual(self.evaluate(counter), 2)\n self.assertAllClose(loss.extra.cql_loss, expected_cql_loss_step_two)\n self.assertAllClose(loss.extra.cql_alpha, expected_cql_alpha_step_two)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.nest.pack_sequence_as", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.constant_initializer", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.Variable", "tensorflow.reshape", "tensorflow.nest.flatten", "tensorflow.constant", "tensorflow.executing_eagerly", "tensorflow.test.main", "tensorflow.keras.regularizers.l2", "tensorflow.split" ] ]
mkolod/CarND-Capstone
[ "82bdeee73e0a788778e9276bbea8ad3376f4214d" ]
[ "ros/src/tl_detector/light_classification/tl_classifier.py" ]
[ "import numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nimport rospy\n\nfrom collections import defaultdict\nfrom io import StringIO\n\nfrom PIL import Image\n\nfrom .object_detection.utils import label_map_util\nfrom .object_detection.utils import visualization_utils as vis_util\n\nfrom styx_msgs.msg import TrafficLight\nfrom std_msgs.msg import String\nfrom scipy.stats import mode\n\nMODEL_NAME = 'light_classification/' + 'ssd_custom_graph'\nprint(\"MODEL_NAME = %s\" % MODEL_NAME)\nTF_VERSION = \"1.3\" # use 1.15 in Docker container\nPATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph_tf_%s.pb' % TF_VERSION\nPATH_TO_LABELS = 'light_classification/training/label_map.pbtxt'\nNUM_CLASSES = 3 \nSCORE_THRESH = 0.85\nclass_lookup = {\n 1 : TrafficLight.GREEN,\n 2 : TrafficLight.YELLOW,\n 3 : TrafficLight.RED,\n}\n\nclass NullContextManager(object):\n def __init__(self, dummy_resource=None):\n self.dummy_resource = dummy_resource\n def __enter__(self):\n return self.dummy_resource\n def __exit__(self, *args):\n pass\n\nclass TLClassifier(object):\n def __init__(self):\n #TODO load classifier\n self.detection_graph, self.label_map, self.categories, self.category_index = self.import_graph()\n self.tf_config = tf.ConfigProto()\n self.tf_config.gpu_options.allow_growth = True\n # TODO: check if we need detection_graph.as_default here\n self.sess = tf.Session(graph=self.detection_graph, config=self.tf_config)\n # Run fake data during init to warm up TensorFlow's memory allocator\n warmup_iter = 10\n for iter in range(warmup_iter):\n synth_data = np.random.randint(low=0, high=255, size=(600, 800, 3), dtype=np.uint8)\n self.inference(synth_data)\n light_detector_pub = rospy.Publisher('/tl_detections', String, queue_size=1)\n light_detector_pub.publish(String(\"Light detector bootstrap executed. 
Synthetic data passed through model without errors.\"))\n\n def import_graph(self):\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n return detection_graph, label_map, categories, category_index\n\n def inference(self, image):\n image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n\n image_np_expanded = np.expand_dims(image, axis=0)\n\n (boxes, scores, classes, num) = self.sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n return boxes, scores, classes, num\n\n def get_classification(self, image):\n \"\"\"Determines the color of the traffic light in the image\n\n Args:\n image (cv::Mat): image containing the traffic light\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n\n boxes, scores, classes, num = self.inference(image)\n scores = scores[0]\n classes = classes[0]\n good_scores = np.argwhere(scores > SCORE_THRESH)\n good_classes = classes[good_scores]\n if len(good_scores) < 1:\n # No detections\n return TrafficLight.UNKNOWN\n class_mode = int(mode(good_classes)[0][0][0])\n return class_lookup[class_mode]\n" ]
[ [ "scipy.stats.mode", "tensorflow.Graph", "tensorflow.Session", "tensorflow.GraphDef", "tensorflow.import_graph_def", "tensorflow.gfile.GFile", "tensorflow.ConfigProto", "numpy.random.randint", "numpy.argwhere", "numpy.expand_dims" ] ]
DiptoChakrabarty/sentimental
[ "9327ed6ce43ec5317b72d7fd237820d4045cd9fb" ]
[ "app.py" ]
[ "from flask import Flask, render_template,url_for,request\nfrom sklearn.externals import joblib\nimport pandas as pd\nimport numpy as np \nfrom sklearn.feature_extraction.text import CountVectorizer\nimport string,pickle\nfrom sentiment import vector,tf\nfrom sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer,TfidfTransformer\n\ndef patternremove(text,pattern):\n reg = re.findall(pattern,text)\n for pat in reg:\n text = re.sub(pat,\"\",text)\n return text\n\ndef count_punct(text):\n count = sum([1 for char in text if char in string.punctuation ])\n return round(count/(len(text) - text.count(\" \")),3)*100\n\n\napp = Flask(__name__)\n\n\[email protected](\"/\")\ndef home():\n return render_template(\"home.html\")\n\n\[email protected](\"/predict\",methods=[\"POST\"])\ndef predict():\n if request.method == \"POST\":\n print(1)\n file = open(\"feature.pkl\",'rb')\n cv= pickle.load(file)\n print(2)\n msg = request.form[\"message\"]\n data = [msg]\n body_len = pd.DataFrame([len(data)- data.count(\" \")])\n print(4)\n vect = pd.DataFrame(cv.transform(data).toarray())\n print(3)\n punct = pd.DataFrame([count_punct(data)])\n total_data = pd.concat([punct,vect],axis=1)\n\n log = joblib.load(\"model.pkl\")\n pred = log.predict(total_data)\n print(pred)\n return render_template(\"predict.html\",pred=pred)\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\",debug=True)" ]
[ [ "sklearn.externals.joblib.load", "pandas.concat" ] ]
cardiffgravity/em-followup
[ "b3df83f3d4aacef67284d2b82a6cc7c32d5b87df" ]
[ "download_data.py" ]
[ "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 17 10:02:19 2018\n\n@author: lewisprole\n\"\"\"\n\nimport os\nimport time\nimport sys\nimport argparse\nimport calendar\nimport requests\nimport numpy as np\n\n\n'''\nlewis notes\ncreate txt file called userdata.txt\nwrite:\nLCOGT archive username, password, datafolder, and the name of the proposals\ndatafolder (I think) is the name of where you want to save files \nnames of the proposals separated by commas \n\ne.g.\nusername = \npassword = \ndatafolder = \nproposals = \n\nin command prompt \npython Documents/University/year3/summer_project/download_data.py -sdate 2016-04-01 -proposalID \"MyProp-001\" -datafolder \"/Users/lewisprole/Documents/University/year3/summer_project/LCO_images\"\nset start date, ID, and directory\n\nmake a directory path to userdata.dat\n'''\n\n\n#def parse_args():\n# \"\"\"Parse command-line inputs\"\"\"\n#\n# parser = argparse.ArgumentParser()\n# parser.add_argument('-sdate', default=None,\n# help='Start date for search [YYYY-MM-DD]')\n# parser.add_argument('-edate', default=None,\n# help='End date for search. [YYYY-MM-DD]')\n# parser.add_argument('-proposalID', default=None,\n# help='List of proposals to search for')\n# parser.add_argument('-datafolder', default=None,\n# help='Directory where the data will be downloaded into.')\n# parser.add_argument('-flatdir', action='store_true',\n# help='Use a flat directory structure instead of sorting into date subdirectories')\n# parser.add_argument('-spectra', action='store_true',\n# help='Only download NRES spectral packages (ending w/ .tar.gz)')\n#\n# args = parser.parse_args()\n#\n# return args\nsdate='2018-06-19'\nedate='2018-08-19'\nproposalID=\"LCOEPO2018A-004\"\nPATH=\"/Users/lewisprole/Documents/University/year3/summer_project\"\ndatafolder=\"/Users/lewisprole/Documents/University/year3/summer_project/LCO_images\"\nspectra=False\n\ndef download_frames(sdate, edate, headers, prop, datafolder):\n \"\"\"Download files\n This function downloads all the frames for a given range of dates, querying\n 50 frames at a time (i.e., if 150 frames have to be downloaded, the process \n is repeated 3 times, each time downloading 50 frames). This number \n assumes connections can be as bad as to be able to download only ~1 Mb per \n minute (each get request shares file urls that last 48 hours only), assuming \n 60 MB frames (worst case scenarios).\n \n It returns the number of total identified frames for the given range and the \n number of frames downloaded (which is equal to the number of identified frames \n if no data for that time range was detected on the system).\n Args:\n sdate (time.time): Search for data collected on this date or later\n edate (time.time): Search for data collected before this date\n headers (dict): authentication token from the LCO archive\n prop (list): List of proposal IDs to search for\n datafolder (string): Directory to put the data\n Returns:\n tuple: list of files found on the archive, list of files actually downloaded\n \"\"\"\n nidentified = 0\n ndownloaded = 0\n response = requests.get('https://archive-api.lco.global/frames/?' +\n 'limit=50&' +\n 'RLEVEL=91&' +\n 'start='+sdate+'&' +\n 'end='+edate+'&' +\n 'PROPID=' + prop,\n headers=headers).json()\n print(response)\n frames = response['results']\n print(frames)\n if len(frames) != 0:\n print('\\t > Frames identified for the '+sdate+'/'+edate+' period. 
Checking frames...')\n while True:\n for frame in frames:\n print(frame)\n nidentified += 1\n # Get date of current image frame:\n date = frame['OBJECT']\n\n # Create new folder with the date if not already there:\n \n \n outpath = os.path.join(datafolder, 'raw', date)\n if not os.path.exists(outpath):\n os.mkdir(outpath)\n\n # Check if file is already on disk and that is not a _cat.fits. If not there\n # and is not a _cat.fits, download the file:\n if not os.path.exists(os.path.join(outpath, frame['filename'])) and\\\n '_cat.fits' != frame['filename'][-9:]:\n if spectra and not frame['filename'].endswith('.tar.gz'):\n continue\n print('\\t + File '+frame['filename']+' not found in '+outpath)\n print('\\t Downloading ...')\n with open(os.path.join(outpath, frame['filename']), 'wb') as f:\n f.write(requests.get(frame['url']).content)\n ndownloaded += 1\n if response.get('next'):\n response = requests.get(response['next'], headers=headers).json()\n frames = response['results']\n else:\n break\n return nidentified, ndownloaded\n\n\ndef get_headers_from_token(username, password):\n \"\"\"\n This function gets an authentication token from the LCO archive.\n Args:\n username (string): User name for LCO archive\n password (string): Password for LCO archive\n Returns:\n dict: LCO authentication token\n \"\"\"\n # Get LCOGT token:\n response = requests.post('https://archive-api.lco.global/api-token-auth/',\n data={'username': username,\n 'password': password}\n ).json()\n\n token = response.get('token')\n\n # Store the Authorization header\n headers = {'Authorization': 'Token ' + token}\n return headers\n\n\nif __name__ == '__main__':\n# args = parse_args()\n\n starting_date = sdate\n ending_date = edate\n propID = proposalID\n dfolder = datafolder\n\n print('\\n\\t ----------------------------------------------')\n print('\\t lcogtDD v.1.2.\\n')\n print('\\t Author: Nestor Espinoza ([email protected])')\n print('\\t (github@nespinoza)')\n print('\\t w/ contributions from: BJ Fulton ([email protected])')\n print('\\t (github@bjfultn)')\n print('\\t ----------------------------------------------\\n')\n # Check that user input is ok:\n if starting_date is None:\n print('\\t lcogtDD input error: Please insert a starting date from which')\n print('\\t to download data from. 
Usage example:\\n')\n print('\\t python download_data -sdate 2016-04-01')\n print('\\n')\n sys.exit()\n\n # Get current date (in order to explore it, we need to leave\n # ending_date = ending_date + 1 day:\n if ending_date is None:\n ending_date = time.strftime(\"%Y-%m-%d\")\n print('\\t > Checking data from {} to {}...\\n'.format(starting_date, ending_date))\n c_y, c_m, c_d = ending_date.split('-')\n if int(c_d) + 1 <= calendar.monthrange(int(c_y), int(c_m))[-1]:\n ending_date = c_y + '-' + c_m + '-' + str(int(c_d) + 1)\n elif int(c_m) + 1 <= 12:\n ending_date = c_y + '-' + str(int(c_m) + 1) + '-01'\n else:\n ending_date = str(int(c_y) + 1) + '-01-01'\n else:\n print('\\t > Checking data from {} to {}...\\n'.format(starting_date, ending_date))\n # Get data from user file:\n f = open(PATH+'/userdata.txt', 'r')\n username = (f.readline().split('=')[-1]).split()[0]\n password = (f.readline().split('=')[-1]).split()[0]\n datafolder = (f.readline().split('=')[-1]).split()[0]\n proposals = (f.readline().split('=')[-1]).split(',')\n\n if propID is not None:\n proposals = propID.split(',')\n\n if dfolder is not None:\n datafolder = dfolder\n\n print('\\t > Proposals from which data will be fetched: {}'.format(' '.join(proposals)))\n for i in range(len(proposals)):\n proposals[i] = proposals[i].split()[0]\n f.close()\n\n # Create raw folder inside data folder if not existent:\n if not os.path.exists(datafolder + '/raw/'):\n os.mkdir(datafolder + '/raw/')\n\n headers = get_headers_from_token(username, password)\n\n # Get frame names from starting to ending date:\n for prop in proposals:\n prop_frame_names = np.array([])\n prop_frame_urls = np.array([])\n c_y, c_m, c_d = starting_date.split('-')\n e_y, e_m, e_d = np.array(ending_date.split('-')).astype('int')\n while True:\n sdate = c_y + '-' + c_m + '-' + c_d\n if int(c_d) + 1 <= calendar.monthrange(int(c_y), int(c_m))[-1]:\n edate = c_y + '-' + c_m + '-' + str(int(c_d) + 1)\n elif int(c_m) + 1 <= 12:\n edate = c_y + '-' + str(int(c_m) + 1) + '-01'\n else:\n edate = str(int(c_y) + 1) + '-01-01'\n\n # Download frames in the defined time ranges:\n nidentified, ndownloaded = download_frames(sdate, edate, headers, prop, datafolder)\n if nidentified != 0:\n print('\\t Final count: ' + str(nidentified) + ' identified frames, downloaded ' +\n str(ndownloaded) + ' new ones.')\n\n # Get next year, month and day to look for. If it matches the user-defined\n # or current date, then we are done:\n c_y, c_m, c_d = edate.split('-')\n if int(c_y) == e_y and int(c_m) == e_m and int(c_d) == e_d:\n break\n\n print('\\n\\t Done!\\n')" ]
[ [ "numpy.array" ] ]
jlapeyre/qiskit-aqua
[ "6bc884dced3d1f525daa59d90f0c6ab10274282c" ]
[ "qiskit/optimization/problems/linear_expression.py" ]
[ "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Linear expression interface.\"\"\"\n\nfrom typing import List, Union, Dict, Any\n\nfrom numpy import ndarray\nfrom scipy.sparse import spmatrix, dok_matrix\n\nfrom .quadratic_program_element import QuadraticProgramElement\nfrom ..exceptions import QiskitOptimizationError\n\n\nclass LinearExpression(QuadraticProgramElement):\n \"\"\" Representation of a linear expression by its coefficients.\"\"\"\n\n def __init__(self, quadratic_program: Any,\n coefficients: Union[ndarray, spmatrix, List[float],\n Dict[Union[int, str], float]]) -> None:\n \"\"\"Creates a new linear expression.\n\n The linear expression can be defined via an array, a list, a sparse matrix, or a dictionary\n that uses variable names or indices as keys and stores the values internally as a\n dok_matrix.\n\n Args:\n quadratic_program: The parent QuadraticProgram.\n coefficients: The (sparse) representation of the coefficients.\n\n \"\"\"\n super().__init__(quadratic_program)\n self.coefficients = coefficients\n\n def __getitem__(self, i: Union[int, str]) -> float:\n \"\"\"Returns the i-th coefficient where i can be a variable name or index.\n\n Args:\n i: the index or name of the variable corresponding to the coefficient.\n\n Returns:\n The coefficient corresponding to the addressed variable.\n \"\"\"\n if isinstance(i, str):\n i = self.quadratic_program.variables_index[i]\n return self.coefficients[0, i]\n\n def __setitem__(self, i: Union[int, str], value: float) -> None:\n if isinstance(i, str):\n i = self.quadratic_program.variables_index[i]\n self._coefficients[0, i] = value\n\n def _coeffs_to_dok_matrix(self,\n coefficients: Union[ndarray, spmatrix,\n List, Dict[Union[int, str], float]]\n ) -> dok_matrix:\n \"\"\"Maps given 1d-coefficients to a dok_matrix.\n\n Args:\n coefficients: The 1d-coefficients to be mapped.\n\n Returns:\n The given 1d-coefficients as a dok_matrix\n\n Raises:\n QiskitOptimizationError: if coefficients are given in unsupported format.\n \"\"\"\n if isinstance(coefficients, list) or \\\n isinstance(coefficients, ndarray) and len(coefficients.shape) == 1:\n coefficients = dok_matrix([coefficients])\n elif isinstance(coefficients, spmatrix):\n coefficients = dok_matrix(coefficients)\n elif isinstance(coefficients, dict):\n coeffs = dok_matrix((1, self.quadratic_program.get_num_vars()))\n for index, value in coefficients.items():\n if isinstance(index, str):\n index = self.quadratic_program.variables_index[index]\n coeffs[0, index] = value\n coefficients = coeffs\n else:\n raise QiskitOptimizationError(\"Unsupported format for coefficients.\")\n return coefficients\n\n @property\n def coefficients(self) -> dok_matrix:\n \"\"\" Returns the coefficients of the linear expression.\n\n Returns:\n The coefficients of the linear expression.\n \"\"\"\n return self._coefficients\n\n @coefficients.setter\n def coefficients(self,\n coefficients: Union[ndarray, spmatrix,\n List[float], Dict[Union[str, int], float]]\n ) -> None:\n \"\"\"Sets the coefficients of the linear expression.\n\n 
Args:\n coefficients: The coefficients of the linear expression.\n \"\"\"\n self._coefficients = self._coeffs_to_dok_matrix(coefficients)\n\n def to_array(self) -> ndarray:\n \"\"\"Returns the coefficients of the linear expression as an array.\n\n Returns:\n An array with the coefficients corresponding to the linear expression.\n \"\"\"\n return self._coefficients.toarray()[0]\n\n def to_dict(self, use_name: bool = False) -> Dict[Union[int, str], float]:\n \"\"\"Returns the coefficients of the linear expression as a dictionary, either using variable\n names or indices as keys.\n\n Args:\n use_name: Determines whether to use indices or names to refer to variables.\n\n Returns:\n A dictionary with the coefficients corresponding to the linear expression.\n \"\"\"\n if use_name:\n return {self.quadratic_program.variables[k].name: v\n for (_, k), v in self._coefficients.items()}\n else:\n return {k: v for (_, k), v in self._coefficients.items()}\n\n def evaluate(self, x: Union[ndarray, List, Dict[Union[int, str], float]]) -> float:\n \"\"\"Evaluate the linear expression for given variables.\n\n Args:\n x: The values of the variables to be evaluated.\n\n Returns:\n The value of the linear expression given the variable values.\n \"\"\"\n # cast input to dok_matrix if it is a dictionary\n x = self._coeffs_to_dok_matrix(x)\n\n # compute the dot-product of the input and the linear coefficients\n val = (x @ self.coefficients.transpose())[0, 0]\n\n # return the result\n return val\n" ]
[ [ "scipy.sparse.dok_matrix" ] ]
djzelenak/ard-clip-auxiliary
[ "0d83abdc8d8dddae23d31f9e043b098f9dc7cfc3" ]
[ "geo_utils.py" ]
[ "from collections import namedtuple\r\nimport numpy as np\r\n\r\n\r\nclass GetExtents:\r\n\r\n GeoExtent = namedtuple('GeoExtent', ['x_min', 'y_max', 'x_max', 'y_min'])\r\n GeoAffine = namedtuple('GeoAffine', ['ul_x', 'x_res', 'rot_1', 'ul_y', 'rot_2', 'y_res'])\r\n GeoCoordinate = namedtuple('GeoCoordinate', ['x', 'y'])\r\n RowColumn = namedtuple('RowColumn', ['row', 'column'])\r\n RowColumnExtent = namedtuple('RowColumnExtent', ['start_row', 'start_col', 'end_row', 'end_col'])\r\n\r\n CONUS_EXTENT = GeoExtent(x_min=-2565585,\r\n y_min=14805,\r\n x_max=2384415,\r\n y_max=3314805)\r\n\r\n def __init__(self, h, v):\r\n \"\"\"\r\n \r\n :param h: \r\n :param v: \r\n \"\"\"\r\n\r\n self.H = h\r\n\r\n self.V = v\r\n\r\n self.TILE_EXTENT, self.PIXEL_AFFINE = self.geospatial_hv(self.H, self.V)\r\n\r\n self.chip_ulx_coords = [x for x in range(self.TILE_EXTENT.x_min, self.TILE_EXTENT.x_max, 3000)]\r\n self.chip_uly_coords = [y for y in range(self.TILE_EXTENT.y_max, self.TILE_EXTENT.y_min, -3000)]\r\n\r\n self.CHIP_UL = [self.GeoCoordinate(x=i, y=j) for j in self.chip_uly_coords for i in self.chip_ulx_coords]\r\n\r\n self.CHIP_EXTENTS = {ind + 1: self.get_chip_extent(chip_coord[0], chip_coord[1]) for ind, chip_coord in\r\n enumerate(self.CHIP_UL)}\r\n\r\n def geo_to_rowcol(self, affine, coord):\r\n \"\"\"\r\n Transform geo-coordinate to row/col given a reference affine.\r\n \r\n Yline = (Ygeo - GT(3) - Xpixel*GT(4)) / GT(5)\r\n Xpixel = (Xgeo - GT(0) - Yline*GT(2)) / GT(1)\r\n \r\n :param affine: \r\n :param coord: \r\n :return: \r\n \"\"\"\r\n\r\n row = (coord.y - affine.ul_y - affine.ul_x * affine.rot_2) / affine.y_res\r\n col = (coord.x - affine.ul_x - affine.ul_y * affine.rot_1) / affine.x_res\r\n\r\n return self.RowColumn(row=int(row),\r\n column=int(col))\r\n\r\n def rowcol_to_geo(self, affine, rowcol):\r\n \"\"\"\r\n Transform a row/col into a geospatial coordinate given reference affine.\r\n \r\n Xgeo = GT(0) + Xpixel*GT(1) + Yline*GT(2)\r\n Ygeo = GT(3) + Xpixel*GT(4) + Yline*GT(5)\r\n \r\n :param affine: \r\n :param rowcol: \r\n :return: \r\n \"\"\"\r\n\r\n x = affine.ul_x + rowcol.column * affine.x_res + rowcol.row * affine.rot_1\r\n y = affine.ul_y + rowcol.column * affine.rot_2 + rowcol.row * affine.y_res\r\n\r\n return self.GeoCoordinate(x=x, y=y)\r\n\r\n\r\n def geospatial_hv(self, h, v, loc=CONUS_EXTENT):\r\n \"\"\"\r\n Geospatial extent and 30m affine for a given ARD grid location.\r\n \r\n :param h: \r\n :param v: \r\n :param loc: \r\n :return: \r\n \"\"\"\r\n\r\n xmin = loc.x_min + h * 5000 * 30\r\n xmax = loc.x_min + h * 5000 * 30 + 5000 * 30\r\n ymax = loc.y_max - v * 5000 * 30\r\n ymin = loc.y_max - v * 5000 * 30 - 5000 * 30\r\n\r\n return (self.GeoExtent(x_min=xmin, x_max=xmax, y_max=ymax, y_min=ymin),\r\n self.GeoAffine(ul_x=xmin, x_res=30, rot_1=0, ul_y=ymax, rot_2=0, y_res=-30))\r\n\r\n\r\n def get_chip_extent(self, chip_ulx, chip_uly):\r\n \"\"\"\r\n \r\n :param chip_ulx: \r\n :param chip_uly: \r\n :return: \r\n \"\"\"\r\n\r\n return self.GeoExtent(x_min=chip_ulx, x_max=chip_ulx + 3000,\r\n y_min=chip_uly - 3000, y_max=chip_uly)\r\n\r\n def get_pixel_coords(self, chip_extent):\r\n \"\"\"\r\n Generate the pixel ul coordinates\r\n :param chip_ul: \r\n :return: \r\n \"\"\"\r\n\r\n chip_array = np.zeros((100,100))\r\n\r\n coord_keys = [(i, j) for i in range(100) for j in range(100)]\r\n\r\n pixel_x0 = chip_extent.x_min # + 15\r\n pixel_y0 = chip_extent.y_max # - 15\r\n\r\n pixel_x_coords = [pixel_x0 + (i * 30) for i in range(100)]\r\n pixel_y_coords = [pixel_y0 - (i * 30) 
for i in range(100)]\r\n\r\n pixel_dict = {coord_keys[ind_x + ind_y * 100] : self.GeoCoordinate(x=x, y=y)\r\n for ind_y, y in enumerate(pixel_y_coords)\r\n for ind_x, x in enumerate(pixel_x_coords)}\r\n\r\n return pixel_dict\r\n" ]
[ [ "numpy.zeros" ] ]
Alexsandruss/sklearn-onnx
[ "b612557615df439e471867a676c9eca8ae4a787c" ]
[ "skl2onnx/operator_converters/sgd_classifier.py" ]
[ "# SPDX-License-Identifier: Apache-2.0\n\n\nimport numpy as np\nfrom ..common._apply_operation import (\n apply_add, apply_cast, apply_clip, apply_concat, apply_div, apply_exp,\n apply_identity, apply_mul, apply_reciprocal, apply_reshape, apply_sub)\nfrom ..common.data_types import (\n BooleanTensorType, Int64TensorType, guess_numpy_type)\nfrom ..common._registration import register_converter\nfrom ..common.utils_classifier import get_label_classes\nfrom ..proto import onnx_proto\n\n\ndef _decision_function(scope, operator, container, model):\n \"\"\"Predict for linear model.\n score = X * coefficient + intercept\n \"\"\"\n coef_name = scope.get_unique_variable_name('coef')\n intercept_name = scope.get_unique_variable_name('intercept')\n matmul_result_name = scope.get_unique_variable_name(\n 'matmul_result')\n score_name = scope.get_unique_variable_name('score')\n coef = model.coef_.T\n\n container.add_initializer(coef_name, onnx_proto.TensorProto.FLOAT,\n coef.shape, coef.ravel())\n container.add_initializer(intercept_name, onnx_proto.TensorProto.FLOAT,\n model.intercept_.shape, model.intercept_)\n\n input_name = operator.inputs[0].full_name\n if type(operator.inputs[0].type) in (BooleanTensorType, Int64TensorType):\n cast_input_name = scope.get_unique_variable_name('cast_input')\n\n apply_cast(scope, operator.input_full_names, cast_input_name,\n container, to=onnx_proto.TensorProto.FLOAT)\n input_name = cast_input_name\n\n container.add_node(\n 'MatMul', [input_name, coef_name],\n matmul_result_name,\n name=scope.get_unique_operator_name('MatMul'))\n apply_add(scope, [matmul_result_name, intercept_name],\n score_name, container, broadcast=0)\n return score_name\n\n\ndef _handle_zeros(scope, container, proba, reduced_proba, num_classes):\n \"\"\"Handle cases where reduced_proba values are zeros to avoid NaNs in\n class probability scores because of divide by 0 when we calculate\n proba / reduced_proba in _normalise_proba().\n This is done by replacing reduced_proba values of 0s with\n num_classes and corresponding proba values with 1.\n \"\"\"\n num_classes_name = scope.get_unique_variable_name('num_classes')\n bool_reduced_proba_name = scope.get_unique_variable_name(\n 'bool_reduced_proba')\n bool_not_reduced_proba_name = scope.get_unique_variable_name(\n 'bool_not_reduced_proba')\n not_reduced_proba_name = scope.get_unique_variable_name(\n 'not_reduced_proba')\n proba_updated_name = scope.get_unique_variable_name('proba_updated')\n mask_name = scope.get_unique_variable_name('mask')\n reduced_proba_updated_name = scope.get_unique_variable_name(\n 'reduced_proba_updated')\n\n container.add_initializer(num_classes_name, onnx_proto.TensorProto.FLOAT,\n [], [num_classes])\n\n apply_cast(scope, reduced_proba, bool_reduced_proba_name, container,\n to=onnx_proto.TensorProto.BOOL)\n container.add_node('Not', bool_reduced_proba_name,\n bool_not_reduced_proba_name,\n name=scope.get_unique_operator_name('Not'))\n apply_cast(scope, bool_not_reduced_proba_name, not_reduced_proba_name,\n container, to=onnx_proto.TensorProto.FLOAT)\n apply_add(scope, [proba, not_reduced_proba_name],\n proba_updated_name, container, broadcast=1)\n apply_mul(scope, [not_reduced_proba_name, num_classes_name],\n mask_name, container, broadcast=1)\n apply_add(scope, [reduced_proba, mask_name],\n reduced_proba_updated_name, container, broadcast=0)\n return proba_updated_name, reduced_proba_updated_name\n\n\ndef _normalise_proba(scope, operator, container, proba, num_classes,\n unity_name):\n reduced_proba_name = 
scope.get_unique_variable_name('reduced_proba')\n sub_result_name = scope.get_unique_variable_name('sub_result')\n\n if num_classes == 2:\n apply_sub(scope, [unity_name, proba],\n sub_result_name, container, broadcast=1)\n apply_concat(scope, [sub_result_name, proba],\n operator.outputs[1].full_name, container, axis=1)\n else:\n if container.target_opset < 13:\n container.add_node(\n 'ReduceSum', proba, reduced_proba_name, axes=[1],\n name=scope.get_unique_operator_name('ReduceSum'))\n else:\n axis_name = scope.get_unique_variable_name('axis')\n container.add_initializer(\n axis_name, onnx_proto.TensorProto.INT64, [1], [1])\n container.add_node(\n 'ReduceSum', [proba, axis_name], reduced_proba_name,\n name=scope.get_unique_operator_name('ReduceSum'))\n proba_updated, reduced_proba_updated = _handle_zeros(\n scope, container, proba, reduced_proba_name, num_classes)\n apply_div(scope, [proba_updated, reduced_proba_updated],\n operator.outputs[1].full_name, container, broadcast=1)\n return operator.outputs[1].full_name\n\n\ndef _predict_proba_log(scope, operator, container, scores, num_classes):\n \"\"\"Probability estimation for SGDClassifier with loss=log and\n Logistic Regression.\n Positive class probabilities are computed as\n 1. / (1. + exp(-scores))\n multiclass is handled by normalising that over all classes.\n \"\"\"\n negate_name = scope.get_unique_variable_name('negate')\n negated_scores_name = scope.get_unique_variable_name('negated_scores')\n exp_result_name = scope.get_unique_variable_name('exp_result')\n unity_name = scope.get_unique_variable_name('unity')\n add_result_name = scope.get_unique_variable_name('add_result')\n proba_name = scope.get_unique_variable_name('proba')\n\n container.add_initializer(negate_name, onnx_proto.TensorProto.FLOAT,\n [], [-1])\n container.add_initializer(unity_name, onnx_proto.TensorProto.FLOAT,\n [], [1])\n\n apply_mul(scope, [scores, negate_name],\n negated_scores_name, container, broadcast=1)\n apply_exp(scope, negated_scores_name, exp_result_name, container)\n apply_add(scope, [exp_result_name, unity_name],\n add_result_name, container, broadcast=1)\n apply_reciprocal(scope, add_result_name, proba_name, container)\n return _normalise_proba(scope, operator, container, proba_name,\n num_classes, unity_name)\n\n\ndef _predict_proba_modified_huber(scope, operator, container,\n scores, num_classes):\n \"\"\"Probability estimation for SGDClassifier with\n loss=modified_huber.\n Multiclass probability estimates are derived from binary\n estimates by normalisation.\n Binary probability estimates are given by\n (clip(scores, -1, 1) + 1) / 2.\n \"\"\"\n dtype = guess_numpy_type(operator.inputs[0].type)\n if dtype != np.float64:\n dtype = np.float32\n unity_name = scope.get_unique_variable_name('unity')\n constant_name = scope.get_unique_variable_name('constant')\n add_result_name = scope.get_unique_variable_name('add_result')\n proba_name = scope.get_unique_variable_name('proba')\n clipped_scores_name = scope.get_unique_variable_name('clipped_scores')\n\n container.add_initializer(unity_name, onnx_proto.TensorProto.FLOAT,\n [], [1])\n container.add_initializer(constant_name, onnx_proto.TensorProto.FLOAT,\n [], [2])\n\n apply_clip(scope, scores, clipped_scores_name, container,\n max=np.array(1, dtype=dtype),\n min=np.array(-1, dtype=dtype))\n apply_add(scope, [clipped_scores_name, unity_name],\n add_result_name, container, broadcast=1)\n apply_div(scope, [add_result_name, constant_name],\n proba_name, container, broadcast=1)\n return 
_normalise_proba(scope, operator, container, proba_name,\n num_classes, unity_name)\n\n\ndef convert_sklearn_sgd_classifier(scope, operator, container):\n \"\"\"Converter for SGDClassifier.\"\"\"\n sgd_op = operator.raw_operator\n classes = get_label_classes(scope, sgd_op)\n class_type = onnx_proto.TensorProto.STRING\n\n if np.issubdtype(classes.dtype, np.floating):\n class_type = onnx_proto.TensorProto.INT32\n classes = classes.astype(np.int32)\n elif np.issubdtype(classes.dtype, np.signedinteger):\n class_type = onnx_proto.TensorProto.INT32\n else:\n classes = np.array([s.encode('utf-8') for s in classes])\n\n classes_name = scope.get_unique_variable_name('classes')\n predicted_label_name = scope.get_unique_variable_name(\n 'predicted_label')\n final_label_name = scope.get_unique_variable_name('final_label')\n\n container.add_initializer(classes_name, class_type,\n classes.shape, classes)\n\n scores = _decision_function(scope, operator, container, sgd_op)\n options = container.get_options(sgd_op, dict(raw_scores=False))\n use_raw_scores = options['raw_scores']\n if sgd_op.loss == 'log' and not use_raw_scores:\n proba = _predict_proba_log(scope, operator, container, scores,\n len(classes))\n elif sgd_op.loss == 'modified_huber' and not use_raw_scores:\n proba = _predict_proba_modified_huber(\n scope, operator, container, scores, len(classes))\n else:\n if len(classes) == 2:\n negate_name = scope.get_unique_variable_name('negate')\n negated_scores_name = scope.get_unique_variable_name(\n 'negated_scores')\n\n container.add_initializer(\n negate_name, onnx_proto.TensorProto.FLOAT, [], [-1])\n\n apply_mul(scope, [scores, negate_name],\n negated_scores_name, container, broadcast=1)\n apply_concat(scope, [negated_scores_name, scores],\n operator.outputs[1].full_name, container, axis=1)\n else:\n apply_identity(scope, scores,\n operator.outputs[1].full_name, container)\n proba = operator.outputs[1].full_name\n\n container.add_node('ArgMax', proba,\n predicted_label_name,\n name=scope.get_unique_operator_name('ArgMax'), axis=1)\n container.add_node(\n 'ArrayFeatureExtractor', [classes_name, predicted_label_name],\n final_label_name, op_domain='ai.onnx.ml',\n name=scope.get_unique_operator_name('ArrayFeatureExtractor'))\n if class_type == onnx_proto.TensorProto.INT32:\n reshaped_final_label_name = scope.get_unique_variable_name(\n 'reshaped_final_label')\n\n apply_reshape(scope, final_label_name, reshaped_final_label_name,\n container, desired_shape=(-1,))\n apply_cast(scope, reshaped_final_label_name,\n operator.outputs[0].full_name, container,\n to=onnx_proto.TensorProto.INT64)\n else:\n apply_reshape(scope, final_label_name,\n operator.outputs[0].full_name, container,\n desired_shape=(-1,))\n\n\nregister_converter('SklearnSGDClassifier',\n convert_sklearn_sgd_classifier,\n options={'zipmap': [True, False, 'columns'],\n 'nocl': [True, False],\n 'raw_scores': [True, False]})\n" ]
[ [ "numpy.array", "numpy.issubdtype" ] ]
zxlzr/atec_back
[ "dff0fd07fef1879ee6ad51b7907ee895ef308acb" ]
[ "tf/pred_simese_cnn.py" ]
[ "# !/usr/bin/env python\nimport sys\nimport os\n\nimport tensorflow as tf\n\nfrom dataset import Dataset\nfrom train import FLAGS\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\" \nFLAGS.model_dir = '../model/test1/'\nFLAGS.max_document_length = 34\nUpload=False\n\ndef main(input_file, output_file):\n \n graph = tf.Graph()\n with graph.as_default(): # with tf.Graph().as_default() as g:\n sess = tf.Session()\n with sess.as_default():\n # Load the saved meta graph and restore variables\n # saver = tf.train.Saver(tf.global_variables())\n meta_file = os.path.abspath(os.path.join(FLAGS.model_dir, 'checkpoints/model-8700.meta'))\n new_saver = tf.train.import_meta_graph(meta_file)\n #new_saver.restore(sess, tf.train.latest_checkpoint(os.path.join(FLAGS.model_dir, 'checkpoints')))\n new_saver.restore(sess, tf.train.latest_checkpoint(os.path.join(FLAGS.model_dir, 'checkpoints')))\n # graph = tf.get_default_graph()\n\n # Get the placeholders from the graph by name\n # input_x1 = graph.get_operation_by_name(\"input_x1\").outputs[0]\n input_x1 = graph.get_tensor_by_name(\"input_x1:0\") # Tensor(\"input_x1:0\", shape=(?, 15), dtype=int32)\n input_x2 = graph.get_tensor_by_name(\"input_x2:0\")\n dropout_keep_prob = graph.get_tensor_by_name(\"dropout_keep_prob:0\")\n #dropout_emb = graph.get_tensor_by_name(\"dropout_emb:0\")\n # Tensors we want to evaluate\n y_pred = graph.get_tensor_by_name(\"metrics/y_pred:0\")\n # vars = tf.get_collection('vars')\n # for var in vars:\n # print(var)\n\n e = graph.get_tensor_by_name(\"cosine:0\")\n\n # Generate batches for one epoch\n dataset = Dataset(data_file=input_file, is_training=False)\n data = dataset.process_data(data_file=input_file, sequence_length=FLAGS.max_document_length)\n batches = dataset.batch_iter(data, FLAGS.batch_size, 1, shuffle=False)\n with open(output_file, 'w') as fo:\n print(\"\\nPredicting...\\n\")\n lineno = 1\n for batch in batches:\n #print batch\n #exit(1)\n x1_batch, x2_batch, _, _ = zip(*batch)\n y_pred_ = sess.run([y_pred], {input_x1: x1_batch, input_x2: x2_batch, dropout_keep_prob: 1.0})\n for pred in y_pred_[0]:\n fo.write('{}\\t{}\\n'.format(lineno, int(pred)))\n lineno += 1\n\nif __name__ == '__main__':\n # Set to INFO for tracking training, default is WARN. ERROR for least messages\n tf.logging.set_verbosity(tf.logging.WARN)\n main(sys.argv[1], sys.argv[2])\n \n if Upload==False:\n file = open(sys.argv[1]) \n y_true=[]\n for line in file:\n y_true.append(int(line.strip().split('\\t')[3]))\n file.close()\n file = open(sys.argv[2]) \n y_pred=[]\n\n for line in file:\n y_pred.append(int(line.strip().split('\\t')[1]))\n file.close()\n #print(y_true)\n from sklearn import metrics\n import numpy as np\n #####\n # Do classification task, \n # then get the ground truth and the predict label named y_true and y_pred\n precision = metrics.precision_score(y_true, y_pred)\n recall = metrics.recall_score(y_true, y_pred)\n score = metrics.f1_score(y_true, y_pred,average='binary')\n print(precision)\n print(recall)\n print('score: {0:f}'.format(score))\n" ]
[ [ "tensorflow.logging.set_verbosity", "tensorflow.Graph", "tensorflow.Session", "tensorflow.train.import_meta_graph", "sklearn.metrics.precision_score", "sklearn.metrics.f1_score", "sklearn.metrics.recall_score" ] ]
UmbWill/napari
[ "f6893b18c26c2004b112ba06f53cb4087728aa88" ]
[ "napari/_qt/experimental/render/test_image.py" ]
[ "\"\"\"Create test images\n\nThis is a throw-away file for creating a test image for octree rendering\ndevelopment. If we keep test images in the product long term we'll\nhave a nicer way to generate them.\n\"\"\"\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\n\n\ndef draw_text(image, text, nx=0.5, ny=0.5):\n\n font = ImageFont.truetype('Arial Black.ttf', size=72)\n (text_width, text_height) = font.getsize(text)\n x = nx * image.width - text_width / 2\n y = ny * image.height - text_height / 2\n\n color = 'rgb(255, 255, 255)' # white\n\n draw = ImageDraw.Draw(image)\n draw.text((x, y), text, fill=color, font=font)\n draw.rectangle([0, 0, image.width, image.height], width=5)\n\n\ndef draw_text_tiled(image, text, nrows=1, ncols=1):\n\n print(f\"Creating {nrows}x{ncols} text image: {text}\")\n\n try:\n font = ImageFont.truetype('Arial Black.ttf', size=74)\n except OSError:\n font = ImageFont.load_default()\n (text_width, text_height) = font.getsize(text)\n\n color = 'rgb(255, 255, 255)' # white\n draw = ImageDraw.Draw(image)\n\n for row in range(nrows + 1):\n for col in range(ncols + 1):\n x = (col / ncols) * image.width - text_width / 2\n y = (row / nrows) * image.height - text_height / 2\n\n draw.text((x, y), text, fill=color, font=font)\n draw.rectangle([0, 0, image.width, image.height], outline=color, width=5)\n\n\ndef create_text_array(text, nx=0.5, ny=0.5, size=(1024, 1024)):\n text = str(text)\n image = Image.new('RGB', size)\n draw_text(image, text, nx, ny)\n return np.array(image)\n\n\ndef create_tiled_text_array(text, nrows, ncols, size=(1024, 1024)):\n text = str(text)\n image = Image.new('RGB', size)\n draw_text_tiled(image, text, nrows, ncols)\n return np.array(image)\n\n\ndef create_tiled_test_1(text, nrows, ncols, size=(1024, 1024)):\n text = str(text)\n image = Image.new('RGB', size)\n draw_text_tiled(image, text, nrows, ncols)\n return np.array(image)\n" ]
[ [ "numpy.array" ] ]
orion-orion/TipDMCup20
[ "0718fbd8f2714c5a67b9dd3ce8660085fc78a766" ]
[ "feature_eng.py" ]
[ "'''\nDescripttion: \nVersion: 1.0\nAuthor: ZhangHongYu\nDate: 2021-02-18 13:15:08\nLastEditors: ZhangHongYu\nLastEditTime: 2022-05-16 15:36:04\n'''\nimport pandas as pd\nimport numpy as np\nimport os\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom xgboost import XGBClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn import model_selection\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn import decomposition\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_selection import VarianceThreshold\nfrom scipy.interpolate import interp1d\nimport joblib\n\n# ๆ•ฐๆฎๅญ˜ๆ”พ็›ฎๅฝ•ๅฎšไน‰\ndata_root = './data'\n# ็”จไบŽ็‰นๅพ้€‰ๆ‹ฉ็š„ๆจกๅž‹็š„็›ฎๅฝ•\nfeatures_model_root = './features_model'\n# ็”จไบŽไฟๅญ˜ๆจกๅž‹็‰นๅพไฟกๆฏ็š„็›ฎๅฝ•\nfeatures_imp_root = './features_imp'\n\npca_dim = 10 # pca ้™็ปดๅŽ็š„็ปดๅบฆ\n\ntop_n = 30\n\n# ็”จไบŽ็‰นๅพ้€‰ๆ‹ฉ็š„ๆจกๅž‹ๅฎšไน‰\nmodels={}\n\n\nmodels.update({'dt':\n DecisionTreeClassifier(random_state=0)\n})\nmodels.update({'rf': \n RandomForestClassifier(random_state=0)\n})\nmodels.update({'et': \n ExtraTreesClassifier(random_state=0)\n})\nmodels.update({'xgb': \n XGBClassifier(random_state=0)\n})\n# models.update({'mlp': \n# MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(15,), random_state=1)\n# })\n\n\n# ็”จไบŽ็‰นๅพ้€‰ๆ‹ฉ็š„ๆจกๅž‹็š„่ถ…ๅ‚ๆ•ฐๆœ็ดข่Œƒๅ›ดๅฎšไน‰\nparam_grids = {}\nparam_grids.update({\n 'dt':\n { 'min_samples_split': [2, 4], 'max_depth': [12]}\n})\nparam_grids.update({\n 'rf':\n {'n_estimators': [500], 'min_samples_split': [2, 3], 'max_depth': [12],'n_jobs':[-1]}\n})\nparam_grids.update({\n 'et':\n {'n_estimators': [500], 'min_samples_split': [3, 4], 'max_depth': [12],'n_jobs':[-1]}\n})\nparam_grids.update({\n 'xgb':\n {'n_estimators': [500], 'max_depth': [2], 'objective':['binary:logistic'], 'eval_metric':['logloss'],'use_label_encoder':[False],'nthread':[-1]}\n})\n\n# param_grids.update({\n# 'mlp':\n# {'solver':['lbfgs'], 'alpha':[1e-5], 'hidden_layer_sizes':[(15,)], 'random_state':[1] }\n# })\n\n\n # ๅฎŒๆˆ่ถ…ๅ‚ๆ•ฐ็ฝ‘ๆ ผๆœ็ดขๅŽ็š„ๆจกๅž‹\nmodel_grids={}\nkfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=7)\nfor name, param in param_grids.items():\n model_grids[name] = model_selection.GridSearchCV(models[name], param, n_jobs=-1, cv=kfold, verbose=1,scoring='f1')\n # model_grids[name] = models[name]\n\n\ndef read_data():\n data1 = pd.read_csv(os.path.join(data_root, 'ๅŸบ็ก€ๆ•ฐๆฎ.csv'), encoding='GB2312')\n data2 = pd.read_csv(os.path.join(data_root, 'ๅนดๆ•ฐๆฎ.csv'), encoding='GB2312')\n # print(data2)\n # reader3 = pd.read_table(\n # os.path.join(data_root, 'ๆ—ฅๆ•ฐๆฎ.csv'),\n # encoding='GB2312',\n # sep=',',\n # iterator=True)\n # chunks = []\n # loop = True\n # chunkSize = 10000\n # while loop:\n # try:\n # 
chunk = reader3.get_chunk(chunkSize)\n # print(\"**********\")\n # print(chunk)\n # print(\"*********\")\n # chunks.append(chunk)\n # except StopIteration:\n # loop = False\n # print(\"Iteration is stopped.\")\n # data = pd.concat(chunks, ignore_index=True)\n # ็ป“ๅˆๅŸบๆœฌๆ•ฐๆฎๅ’Œๅนดๆ•ฐๆฎ๏ผŒๆš‚ๆ—ถไธ่€ƒ่™‘ๆ—ฅๆ•ฐๆฎ\n combined_data = pd.merge(data2, data1, how=\"outer\", on=\"่‚ก็ฅจ็ผ–ๅท\")\n\n #ๅŽ้ขjoinๆ˜ฏ้ป˜่ฎคๆŒ‰็…งindexๆฅ็š„๏ผŒๅˆ ้™ค็ผบๅคฑๅ€ผๅŽ้‡ๆ–ฐ่ฎพ็ฝฎindex\n combined_data = combined_data.reset_index(drop=True) \n\n labels = combined_data['ๆ˜ฏๅฆ้ซ˜่ฝฌ้€'].to_list()\n # ๆˆ‘ไปฌๆ นๆฎไธŠไธ€ๅนด็š„็‰นๅพ้ข„ๆต‹ไธ‹ไธ€ๅนดๆ˜ฏๅฆ้ซ˜้€่ฝฌ๏ผŒๆ•…ๆ ‡็ญพๆ˜ฏไธ‹ไธ€ๅนด็š„\n for i in range(len(labels)-1):\n labels[i] = labels[i+1]\n combined_data['ๆ˜ฏๅฆ้ซ˜่ฝฌ้€'] = pd.Series(labels)\n return combined_data\n\n\n#็”จๆจกๅž‹ๅฏน็‰นๅพ่ฟ›่กŒ้€‰ๆ‹ฉ\ndef feature_selection(X, y, mod):\n #ๆ นๆฎ้˜ˆๅ€ผ็งป้™คไฝŽๆ–นๅทฎ็‰นๅพ\n # ๅ‡่ฎพๆ˜ฏๅธƒๅฐ”็‰นๅพ๏ผŒๆˆ‘ไปฌๆƒณ่ฆ็งป้™ค็‰นๅพๅ€ผไธบ0ๆˆ–่€…ไธบ1็š„ๆฏ”ไพ‹่ถ…่ฟ‡0.8็š„็‰นๅพ\n # ๅธƒๅฐ”็‰นๅพไธบBernoulli้šๆœบๅ˜้‡๏ผŒๆ–นๅทฎไธบp(1-p)\n # ่ฏฅๆ–นๆณ•็š„่พ“ๅ‡บไผšๆŠŠ็‰นๅพๅๅŽปๆމ๏ผŒๆ•…ไธ้‡‡็”จ\n # sel = VarianceThreshold(threshold=(.8 * (1 - .8)))\n # X_sel = sel.fit_transform(X)\n features_top_n_list = []\n if mod == 'retrain': #ๅฆ‚ๆžœๆ˜ฏ'ratrain'ๅˆ™้‡ๆ–ฐๅฏน็‰นๅพ้€‰ๆ‹ฉ็š„ๆจกๅž‹่ฟ›่กŒ่ฎญ็ปƒ๏ผŒๅนถไฟๅญ˜็ป“ๆžœ\n if not os.path.exists(features_model_root):\n os.makedirs(features_model_root)\n # SMOTE่ฟ‡้‡‡ๆ ท\n smo = SMOTE(random_state=42, n_jobs=-1 )\n X_sampling, y_sampling = smo.fit_resample(X, y)\n\n #็”จๆ‰€ๆœ‰ๆ•ฐๆฎ่ฎญ็ปƒ็”จไบŽ็‰นๅพ้€‰ๆ‹ฉ็š„ๆจกๅž‹\n for name, _ in model_grids.items():\n # ่ฟ™้‡Œๆ‰ๅฏนmodel_grids[name]่ฟ›่กŒๅฎž้™…ไฟฎๆ”น\n model_grids[name].fit(X_sampling, y_sampling)\n joblib.dump(model_grids[name], os.path.join(features_model_root, name +'.json'))\n print(\" features selection model %s has been trained \" % (name))\n\n if not os.path.exists(features_imp_root):\n os.makedirs(features_imp_root)\n \n for name, _ in model_grids.items():\n model_grids[name] = joblib.load(os.path.join(features_model_root, name+'.json')) \n model_grid = model_grids[name]\n features_imp_sorted = pd.DataFrame({'feature': list(X),\n 'importance': model_grid.best_estimator_.feature_importances_}).sort_values('importance', ascending=False)\n features_top_n = features_imp_sorted.head(top_n)['feature']\n features_top_n_imp = features_imp_sorted.head(top_n)['importance']\n features_top_n_list.append(features_top_n)\n features_output = pd.DataFrame({'features_top_n':features_top_n, 'importance':features_top_n_imp})\n features_output.to_csv(os.path.join(features_imp_root, name+'_features_top_n_importance.csv'))\n\n elif mod == 'load': #ๅฆ‚ๆžœๆ˜ฏๅˆ™็›ดๆŽฅๅŠ ่ฝฝๅทฒ็ปๅพ—ๅˆฐ็š„็‰นๅพ้€‰ๆ‹ฉ็ป“ๆžœ\n if not os.path.exists(features_imp_root):\n raise IOError(\"cant find the features imp directory: %s\" % features_imp_root)\n for name, _ in model_grids.items():\n features_top_n = pd.read_csv(os.path.join(features_imp_root, name+'_features_top_n_importance.csv'))['features_top_n']\n features_top_n_list.append(features_top_n)\n\n else:\n raise IOError(\"invalid mod!\") \n \n \n\n # ๅŠ ่ฝฝ็”จไบŽ็‰นๅพ้€‰ๆ‹ฉ็š„ๆจกๅž‹ๅนถ้€‰ๅ‡บtop-n็š„็‰นๅพ\n features_top_n = pd.concat(features_top_n_list, ignore_index=True).drop_duplicates()\n X = pd.DataFrame(X[features_top_n])\n return X\n\n\ndef data_preprocess(data):\n\n # ่Žทๅพ—ๆฏไธช็‰นๅพ็š„็ผบๅคฑไฟกๆฏ\n null_info = data.isnull().sum(axis=0)\n # ไธขๅผƒ็ผบๅคฑๅ€ผๅคšไบŽ30%็š„็‰นๅพ\n features = [k for k, v in dict(null_info).items() if v < 
data.shape[0]* 0.3]\n data = data[features]\n\n null_info = data.isnull().sum(axis=0)\n\n # ้€‰ๅŽปๅ‡บ้œ€่ฆๅกซ่กฅ็ผบๅคฑๅ€ผ็š„็‰นๅพ\n features_fillna = [k for k, v in dict(null_info).items() if v > 0]\n\n # ๅฏน็ผบๅคฑๅ€ผ่ฟ›่กŒๅกซ่กฅ\n for feature in features_fillna:\n # ๅฆ‚ๆžœๆ˜ฏ้žๆ•ฐๅ€ผๅž‹็‰นๅพๆˆ–่€…ๆ˜ฏๆ•ดๅž‹็ฆปๆ•ฃๆ•ฐๅ€ผ๏ผŒ็”จไผ—ๆ•ฐๅกซ่กฅ\n #ๅฐ†ๅˆ—ๆŒ‰ๅ‡บ็Žฐ้ข‘็އ็”ฑ้ซ˜ๅˆฐไฝŽๆŽ’ๅบ๏ผŒไผ—ๆ•ฐๅณ็ฌฌไธ€่กŒ๏ผŒinplace่กจ็คบๅŽŸๅœฐไฟฎๆ”น\n if isinstance(data[feature], object) or isinstance(data[feature], int):\n data.loc[:, feature] = data[feature].fillna(\n data[feature].mode().iloc[0]\n )\n #ๆตฎ็‚น่ฟž็ปญๆ•ฐๅ€ผๅž‹็‰นๅพๆ’ๅ€ผๅกซ่กฅ+ๅนณๅ‡ๆ•ฐๅค„็†่พน็ผ˜\n else:\n #ๅ…ˆๅฐ†ไธญ้—ด็š„ๆ•ฐๆฎๆ’ๅ€ผๅค„็†\n data.loc[:, feature] = data[feature].interpolate( method=\"zero\", axis=0, limit_direction='both')\n #่พน็ผ˜็›ดๆŽฅๅกซๅ……ๅนณๅ‡ๆ•ฐ\n data.loc[:, feature] = data[feature].fillna(\n data[feature].mean()\n )\n if np.isnan(data.loc[:, feature]).any():\n print(data.loc[:, feature])\n # print(data[feature])\n\n # ๅญ—็ฌฆ็‹ฌ็ƒญ็ผ–็ ไธŽๆ•ฐๅ€ผๅฝ’ไธ€ๅŒ–\n # ๅ…ˆๅค„็†ๆ‰€ๅฑžๆฆ‚ๅฟตๆฟๅ—่ฟ™ไธ€ๅˆ—\n all_types = {} #ๆ€ปๅ…ฑ็š„types็ง็ฑป\n for idx, combined_type in data['ๆ‰€ๅฑžๆฆ‚ๅฟตๆฟๅ—'].items():\n types = combined_type.split(';')\n dict_type = {}\n for type_ in types:\n dict_type[type_] = 1\n all_types[type_] = 1\n data['ๆ‰€ๅฑžๆฆ‚ๅฟตๆฟๅ—'][idx] = dict_type\n for idx, dict_type in data['ๆ‰€ๅฑžๆฆ‚ๅฟตๆฟๅ—'].items():\n for k in all_types.keys():\n if k in dict_type.keys():\n continue\n else:\n data['ๆ‰€ๅฑžๆฆ‚ๅฟตๆฟๅ—'][idx][k] = 0\n for col in data.columns:\n if col == 'ๆ˜ฏๅฆ้ซ˜่ฝฌ้€': # ่ทณ่ฟ‡ๆ ‡็ญพๅˆ—\n continue\n if col == '่‚ก็ฅจ็ผ–ๅท':\n # ่ฟ™้‡Œๆ ‡็งฐๅฝขไธๆ˜ฏ่ฟž็ปญ็š„๏ผŒไธ่ƒฝ็›ดๆŽฅ่ฝฌๆขไธบๆ•ฐๅ€ผ\n # data.loc[:, col] = pd.factorize(\n # data[col])[0]\n # ๅช่ƒฝ่ฝฌๆขไธบdummy็ผ–็ ๏ผŒไปฅไธ‹ไธบ่Žทๅ–dummy็ผ–็ , ๅŽ้ข่ฟ˜่ฆ็”จ๏ผŒๆš‚ๆ—ถไฟๅญ˜ๅ‰ฏๆœฌ\n dummies_df = pd.get_dummies(data[col], prefix=str(col))\n data = data.join(dummies_df)\n continue\n if col == 'ๆ‰€ๅฑžๆฆ‚ๅฟตๆฟๅ—': #ๅฏนๆ‰€ๅฑžๆฆ‚ๅฟตๆฟๅ—ๅ•็‹ฌๅค„็†\n vec = DictVectorizer()\n arr = np.array(vec.fit_transform(data[col].to_list()).toarray())\n data = data.drop(col, axis=1)\n for i in range(arr.shape[1]):\n data = data.join(pd.DataFrame({(col+str(i)): arr[:, i]}))\n continue\n if isinstance(data[col], object):\n # ่ฟ™้‡Œๆ ‡็งฐๅฝขไธๆ˜ฏ่ฟž็ปญ็š„๏ผŒไธ่ƒฝ็›ดๆŽฅ่ฝฌๆขไธบๆ•ฐๅ€ผ\n # data.loc[:, col] = pd.factorize(\n # data[col])[0]\n # ๅช่ƒฝ่ฝฌๆขไธบdummy็ผ–็ ๏ผŒไปฅไธ‹ไธบ่Žทๅ–dummy็ผ–็ \n dummies_df = pd.get_dummies(data[col], prefix=str(col))\n data = data.drop(col, axis=1)\n data = data.join(dummies_df)\n else:\n # ๅฏนๆ•ฐๅ€ผ็‰นๅพz-scoreๆ ‡ๅ‡†ๅŒ–\n scaler = preprocessing.StandardScaler().fit(\n np.array(data[col]).reshape(-1, 1))\n #ๅนดไปฝ็‰นๅพ่ฝฌๆขๅŽ่ฆไฟ็•™ๅ‰ฏๆœฌๅŽ้ขๅˆ’ๅˆ†ๆ•ฐๆฎ้›†็”จ\n result = scaler.transform(np.array(data[col]).reshape(-1, 1)) \n if col == 'ๅนดไปฝ๏ผˆๅนดๆœซ๏ผ‰': #ๅนดไปฝ็‰นๅพ่ฆไฟ็•™ๅŽŸๆฅๅ‰ฏๆœฌๅŽ้ขๅˆ’ๅˆ†ๆ ทๆœฌ็”จ\n copy = data[col].to_list()\n data.loc[:, col] = result\n data = data.join(pd.DataFrame({'ๅนดไปฝcopy':copy}))\n else:\n data.loc[:, col] = result #ๅ…ถไป–็‰นๅพ็›ดๆŽฅ่ฆ†็›–ๅณๅฏ\n # ๅฏนๆ•ฐๅ€ผ็‰นๅพไบŒ่Œƒๆ•ฐๅฝ’ไธ€ๅŒ–๏ผŒ่ฏฅๆ“ไฝœ็‹ฌ็ซ‹ๅฏนๅพ…ๆ ทๆœฌ๏ผŒๆ— ้œ€ๅฏนnormalizer่ฟ›่กŒfit\n # ไฝ†dummy็ผ–็ ไธๅฅฝๅค„็†๏ผŒๆ•…ไธ่€ƒ่™‘ไน‹\n # data.loc[:, col] = preprocessing.normalize(np.array(data[col]).reshape(-1, 1),norm='l2')\n\n return data\n\ndef data_decomposition(X):\n pca = decomposition.PCA()\n pca.fit(X)\n pca.n_components = pca_dim\n return pca.fit_transform(X)" ]
[ [ "sklearn.feature_extraction.DictVectorizer", "numpy.array", "numpy.isnan", "sklearn.model_selection.StratifiedKFold", "pandas.merge", "sklearn.preprocessing.StandardScaler", "sklearn.ensemble.RandomForestClassifier", "pandas.DataFrame", "pandas.concat", "sklearn.tree.DecisionTreeClassifier", "pandas.Series", "sklearn.model_selection.GridSearchCV", "sklearn.ensemble.ExtraTreesClassifier", "sklearn.decomposition.PCA" ] ]
dirac-institute/SPSAS2019
[ "34781162bb088b17c1b9afb69eee2284dce6bdec" ]
[ "aMLpy/fig_spec_reconstruction.py" ]
[ "\"\"\"\nPCA Reconstruction of a spectrum\n--------------------------------\nThis shows the reconstruction of a spectrum from eigenspectra.\n\"\"\"\n# Author: Jake VanderPlas <[email protected]>\n# License: BSD\n# The figure produced by this code is published in the textbook\n# \"Statistics, Data Mining, and Machine Learning in Astronomy\" (2013)\n# For more information, see http://astroML.github.com\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom sklearn.decomposition import PCA\n\nfrom astroML.datasets import sdss_corrected_spectra\nfrom astroML.decorators import pickle_results\n\n#------------------------------------------------------------\n# Download data\ndata = sdss_corrected_spectra.fetch_sdss_corrected_spectra()\nspectra = sdss_corrected_spectra.reconstruct_spectra(data)\nwavelengths = sdss_corrected_spectra.compute_wavelengths(data)\n\n#------------------------------------------------------------\n# Compute PCA components\n\n# Eigenvalues can be computed using PCA as in the commented code below:\n\n#from sklearn.decomposition import PCA\n#pca = PCA()\n#pca.fit(spectra)\n#evals = pca.explained_variance_ratio_\n#evals_cs = evals.cumsum()\n\n# because the spectra have been reconstructed from masked values, this\n# is not exactly correct in this case: we'll use the values computed\n# in the file compute_sdss_pca.py\nevals = data['evals'] ** 2\nevals_cs = evals.cumsum()\nevals_cs /= evals_cs[-1]\nevecs = data['evecs']\nspec_mean = spectra.mean(0)\n\n#------------------------------------------------------------\n# Find the coefficients of a particular spectrum\nspec = spectra[1]\ncoeff = np.dot(evecs, spec - spec_mean)\n\n#------------------------------------------------------------\n# Plot the sequence of reconstructions\nfig = plt.figure(figsize=(8, 8))\nfig.subplots_adjust(hspace=0)\n\nfor i, n in enumerate([0, 4, 8, 20]):\n ax = fig.add_subplot(411 + i)\n ax.plot(wavelengths, spec, '-', c='gray')\n ax.plot(wavelengths, spec_mean + np.dot(coeff[:n], evecs[:n]), '-k')\n\n if i < 3:\n ax.xaxis.set_major_formatter(plt.NullFormatter())\n\n ax.set_ylim(-2, 21)\n ax.set_ylabel('flux')\n\n if n == 0:\n text = \"mean\"\n elif n == 1:\n text = \"mean + 1 component\\n\"\n text += r\"$(\\sigma^2_{tot} = %.2f)$\" % evals_cs[n - 1]\n else:\n text = \"mean + %i components\\n\" % n\n text += r\"$(\\sigma^2_{tot} = %.2f)$\" % evals_cs[n - 1]\n\n ax.text(0.01, 0.95, text, ha='left', va='top', transform=ax.transAxes)\n\nfig.axes[-1].set_xlabel(r'${\\rm wavelength\\ (\\AA)}$')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show", "numpy.dot", "matplotlib.pyplot.NullFormatter", "matplotlib.pyplot.figure" ] ]
developmentseed/ml-enabler
[ "ab83086df5d7e3bc9bdd5594e1da2e999b31c8fa" ]
[ "tasks/task-tfrecords/generate_datanpz.py" ]
[ "# code adopted from LabelMaker (https://github.com/developmentseed/label-maker)\nimport os\nfrom os import path as op\nimport requests\nimport rasterio\nimport glob\n\nfrom requests.auth import HTTPBasicAuth\nfrom io import BytesIO\nfrom base64 import b64encode\nfrom urllib.parse import urlparse\nfrom typing import Dict, List, NamedTuple, Callable, Optional, Tuple, Any, Iterator\nfrom rasterio.io import MemoryFile\nfrom rasterio.windows import Window\nfrom PIL import Image\nimport io\n\nimport mercantile\nfrom mercantile import Tile, children\nimport numpy as np\n\ndef get_image_format(imagery):\n #TO-DO fix for non-mapbox imagery\n o = urlparse(imagery)\n _, image_format = op.splitext(o.path)\n if not image_format in ['.png', '.jpg', '.jpeg']:\n image_format = '.png'\n return image_format\n\ndef url(tile, imagery):\n \"\"\"Return a tile url provided an imagery template and a tile\"\"\"\n return imagery.replace('{x}', tile[0]).replace('{y}', tile[1]).replace('{z}', tile[2])\n\ndef download_tilelist(chip, imagery, folder):\n image_format = get_image_format(imagery['imglist'][chip]['url'])\n tile_img = op.join(folder, '{}{}'.format(imagery['imglist'][chip]['name'], image_format))\n\n r = requests.get(imagery['imglist'][chip]['url'])\n r.raise_for_status()\n\n with open(tile_img, 'wb')as w:\n w.write(r.content)\n\n return tile_img\n\ndef download_tile_tms(tile, imagery, folder, zoom, supertile):\n \"\"\"Download a satellite image tile from a tms endpoint\"\"\"\n\n image_format = get_image_format(imagery['url'])\n r = requests.get(url(tile.split('-'), imagery['url']))\n tile_img = op.join(folder, '{}{}'.format(tile, image_format))\n tile = tile.split('-')\n\n #super-tile special case\n if supertile:\n new_zoom = zoom + 1 #get zoom from ml-enabler database\n # get children\n child_tiles = children(int(tile[0]), int(tile[1]), int(tile[2]), zoom=new_zoom)\n child_tiles.sort()\n\n new_dim = 256 * (2 * (new_zoom - zoom))\n\n w_lst = []\n for i in range (2 * (new_zoom - zoom)):\n for j in range(2 * (new_zoom - zoom)):\n window = Window(i * 256, j * 256, 256, 256)\n w_lst.append(window)\n\n # request children\n with rasterio.open(tile_img, 'w', driver='jpeg', height=new_dim,\n width=new_dim, count=3, dtype=rasterio.uint8) as w:\n for num, t in enumerate(child_tiles):\n t = [str(t[0]), str(t[1]), str(t[2])]\n r = requests.get(url(t, imagery['url']))\n img = np.array(Image.open(io.BytesIO(r.content)), dtype=np.uint8)\n try:\n img = img.reshape((256, 256, 3)) # 4 channels returned from some endpoints, but not all\n except ValueError:\n img = img.reshape((256, 256, 4))\n img = img[:, :, :3]\n img = np.rollaxis(img, 2, 0)\n w.write(img, window=w_lst[num])\n else:\n r = requests.get(url(tile, imagery['url']))\n with open(tile_img, 'wb')as w:\n w.write(r.content)\n return tile_img\n\ndef download_img_match_labels(labels_folder, imagery, folder, zoom, supertile=False):\n #open the labels file and read the key (so we only download the images we have labels for)\n labels_file = op.join(labels_folder, 'labels.npz')\n nplabels = np.load(labels_file)\n\n chips_dir = op.join(folder, 'chips')\n if not op.isdir(chips_dir):\n os.makedirs(chips_dir)\n class_chips = [tile for tile in nplabels.files]\n\n #download images\n for chip in class_chips:\n if imagery['fmt'] == 'wms':\n download_tile_tms(chip, imagery, folder, zoom, supertile)\n else:\n download_tilelist(chip, imagery, folder)\n\n# package up the images + labels into one data.npz file\ndef make_datanpz(dest_folder, imagery, supertile,\n seed=False,\n 
split_names=('train', 'val', 'test'),\n split_vals=(0.7, .2, .1)):\n \"\"\"Generate an .npz file containing arrays for training machine learning algorithms\n Parameters\n ------------\n dest_folder: str\n Folder to save labels, tiles, and final numpy arrays into\n imagery: str\n Imagery template to download satellite images from.\n Ex: http://a.tiles.mapbox.com/v4/mapbox.satellite/{z}/{x}/{y}.jpg?access_token=ACCESS_TOKEN\n seed: int\n Random generator seed. Optional, use to make results reproducible.\n split_vals: tuple\n Percentage of data to put in each catagory listed in split_names. Must\n be floats and must sum to one. Default: (0.8, 0.2)\n split_names: tupel\n Default: ('train', 'test')\n List of names for each subset of the data.\n \"\"\"\n # if a seed is given, use it\n if seed:\n np.random.seed(seed)\n\n if len(split_names) != len(split_vals):\n raise ValueError('`split_names` and `split_vals` must be the same '\n 'length. Please update your config.')\n if not np.isclose(sum(split_vals), 1):\n raise ValueError('`split_vals` must sum to one. Please update your config.')\n\n # open labels file, create tile array\n labels_file = op.join(dest_folder, 'labels.npz')\n labels = np.load(labels_file)\n tile_names = [tile for tile in labels.files]\n tile_names.sort()\n tiles = np.array(tile_names)\n np.random.shuffle(tiles)\n\n # open the images and load those plus the labels into the final arrays\n\n x_vals = []\n y_vals = []\n\n for tile in tiles:\n image_file = glob.glob(dest_folder + '/' + 'tiles/' + tile + '*')[0]\n try:\n img = Image.open(image_file)\n except FileNotFoundError:\n # we often don't download images for each label (e.g. background tiles)\n continue\n except OSError:\n print('Couldn\\'t open {}, skipping'.format(image_file))\n continue\n\n np_image = np.array(img)\n img.close()\n\n if not supertile:\n try:\n np_image = np_image.reshape((256, 256, 3)) # 4 channels returned from some endpoints, but not all\n except ValueError:\n np_image = np_image.reshape((256, 256, 4))\n np_image = np_image[:, :, :3]\n\n #focusing just on classification\n x_vals.append(np_image)\n y_vals.append(labels[tile])\n\n # Convert lists to numpy arrays\n x_vals = np.array(x_vals, dtype=np.uint8)\n y_vals = np.array(y_vals, dtype=np.uint8)\n\n # Get number of data samples per split from the float proportions\n split_n_samps = [len(x_vals) * val for val in split_vals]\n\n if np.any(split_n_samps == 0):\n raise ValueError('Split must not generate zero samples per partition.')\n\n # Convert into a cumulative sum to get indices\n split_inds = np.cumsum(split_n_samps).astype(np.integer)\n\n # Exclude last index as `np.split` handles splitting without that value\n split_arrs_x = np.split(x_vals, split_inds[:-1])\n split_arrs_y = np.split(y_vals, split_inds[:-1])\n\n save_dict = {}\n\n for si, split_name in enumerate(split_names):\n save_dict['x_{}'.format(split_name)] = split_arrs_x[si]\n save_dict['y_{}'.format(split_name)] = split_arrs_y[si]\n\n np.savez(op.join(dest_folder, 'data.npz'), **save_dict)\n print('Saving packaged file to {}'.format(op.join(dest_folder, 'data.npz')))\n" ]
[ [ "numpy.array", "numpy.random.seed", "numpy.rollaxis", "numpy.load", "numpy.random.shuffle", "numpy.split", "numpy.any", "numpy.cumsum" ] ]
eloso42/CarND-Capstone
[ "f662a61c1ca057d744586b3a39ef23c2609ba89c" ]
[ "ros/src/waypoint_updater/waypoint_updater.py" ]
[ "#!/usr/bin/env python\n\nimport numpy as np\nimport rospy\nfrom geometry_msgs.msg import PoseStamped\nfrom styx_msgs.msg import Lane, Waypoint\nfrom scipy.spatial import KDTree\nimport math\nfrom std_msgs.msg import Int32\n\n\n\n'''\nThis node will publish waypoints from the car's current position to some `x` distance ahead.\n\nAs mentioned in the doc, you should ideally first implement a version which does not care\nabout traffic lights or obstacles.\n\nOnce you have created dbw_node, you will update this node to use the status of traffic lights too.\n\nPlease note that our simulator also provides the exact location of traffic lights and their\ncurrent status in `/vehicle/traffic_lights` message. You can use this message to build this node\nas well as to verify your TL classifier.\n\nTODO (for Yousuf and Aaron): Stopline location for each traffic light.\n'''\n\n\n\nLOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number\nMAX_DECEL = 0.5 # Max deceleration rate\n\n\nclass WaypointUpdater(object):\n def __init__(self):\n rospy.init_node('waypoint_updater')\n\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb) # gets data from tl_detector\n\n\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n\n # TODO: Add other member variables you need below\n self.pose = None\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n\n self.base_lane = None\n self.stopline_wp_idx = -1\n \n self.loop()\n \n #def loop(self): # def from first part of the tutorial!\n # rate = rospy.Rate(50)\n # while not rospy.is_shutdown():\n # if self.pose and self.base_waypoints:\n # # get closest waypoint\n # closest_waypoint_idx = self.get_closest_waypoint_idx()\n # self.publish_waypoints(closest_waypoint_idx)\n # rate.sleep()\n \n def loop(self):\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n if self.pose and self.base_lane:\n # get closest waypoint\n self.publish_waypoints()\n rate.sleep()\n\n def get_closest_waypoint_idx(self):\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x,y], 1)[1]\n \n # check if closest is ahead or behind the vehicle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx-1]\n\n # Equation for hyperplane through cloeste_coords\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect-prev_vect, pos_vect-cl_vect)\n\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n return closest_idx\n\n #def publish_waypoints(self, closest_idx): # Function from Waypoint Updater (Partial)\n # lane = Lane()\n # lane.header = self.base_waypoints.header\n # lane.waypoints = self.base_waypoints.waypoints[closest_idx:closest_idx + LOOKAHEAD_WPS]\n # self.final_waypoints_pub.publish(lane)\n\n def publish_waypoints(self):\n final_lane = self.generate_lane()\n self.final_waypoints_pub.publish(final_lane)\n\n\n def generate_lane(self):\n lane = Lane()\n\n closest_idx = self.get_closest_waypoint_idx()\n farthest_idx = closest_idx + LOOKAHEAD_WPS\n base_waypoints = self.base_lane.waypoints[closest_idx:farthest_idx]\n\n if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):\n lane.waypoints = 
base_waypoints\n else:\n lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)\n \n return lane\n \n def decelerate_waypoints(self, waypoints, closest_idx):\n temp = []\n for i, wp in enumerate(waypoints):\n p = Waypoint()\n p.pose = wp.pose\n \n stop_idx = max(self.stopline_wp_idx - closest_idx - 2, 0) # Two WPs back from line\n dist = self.distance(waypoints, i, stop_idx) # distance from one waypoint to another\n vel = math.sqrt(2 * MAX_DECEL * dist) # velocity depends on the distance parameter (small distance, small velocity)\n if vel < 1.0: \n vel = 0.0\n \n p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x) # keep the speed limit \n temp.append(p) # create a new list of waypoints\n \n return temp\n \n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n rospy.loginfo('Constructing waypoint tree')\n # self.base_waypoints = waypoints # old version\n self.base_lane = waypoints\n if not self.waypoints_2d: \n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n # TODO: Callback for /traffic_waypoint message. Implement\n self.stopline_wp_idx = msg.data\n\n def obstacle_cb(self, msg):\n # TODO: Callback for /obstacle_waypoint message. We will implement it later\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n for i in range(wp1, wp2+1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n wp1 = i\n return dist\n\n\nif __name__ == '__main__':\n try:\n WaypointUpdater()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start waypoint updater node.')\n" ]
[ [ "scipy.spatial.KDTree", "numpy.array", "numpy.dot" ] ]
Coderash1998/reconcile-a-report-using-pandas
[ "71664451602fffe2c0044153b862e5c69d600d7b" ]
[ "code.py" ]
[ "# --------------\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Code starts here\ndf=pd.read_csv(path)\ndf['state']=df['state'].str.lower()\ndf['total']=df['Jan']+df['Feb']+df['Mar']\nsum_row = df[[\"Jan\", \"Feb\", \"Mar\", \"total\"]].sum()\ndf_final = df.append(sum_row, ignore_index=True)\n\n# Code ends here\n\n\n# --------------\nimport requests\n\n# Code starts here\nurl='https://en.wikipedia.org/wiki/List_of_U.S._state_abbreviations' \nresponse=requests.get(url)\ndf1=pd.read_html(response.content)[0]\ndf1 = df1.iloc[11:, :]\ndf1 = df1.rename(columns=df1.iloc[0, :]).iloc[1:, :]\ndf1['United States of America'] = df1['United States of America'].apply(lambda x: x.replace(\" \", \"\")).astype(object)\n# Code ends here\n\n\n# --------------\ndf1['United States of America'] = df1['United States of America'].astype(str).apply(lambda x: x.lower())\ndf1['US'] = df1['US'].astype(str)\n\n# Code starts here\nmapping = df1.set_index('United States of America')['US'].to_dict()\ndf_final.insert(6, 'abbr', np.nan)\ndf_final['abbr'] = df_final['state'].map(mapping)\nprint(df_final.head(15))\n# Code ends here\n\n\n# --------------\n# Code stars here\ndf_mississipi=df_final[df_final['state'] == 'mississipi'].replace(np.nan, 'MS')\ndf_final.replace(df_final.iloc[6], df_mississipi, inplace=True)\ndf_tenessee = df_final[df_final['state'] == 'tenessee'].replace(np.nan, 'TN')\ndf_final.replace(df_final.iloc[10], df_tenessee, inplace=True)\n\n# Code ends here\n\n\n# --------------\n# Code starts here\ndf_sub=df_final[[\"abbr\", \"Jan\", \"Feb\", \"Mar\", \"total\"]].groupby(\"abbr\").sum()\nformatted_df = df_sub.applymap(lambda x: \"${:,.0f}\".format(x))\n\n\n\n# Code ends here\n\n\n# --------------\n# Code starts here\nsum_row = df_sub[[\"Jan\", \"Feb\", \"Mar\", \"total\"]].sum()\ndf_sub_sum = pd.DataFrame(data=sum_row).T\ndf_sub_sum = df_sub_sum.applymap(lambda x: \"${:,.0f}\".format(x))\nfinal_table = formatted_df.append(df_sub_sum)\nfinal_table = final_table.rename(index={0: \"Total\"})\n\nprint(final_table)\n\n\n# Code ends here\n\n\n# --------------\n# Code starts here\ndf_sub['total']=df['Jan']+df['Feb']+df['Mar']\ndf_sub['total'].plot(kind='pie')\n\n# Code ends here\n\n\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.read_html" ] ]
hobogalaxy/geometric-deep-learning
[ "b73c65ee6cecc0a53c37fabe970cae4ce8a8e82e" ]
[ "layers.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom torch_geometric.utils import add_self_loops\nfrom torch_geometric.nn import MessagePassing\n\n\nclass SGCNConv(MessagePassing):\n def __init__(self, coors, out_channels_1, out_features, label_dim=1, dropout=0):\n \"\"\"\n label_dim - dimention of node reprezentaion\n coors - dimension of position (for MNIST 2)\n out_channels_1 - dimension of convolution on each reprezentation chanal\n * autput will have dimention label_dim * out_channels_1\n out_features - dimension of node representation after graphConv\n \"\"\"\n super(SGCNConv, self).__init__(aggr='add')\n self.lin_in = torch.nn.Linear(coors, label_dim * out_channels_1)\n self.lin_out = torch.nn.Linear(label_dim * out_channels_1, out_features)\n self.dropout = dropout\n\n def forward(self, x, edge_index, pos):\n \"\"\"\n x - feature matrix of the whole graph [num_nodes, label_dim]\n pos - node position matrix [num_nodes, coors]\n edge_index - graph connectivity [2, num_edges]\n \"\"\"\n\n edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0)) # num_edges = num_edges + num_nodes\n\n return self.propagate(edge_index=edge_index, x=x, pos=pos, aggr='add') # [N, out_channels, label_dim]\n\n def message(self, pos_i, pos_j, x_j):\n \"\"\"\n pos_i [num_edges, coors]\n pos_j [num_edges, coors]\n x_j [num_edges, label_dim]\n \"\"\"\n\n tmp = pos_j - pos_i\n L = self.lin_in(tmp) # [num_edges, out_channels]\n num_nodes, label_dim = list(x_j.size())\n label_dim_out_channels_1 = list(L.size())[1]\n\n X = F.relu(L)\n Y = x_j\n X = torch.t(X)\n X = F.dropout(X, p=self.dropout, training=self.training)\n result = torch.t(\n (X.view(label_dim, -1, num_nodes) * torch.t(Y).unsqueeze(1)).reshape(label_dim_out_channels_1, num_nodes))\n return result\n\n def update(self, aggr_out):\n \"\"\"\n aggr_out [num_nodes, label_dim, out_channels]\n \"\"\"\n aggr_out = self.lin_out(aggr_out) # [num_nodes, label_dim, out_features]\n aggr_out = F.relu(aggr_out)\n aggr_out = F.dropout(aggr_out, p=self.dropout, training=self.training)\n\n return aggr_out\n" ]
[ [ "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.functional.dropout", "torch.t" ] ]
ProtossLuigi/vision-stretcher
[ "e57e4946e510ba7a13c8d1e6b5a26cc73d84f730" ]
[ "main.py" ]
[ "from typing import Tuple\r\nimport numpy as np\r\nfrom seam_carving import seam_carve\r\nfrom detectron2 import model_zoo\r\nfrom detectron2.engine import DefaultPredictor\r\nfrom detectron2.config import get_cfg\r\nfrom detectron2.data import MetadataCatalog\r\n\r\nUNSTRETCHABLE_CLASSES = [\r\n 'person',\r\n 'bicycle',\r\n 'car',\r\n 'motorcycle',\r\n 'airplane',\r\n 'bus',\r\n 'train',\r\n 'truck',\r\n 'cow',\r\n]\r\n\r\ndef get_classes():\r\n cfg = get_cfg()\r\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"))\r\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\")\r\n return MetadataCatalog.get(cfg.DATASETS.TRAIN[0]).thing_classes\r\n\r\ndef init_detectron() -> Tuple[DefaultPredictor, set]:\r\n cfg = get_cfg()\r\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"))\r\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\")\r\n predictor = DefaultPredictor(cfg)\r\n\r\n metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])\r\n classes_of_interest = []\r\n for i in UNSTRETCHABLE_CLASSES:\r\n classes_of_interest.append(metadata.thing_classes.index(i))\r\n \r\n return predictor, set(classes_of_interest)\r\n\r\ndef create_mask(predictor, masking_classes, img):\r\n outputs = predictor(img)\r\n masks = outputs['instances'].pred_masks.cpu().numpy()\r\n mask = np.zeros(img.shape[:2], dtype=bool)\r\n for i in range(len(outputs['instances'].pred_classes)):\r\n if outputs['instances'].pred_classes[i] in masking_classes:\r\n mask |= masks[i]\r\n return mask.astype(np.uint8) * 255\r\n\r\ndef resize(img, target_dims, mask = None):\r\n return seam_carve(img, target_dims[0] - img.shape[0], target_dims[1] - img.shape[1], mask)[0]\r\n\r\ndef pipeline(image, target_dims):\r\n predictor, coi = init_detectron()\r\n mask = create_mask(predictor, coi, image)\r\n output = resize(image, target_dims, mask)\r\n return output" ]
[ [ "numpy.zeros" ] ]
aam12/distiller
[ "fd06fcba028d023e430cd37d1531bc2ac5202ea6" ]
[ "models/cifar100/resnet_cifar.py" ]
[ "#\n# Copyright (c) 2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"Resnet for CIFAR10\n\nResnet for CIFAR10, based on \"Deep Residual Learning for Image Recognition\".\nThis is based on TorchVision's implementation of ResNet for ImageNet, with appropriate\nchanges for the 10-class Cifar-10 dataset.\nThis ResNet also has layer gates, to be able to dynamically remove layers.\n\n@inproceedings{DBLP:conf/cvpr/HeZRS16,\n author = {Kaiming He and\n Xiangyu Zhang and\n Shaoqing Ren and\n Jian Sun},\n title = {Deep Residual Learning for Image Recognition},\n booktitle = {{CVPR}},\n pages = {770--778},\n publisher = {{IEEE} Computer Society},\n year = {2016}\n}\n\n\"\"\"\nimport torch.nn as nn\nimport math\nimport torch.utils.model_zoo as model_zoo\n\n\n__all__ = ['resnet20_cifar100', 'resnet32_cifar100', 'resnet44_cifar100', 'resnet56_cifar100']\n\nNUM_CLASSES = 100\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, block_gates, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.block_gates = block_gates\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu1 = nn.ReLU(inplace=False) # To enable layer removal inplace must be False\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.relu2 = nn.ReLU(inplace=False)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = out = x\n\n if self.block_gates[0]:\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n\n if self.block_gates[1]:\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu2(out)\n\n return out\n\n\nclass ResNetCifar(nn.Module):\n\n def __init__(self, block, layers, num_classes=NUM_CLASSES):\n self.nlayers = 0\n # Each layer manages its own gates\n self.layer_gates = []\n for layer in range(3):\n # For each of the 3 layers, create block gates: each block has two layers\n self.layer_gates.append([]) # [True, True] * layers[layer])\n for blk in range(layers[layer]):\n self.layer_gates[layer].append([True, True])\n\n self.inplanes = 16 # 64\n super(ResNetCifar, self).__init__()\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(self.layer_gates[0], block, 16, layers[0])\n self.layer2 = self._make_layer(self.layer_gates[1], block, 32, layers[1], stride=2)\n self.layer3 = self._make_layer(self.layer_gates[2], block, 64, layers[2], stride=2)\n self.avgpool = nn.AvgPool2d(8, stride=1)\n self.fc = nn.Linear(64 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, 
nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, layer_gates, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(layer_gates[0], self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(layer_gates[i], self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef resnet20_cifar100(**kwargs):\n model = ResNetCifar(BasicBlock, [3, 3, 3], **kwargs)\n return model\n\ndef resnet32_cifar100(**kwargs):\n model = ResNetCifar(BasicBlock, [5, 5, 5], **kwargs)\n return model\n\ndef resnet44_cifar100(**kwargs):\n model = ResNetCifar(BasicBlock, [7, 7, 7], **kwargs)\n return model\n\ndef resnet56_cifar100(**kwargs):\n model = ResNetCifar(BasicBlock, [9, 9, 9], **kwargs)\n return model\n" ]
[ [ "torch.nn.Linear", "torch.nn.Sequential", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.Conv2d" ] ]
ivanchenzx/CEVT
[ "635301a0864115a1f95e01627dd29b005463c7ae" ]
[ "loss.py" ]
[ "# list all the additional loss functions\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n\n################## entropy loss (continuous target) #####################\ndef cross_entropy_soft(pred):\n softmax = nn.Softmax(dim=1)\n logsoftmax = nn.LogSoftmax(dim=1)\n loss = torch.mean(torch.sum(-softmax(pred) * logsoftmax(pred), 1))\n return loss\n\n################## attentive entropy loss (source + target) #####################\ndef attentive_entropy(pred, pred_domain):\n softmax = nn.Softmax(dim=1)\n logsoftmax = nn.LogSoftmax(dim=1)\n\n # attention weight\n entropy = torch.sum(-softmax(pred_domain) * logsoftmax(pred_domain), 1)\n weights = 1 + entropy\n\n # attentive entropy\n loss = torch.mean(weights * torch.sum(-softmax(pred) * logsoftmax(pred), 1))\n return loss\n\n################## ensemble-based loss #####################\n# discrepancy loss used in MCD (CVPR 18)\ndef dis_MCD(out1, out2):\n return torch.mean(torch.abs(F.softmax(out1,dim=1) - F.softmax(out2, dim=1)))\n\n################## MMD-based loss #####################\ndef mmd_linear(f_of_X, f_of_Y):\n # Consider linear time MMD with a linear kernel:\n # K(f(x), f(y)) = f(x)^Tf(y)\n # h(z_i, z_j) = k(x_i, x_j) + k(y_i, y_j) - k(x_i, y_j) - k(x_j, y_i)\n # = [f(x_i) - f(y_i)]^T[f(x_j) - f(y_j)]\n #\n # f_of_X: batch_size * k\n # f_of_Y: batch_size * k\n\n delta = f_of_X - f_of_Y\n loss = torch.mean(torch.mm(delta, torch.transpose(delta, 0, 1)))\n return loss\n\ndef guassian_kernel(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):\n n_samples = int(source.size()[0])+int(target.size()[0])\n total = torch.cat([source, target], dim=0)\n total0 = total.unsqueeze(0).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))\n total1 = total.unsqueeze(1).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))\n L2_distance = ((total0-total1)**2).sum(2)\n if fix_sigma:\n bandwidth = fix_sigma\n else:\n bandwidth = torch.sum(L2_distance.data) / (n_samples**2-n_samples)\n bandwidth /= kernel_mul ** (kernel_num // 2)\n bandwidth_list = [bandwidth * (kernel_mul**i) for i in range(kernel_num)]\n kernel_val = [torch.exp(-L2_distance / bandwidth_temp) for bandwidth_temp in bandwidth_list]\n return sum(kernel_val)\n\ndef mmd_rbf(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None, ver=2):\n batch_size = int(source.size()[0])\n kernels = guassian_kernel(source, target, kernel_mul=kernel_mul, kernel_num=kernel_num, fix_sigma=fix_sigma)\n\n loss = 0\n\n if ver==1:\n for i in range(batch_size):\n s1, s2 = i, (i + 1) % batch_size\n t1, t2 = s1 + batch_size, s2 + batch_size\n loss += kernels[s1, s2] + kernels[t1, t2]\n loss -= kernels[s1, t2] + kernels[s2, t1]\n loss = loss.abs_() / float(batch_size)\n elif ver==2:\n XX = kernels[:batch_size, :batch_size]\n YY = kernels[batch_size:, batch_size:]\n XY = kernels[:batch_size, batch_size:]\n YX = kernels[batch_size:, :batch_size]\n loss = torch.mean(XX + YY - XY - YX)\n else:\n raise ValueError('ver == 1 or 2')\n\n return loss\n\ndef JAN(source_list, target_list, kernel_muls=[2.0, 2.0], kernel_nums=[2, 5], fix_sigma_list=[None, None], ver=2):\n batch_size = int(source_list[0].size()[0])\n layer_num = len(source_list)\n joint_kernels = None\n for i in range(layer_num):\n source = source_list[i]\n target = target_list[i]\n kernel_mul = kernel_muls[i]\n kernel_num = kernel_nums[i]\n fix_sigma = fix_sigma_list[i]\n kernels = guassian_kernel(source, target,\n kernel_mul=kernel_mul, kernel_num=kernel_num, fix_sigma=fix_sigma)\n if 
joint_kernels is not None:\n joint_kernels = joint_kernels * kernels\n else:\n joint_kernels = kernels\n\n loss = 0\n\n if ver==1:\n for i in range(batch_size):\n s1, s2 = i, (i + 1) % batch_size\n t1, t2 = s1 + batch_size, s2 + batch_size\n loss += joint_kernels[s1, s2] + joint_kernels[t1, t2]\n loss -= joint_kernels[s1, t2] + joint_kernels[s2, t1]\n loss = loss.abs_() / float(batch_size)\n elif ver==2:\n XX = joint_kernels[:batch_size, :batch_size]\n YY = joint_kernels[batch_size:, batch_size:]\n XY = joint_kernels[:batch_size, batch_size:]\n YX = joint_kernels[batch_size:, :batch_size]\n loss = torch.mean(XX + YY - XY - YX)\n else:\n raise ValueError('ver == 1 or 2')\n\n return loss\n\n\ndef ivan_CEL(my_outputs, my_labels, weights):\n #specifying the batch size\n my_batch_size = my_outputs.size()[0]\n #calculating the log of softmax values\n my_outputs = F.log_softmax(my_outputs, dim=1)\n #selecting the values that correspond to labels\n my_outputs = my_outputs[range(my_batch_size), my_labels] * weights.float()\n #returning the results\n return -torch.sum(my_outputs)/my_batch_size\n\ndef H_loss(x):\n b = F.softmax(x, dim=1) * F.log_softmax(x, dim=1)\n return b.sum(dim=1).mean()" ]
[ [ "torch.nn.LogSoftmax", "torch.cat", "torch.nn.Softmax", "torch.nn.functional.log_softmax", "torch.nn.functional.softmax", "torch.transpose", "torch.exp", "torch.mean", "torch.sum" ] ]
zargit/faulty-pill-detection
[ "ff36767d4af2ece99980d4c9bbf1567fc8415061" ]
[ "data_loader.py" ]
[ "\"\"\"\nmnist_loader\n~~~~~~~~~~~~\n\nA library to load the MNIST image data. For details of the data\nstructures that are returned, see the doc strings for ``load_data``\nand ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the\nfunction usually called by our neural network code.\n\"\"\"\n\n#### Libraries\nimport cPickle\nimport gzip\n\nimport numpy as np\n\ndef load_data():\n training_data = np.load('trainingData2.npy')\n test_data = np.load('testData2.npy')\n return (training_data, test_data)\n\ndef load_data_wrapper():\n tr_d, te_d = load_data()\n training_inputs = [np.reshape(x, (1024, 1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n training_data = zip(training_inputs, training_results)\n test_inputs = [np.reshape(x, (1024, 1)) for x in te_d[0]]\n test_data = zip(test_inputs, te_d[1])\n return (training_data, test_data)\n\ndef vectorized_result(j):\n e = np.zeros((2, 1))\n e[j] = 1.0\n return e\n\n\n" ]
[ [ "numpy.load", "numpy.reshape", "numpy.zeros" ] ]
AK391/PaddleHub
[ "a51ab7447e089776766becb3297e560dfed98573" ]
[ "modules/audio/tts/fastspeech2_baker/module.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom pathlib import Path\nfrom typing import List\n\nimport numpy as np\nimport paddle\nfrom paddlehub.env import MODULE_HOME\nfrom paddlehub.module.module import moduleinfo, serving\nfrom paddlehub.utils.log import logger\nfrom parakeet.frontend.zh_frontend import Frontend\nfrom parakeet.models.fastspeech2 import FastSpeech2\nfrom parakeet.models.fastspeech2 import FastSpeech2Inference\nfrom parakeet.models.parallel_wavegan import PWGGenerator\nfrom parakeet.models.parallel_wavegan import PWGInference\nfrom parakeet.modules.normalizer import ZScore\nimport soundfile as sf\nfrom yacs.config import CfgNode\nimport yaml\n\n\n@moduleinfo(name=\"fastspeech2_baker\", version=\"1.0.0\", summary=\"\", author=\"Baidu\", author_email=\"\", type=\"audio/tts\")\nclass FastSpeech(paddle.nn.Layer):\n def __init__(self, output_dir='./wavs'):\n super(FastSpeech, self).__init__()\n fastspeech2_res_dir = os.path.join(MODULE_HOME, 'fastspeech2_baker', 'assets/fastspeech2_nosil_baker_ckpt_0.4')\n pwg_res_dir = os.path.join(MODULE_HOME, 'fastspeech2_baker', 'assets/pwg_baker_ckpt_0.4')\n\n phones_dict = os.path.join(fastspeech2_res_dir, 'phone_id_map.txt')\n with open(phones_dict, \"r\") as f:\n phn_id = [line.strip().split() for line in f.readlines()]\n vocab_size = len(phn_id)\n\n # fastspeech2\n fastspeech2_config = os.path.join(fastspeech2_res_dir, 'default.yaml')\n with open(fastspeech2_config) as f:\n fastspeech2_config = CfgNode(yaml.safe_load(f))\n self.samplerate = fastspeech2_config.fs\n\n fastspeech2_checkpoint = os.path.join(fastspeech2_res_dir, 'snapshot_iter_76000.pdz')\n model = FastSpeech2(idim=vocab_size, odim=fastspeech2_config.n_mels, **fastspeech2_config[\"model\"])\n model.set_state_dict(paddle.load(fastspeech2_checkpoint)[\"main_params\"])\n logger.info('Load fastspeech2 params from %s' % os.path.abspath(fastspeech2_checkpoint))\n model.eval()\n\n # vocoder\n pwg_config = os.path.join(pwg_res_dir, 'pwg_default.yaml')\n with open(pwg_config) as f:\n pwg_config = CfgNode(yaml.safe_load(f))\n\n pwg_checkpoint = os.path.join(pwg_res_dir, 'pwg_snapshot_iter_400000.pdz')\n vocoder = PWGGenerator(**pwg_config[\"generator_params\"])\n vocoder.set_state_dict(paddle.load(pwg_checkpoint)[\"generator_params\"])\n logger.info('Load vocoder params from %s' % os.path.abspath(pwg_checkpoint))\n vocoder.remove_weight_norm()\n vocoder.eval()\n\n # frontend\n self.frontend = Frontend(phone_vocab_path=phones_dict)\n\n # stat\n fastspeech2_stat = os.path.join(fastspeech2_res_dir, 'speech_stats.npy')\n stat = np.load(fastspeech2_stat)\n mu, std = stat\n mu = paddle.to_tensor(mu)\n std = paddle.to_tensor(std)\n fastspeech2_normalizer = ZScore(mu, std)\n\n pwg_stat = os.path.join(pwg_res_dir, 'pwg_stats.npy')\n stat = np.load(pwg_stat)\n mu, std = stat\n mu = paddle.to_tensor(mu)\n std = paddle.to_tensor(std)\n pwg_normalizer = ZScore(mu, std)\n\n # inference\n 
self.fastspeech2_inference = FastSpeech2Inference(fastspeech2_normalizer, model)\n self.pwg_inference = PWGInference(pwg_normalizer, vocoder)\n\n self.output_dir = Path(output_dir)\n self.output_dir.mkdir(parents=True, exist_ok=True)\n\n def forward(self, text: str):\n wav = None\n input_ids = self.frontend.get_input_ids(text, merge_sentences=True)\n phone_ids = input_ids[\"phone_ids\"]\n for part_phone_ids in phone_ids:\n with paddle.no_grad():\n mel = self.fastspeech2_inference(part_phone_ids)\n temp_wav = self.pwg_inference(mel)\n if wav is None:\n wav = temp_wav\n else:\n wav = paddle.concat([wav, temp_wav])\n\n return wav\n\n @serving\n def generate(self, sentences: List[str], device='cpu'):\n assert isinstance(sentences, list) and isinstance(sentences[0], str), \\\n 'Input data should be List[str], but got {}'.format(type(sentences))\n\n paddle.set_device(device)\n wav_files = []\n for i, sentence in enumerate(sentences):\n wav = self(sentence)\n wav_file = str(self.output_dir.absolute() / (str(i + 1) + \".wav\"))\n sf.write(wav_file, wav.numpy(), samplerate=self.samplerate)\n wav_files.append(wav_file)\n\n logger.info('{} wave files have been generated in {}'.format(len(sentences), self.output_dir.absolute()))\n return wav_files\n" ]
[ [ "numpy.load" ] ]
lhw362950217/sqlflow
[ "a96cb7967a4b9cc82620b0286804b17e6d879f2c" ]
[ "python/runtime/feature/derivation.py" ]
[ "# Copyright 2020 The SQLFlow Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__all__ = ['infer_feature_columns', 'get_ordered_field_descs']\n\nimport re\n\nimport numpy as np\nimport six\nfrom runtime.feature.column import (CategoryIDColumn, EmbeddingColumn,\n IndicatorColumn, NumericColumn)\nfrom runtime.feature.field_desc import DataFormat, DataType, FieldDesc\nfrom runtime.verifier import fetch_samples\n\n\ndef init_column_map(target_fc_map, fc):\n \"\"\"\n Init the target_fc_map by the feature column fc.\n\n Args:\n target_fc_map (dict[str -> FeatureColumn): the feature column map,\n where the key is the field name.\n fc (FeatureColumn): the feature column object.\n\n Returns:\n None.\n \"\"\"\n if isinstance(fc, (EmbeddingColumn, IndicatorColumn)) \\\n and len(fc.get_field_desc()) == 0:\n if fc.name not in target_fc_map:\n target_fc_map[fc.name] = []\n\n target_fc_map[fc.name].append(fc)\n else:\n for fd in fc.get_field_desc():\n if fd.name not in target_fc_map:\n target_fc_map[fd.name] = []\n\n target_fc_map[fd.name].append(fc)\n\n\ndef make_feature_column_map(features):\n \"\"\"\n Build a FeatureColumn map by the features.\n\n Args:\n features (dict[str -> list[FeatureColumn]]): the\n input feature columns. The key of the dict is\n the target name, e.g. \"feature_columns\".\n\n Returns:\n A map of type dict[str -> dict[str -> list[FeatureColumn]]].\n The key of the outer dict is the target name, e.g. \"feature_columns\",\n and the key of the inner dict is the field name.\n \"\"\"\n fc_map = {}\n for target, fc_list in features.items():\n if target not in fc_map:\n fc_map[target] = {}\n\n for fc in fc_list:\n init_column_map(fc_map[target], fc)\n\n return fc_map\n\n\ndef make_field_desc_map(features):\n \"\"\"\n Build a FieldDesc dict by the features.\n\n Args:\n features (dict[str -> list[FeatureColumn]]): the\n input feature columns. The key of the dict is\n the target name, e.g. 
\"feature_columns\".\n\n Returns:\n A map of type dict[str -> FieldDesc], where the\n key is the field name.\n \"\"\"\n fd_map = {}\n for _, fc_list in features.items():\n for fc in fc_list:\n for fd in fc.get_field_desc():\n fd_map[fd.name] = fd\n\n return fd_map\n\n\ndef new_default_field_desc(name):\n \"\"\"\n Create a new default FieldDesc object.\n\n Args:\n name: the FieldDesc name.\n\n Returns:\n A FieldDesc object whose name is the given name,\n and the data type is INT.\n \"\"\"\n return FieldDesc(name=name, dtype=DataType.INT64)\n\n\n# A regular expression to match any real number\nREAL_NUMBER_PATTERN = re.compile(\n \"((\\\\+|-)?([0-9]+)(\\\\.[0-9]+)?)|((\\\\+|-)?\\\\.?[0-9]+)\")\n\n# A regular expression to match the form of \"3,5,7\"\nCSV_PATTERN = re.compile(\n \"((%s)\\\\,)+(%s)\" %\n (REAL_NUMBER_PATTERN.pattern, REAL_NUMBER_PATTERN.pattern))\n\n# A regular expression to match the form of \"0:3.2 7:-2.3\"\nKV_PATTERN = re.compile(\"([0-9]+:(%s)\\\\s*)+\" % REAL_NUMBER_PATTERN.pattern)\n\n# A regular expression to match multiple blanks\nBLANK_PATTERN = re.compile(\"\\\\s+\")\n\n# The Python 2/3 int64 type\nINT64_TYPE = long if six.PY2 else int # noqa: F821\n\n\ndef infer_string_data_format(str_data):\n \"\"\"\n Infer the data format of the given string.\n\n Args:\n str_data (str): a given string.\n\n Returns:\n One of PLAIN, CSV and KV.\n \"\"\"\n if CSV_PATTERN.fullmatch(str_data):\n return DataFormat.CSV\n\n if KV_PATTERN.fullmatch(str_data):\n return DataFormat.KV\n\n return DataFormat.PLAIN\n\n\ndef fill_csv_field_desc(cell, field_desc):\n \"\"\"\n Fill the FieldDesc info by the cell data in the CSV format,\n including shape, delimiter, max_id, dtype, etc. of the FieldDesc.\n\n Args:\n cell (str): the cell data of the table in the CSV format.\n field_desc (FieldDesc): the FieldDesc object.\n\n Returns:\n None.\n \"\"\"\n values = cell.split(\",\")\n if field_desc.is_sparse:\n assert field_desc.shape is not None, \\\n \"the shape of CSV format data must be given\"\n else:\n if field_desc.shape is None:\n field_desc.shape = [len(values)]\n\n size = np.prod(field_desc.shape)\n if np.prod(field_desc.shape) != len(values):\n if size > 1:\n raise ValueError(\n \"column %s should be csv format dense tensor \"\n \"of %d element(s), but got %d element(s)\" %\n (field_desc.name, np.prod(field_desc.shape), len(values)))\n\n field_desc.shape = [len(values)]\n\n # FIXME(sneaxiy): currently, we only support sparse tensor in CSV format\n # whose values are 0 or 1. The numeric values in the cell data are the\n # indices where the values of the sparse tensor are 1. For example, the\n # cell value \"3,5,7\" indicates a sparse tensor x, and\n # x[3] = x[5] = x[7] = 1, and the other values of x are all zeros. Since\n # the index is always of integer type, we force to set the data type of\n # sparse tensor in CSV format is \"Int\". We should remove this constraint\n # if we will support other data formats in the future.\n if field_desc.is_sparse:\n field_desc.dtype = DataType.INT64\n\n field_desc.delimiter = \",\"\n for v in values:\n if field_desc.dtype == DataType.INT64:\n try:\n int_value = INT64_TYPE(v)\n except ValueError:\n field_desc.dtype = DataType.FLOAT\n field_desc.max_id = 0 # clear the max id\n continue\n else:\n continue\n\n # INT type, record the maximum id\n field_desc.max_id = max(field_desc.max_id, int_value)\n\n\ndef fill_kv_field_desc(cell, field_desc):\n \"\"\"\n Fill the FieldDesc info by the cell data in the KV format,\n including shape, etc. 
of the FieldDesc.\n\n Args:\n cell (str): the cell data of the table in the KV format.\n field_desc (FieldDesc): the FieldDesc object.\n\n Returns:\n None.\n \"\"\"\n # split and remove empty string\n split = [s for s in BLANK_PATTERN.split(cell) if s]\n max_idx = field_desc.shape[0]\n for s in split:\n idx = INT64_TYPE(s.split(':', 2)[0]) + 1\n if idx > max_idx:\n max_idx = idx\n\n field_desc.shape[0] = max_idx\n\n\ndef fill_plain_field_desc(cell, field_desc):\n \"\"\"\n Fill the FieldDesc info by the cell data in the PLAIN format,\n including shape, dtype, vocabulary, etc. of the FieldDesc.\n This method would try to convert the cell data to be an integer\n or floating-point number if possible.\n\n Args:\n cell (str): the cell data of the table in the PLAIN format.\n field_desc (FieldDesc): the FieldDesc object.\n\n Returns:\n None.\n \"\"\"\n try:\n int_value = INT64_TYPE(cell)\n except ValueError:\n int_value = None\n\n if int_value is not None:\n field_desc.shape = [1]\n return\n\n try:\n float_value = float(cell)\n except ValueError:\n float_value = None\n\n if float_value is None:\n field_desc.dtype = DataType.STRING\n field_desc.shape = [1]\n if field_desc.vocabulary is None:\n field_desc.vocabulary = set()\n # Build vocabulary from the sample data\n field_desc.vocabulary.add(cell)\n else:\n field_desc.dtype = DataType.FLOAT\n field_desc.shape = [1]\n\n\ndef fill_field_descs(generator, fd_map):\n \"\"\"\n Fill the FieldDesc infos in the FieldDesc map by the\n generator data.\n\n Args:\n generator (generator): a generator which yields\n each row of the table data.\n fd_map (dict[str -> FieldDesc]): a FieldDesc map,\n where the key is the field name.\n\n Returns:\n None.\n \"\"\"\n names = generator.field_names\n dtypes = generator.field_types\n str_column_indices = []\n for idx, dtype in enumerate(dtypes):\n dtype = dtype.upper()\n if dtype in [\"INT\", \"TINYINT\", \"DECIMAL\", \"BIGINT\"]:\n fd_map[names[idx]].dtype = DataType.INT64\n fd_map[names[idx]].shape = [1]\n elif dtype in [\"FLOAT\", \"DOUBLE\"]:\n fd_map[names[idx]].dtype = DataType.FLOAT\n fd_map[names[idx]].shape = [1]\n elif dtype in [\"CHAR\", \"VARCHAR\", \"TEXT\", \"STRING\"]:\n str_column_indices.append(idx)\n else:\n raise ValueError(\"unsupported field type %s\" % dtype)\n\n # No string column, just return\n if not str_column_indices:\n return\n\n original_size = {}\n for name, fd in fd_map.items():\n if fd.shape is None:\n original_size[name] = 1\n else:\n original_size[name] = np.prod(fd.shape)\n\n format = [None] * len(str_column_indices)\n field_descs = [fd_map[names[i]] for i in str_column_indices]\n for row_idx, row_data in enumerate(generator()):\n row_data = [row_data[i] for i in str_column_indices]\n if row_idx == 0:\n for i, cell in enumerate(row_data):\n format[i] = infer_string_data_format(cell)\n field_descs[i].format = format[i]\n\n for i, cell in enumerate(row_data):\n if format[i] == DataFormat.PLAIN:\n fill_plain_field_desc(cell, field_descs[i])\n elif format[i] == DataFormat.CSV:\n fill_csv_field_desc(cell, field_descs[i])\n elif format[i] == DataFormat.KV:\n if original_size.get(field_descs[i].name, 1) == 1:\n if row_idx == 0:\n field_descs[i].shape = [1]\n\n fill_kv_field_desc(cell, field_descs[i])\n else:\n raise ValueError(\"unsupported data format {}\".format(\n format[i]))\n\n\ndef update_feature_column(fc, fd_map):\n \"\"\"\n Update the FeatureColumn object by the FieldDesc map.\n\n Args:\n fc (FeatureColumn): a FeatureColumn object. 
Only EmbeddingColumn\n and IndicatorColumn without category_column info would be\n updated currently.\n fd_map (dict[str -> FieldDesc]): a FieldDesc map, where the key is the\n field name.\n\n Returns:\n None.\n \"\"\"\n if isinstance(fc, EmbeddingColumn) and fc.category_column is None:\n field_desc = fd_map[fc.name]\n if field_desc is None:\n raise ValueError(\"column not found or inferred: %s\" % fc.name)\n\n # FIXME(typhoonzero): when to use sequence_category_id_column?\n # if column fieldDesc is SPARSE, the sparse shape should\n # be in cs.Shape[0]\n bucket_size = field_desc.shape[0]\n if not field_desc.is_sparse:\n assert field_desc.max_id > 0, \\\n \"use dense column on embedding column \" \\\n \"but did not get a correct MaxID\"\n bucket_size = field_desc.max_id + 1\n\n fc.category_column = CategoryIDColumn(field_desc, bucket_size)\n return\n\n if isinstance(fc, IndicatorColumn) and fc.category_column is None:\n field_desc = fd_map[fc.name]\n if field_desc is None:\n raise ValueError(\"column not found or inferred: %s\" % fc.name)\n\n assert field_desc.is_sparse, \\\n \"cannot use dense column with indicator column\"\n assert field_desc.max_id > 0, \\\n \"use indicator column but did not get a correct MaxID\"\n bucket_size = field_desc.max_id + 1\n fc.category_column = CategoryIDColumn(field_desc, bucket_size)\n\n\ndef new_feature_column(field_desc):\n \"\"\"\n Create a new FeatureColumn object by the given FieldDesc object.\n\n Args:\n field_desc (FieldDesc): a given FieldDesc object.\n\n Returns:\n If field_desc.dtype is STRING, return an EmbeddingColumn object.\n Otherwise, return a NumericColumn object.\n \"\"\"\n if field_desc.dtype != DataType.STRING:\n return NumericColumn(field_desc)\n else:\n category_column = CategoryIDColumn(field_desc,\n len(field_desc.vocabulary))\n # NOTE(typhoonzero): a default embedding size of 128 is enough\n # for most cases.\n embedding = EmbeddingColumn(category_column=category_column,\n dimension=128,\n combiner=\"sum\")\n embedding.name = field_desc.name\n return embedding\n\n\ndef derive_feature_columns(targets, fc_map, fd_map, selected_field_names,\n label_name):\n \"\"\"\n Derive the FeatureColumn.\n\n Args:\n targets (list[str]): the feature column targets,\n e.g. 
\"feature_columns\".\n fc_map (dict[str -> dict[str -> list[FeatureColumn]]]): a FeatureColumn\n map, where the key of the outer dict is the target name, e.g.\n \"feature_columns\", and the key of the inner dict is the field name.\n fd_map (dict[str -> FieldDesc]): a FieldDesc map, where the key is the\n field name.\n selected_field_names (list[str]): the selected field name of the SQL\n statement.\n label_name (str): the label name of the TO TRAIN statement.\n\n Returns:\n None.\n \"\"\"\n for target in targets:\n if target not in fc_map:\n fc_map[target] = {}\n\n fc_target_map = fc_map[target]\n\n new_fc_target_map = {} # field_name -> list(FeatureColumn)\n for field_name in fc_target_map:\n if field_name in selected_field_names:\n new_fc_target_map[field_name] = fc_target_map[field_name]\n continue\n\n if len(fc_map) > 1:\n raise ValueError(\"cannot expand '%s' in COLUMN clause\",\n field_name)\n\n field_pattern = re.compile(field_name, flags=re.I)\n match_field_name = None\n for selected_field_name in selected_field_names:\n if field_pattern.fullmatch(selected_field_name):\n assert match_field_name is None, \\\n \"%s matches duplicate fields\" % field_name\n match_field_name = selected_field_name\n\n if match_field_name is None:\n raise ValueError(\n \"'%s' in COLUMN clause does not match any selected fields\"\n % field_name)\n\n new_fc = fc_target_map[match_field_name][\n 0].new_feature_column_from(fd_map[match_field_name])\n new_fc_target_map[match_field_name] = [new_fc]\n del fd_map[field_name]\n\n # ================== MAIN LOOP ==================\n # Update or generate FeatureColumn for each selected field:\n for selected_field_name in selected_field_names:\n if label_name == selected_field_name:\n continue # ignore label field\n\n fc_list = new_fc_target_map.get(selected_field_name)\n if fc_list is not None:\n for fc in fc_list:\n update_feature_column(fc, fd_map)\n else:\n if len(fc_map) > 1:\n # if column clause have more than one target, each target\n # should specify the full list of the columns to use.\n continue\n\n field_desc = fd_map[selected_field_name]\n if field_desc is None:\n raise ValueError(\"column not found or inferred: %s\" %\n selected_field_name)\n new_fc = new_feature_column(field_desc)\n new_fc_target_map[selected_field_name] = [new_fc]\n\n fc_target_map.clear()\n fc_target_map.update(new_fc_target_map)\n\n\ndef update_ir_feature_columns(features, fc_map, selected_field_names,\n label_name):\n \"\"\"\n Update the IR FeatureColumn map `features` by the derived FeatureColumn map\n `fc_map` . If any FeatureColumn inside `fc_map` does not exist in\n `features`, it would be added to `features` . Notice that `features` is not\n updated in-place, and we would return a new updated IR FeatureColumn map in\n this method.\n\n Args:\n features (dict[str -> list[FeatureColumn]]): the input IR FeatureColumn\n map to be updated. The key of the dict is the target name, e.g.\n \"feature_columns\".\n fc_map (dict[str -> dict[str -> list[FeatureColumn]]]): a derived\n FeatureColumn map, where the key of the outer dict is the target\n name, e.g. 
\"feature_columns\", and the key of the inner dict is\n the field name.\n label_name (str): the label name of the TO TRAIN statement.\n selected_field_names (list[str]): the selected field name of the SQL\n statement.\n\n Returns:\n A new IR FeatureColumn map of dict[str -> list[FeatureColumn]], which\n is updated from the inputs `features` and `fc_map` .\n \"\"\"\n new_ir_feature_columns = {}\n for target, target_fc_map in fc_map.items():\n new_fc_list = []\n for field_name in selected_field_names:\n if field_name == label_name:\n continue\n\n fc_list = target_fc_map[field_name]\n if fc_list is None:\n raise ValueError(\"column not found or inferred: %s\" %\n field_name)\n\n for fc in fc_list:\n if fc not in new_fc_list:\n new_fc_list.append(fc)\n\n single_fd_fcs = []\n multi_fd_fcs = []\n for fc in new_fc_list:\n field_desc_num = len(fc.get_field_desc())\n assert field_desc_num > 0, \"FieldDesc number must be larger than 0\"\n if field_desc_num == 1:\n single_fd_fcs.append(fc)\n else:\n multi_fd_fcs.append(fc)\n\n if multi_fd_fcs:\n original_fc_list = features[target]\n indices = []\n for fc in multi_fd_fcs:\n found = False\n for i, original_fc in enumerate(original_fc_list):\n if fc == original_fc:\n indices.append(i)\n found = True\n break\n\n if not found:\n raise ValueError(\"some feature column is missing in the \"\n \"derivation stage\")\n\n sorted_pos = sorted(range(len(indices)), key=lambda k: indices[k])\n multi_fd_fcs = [multi_fd_fcs[i] for i in sorted_pos]\n\n new_fc_list = single_fd_fcs + multi_fd_fcs\n new_ir_feature_columns[target] = new_fc_list\n\n return new_ir_feature_columns\n\n\ndef derive_label(label, fd_map):\n \"\"\"\n Derive the feature column of the label.\n\n Args:\n label (FeatureColumn): the FeatureColumn object of the label.\n fd_map: (dict[str -> FieldDesc]): a FieldDesc map, where the key is the\n field name.\n\n Returns:\n A derived NumericColumn of the label.\n \"\"\"\n label_name = label.get_field_desc()[0].name if label is not None else None\n if not label_name:\n return # NOTE: clustering model may not specify Label\n\n label_field_desc = fd_map[label_name]\n assert label_field_desc is not None, \\\n \"deriveLabel: LABEL COLUMN '%s' not found\" % label_name\n\n # use shape [] if label shape is [1] for TensorFlow scalar label\n # shape should be [].\n shape = label_field_desc.shape\n if shape is None or (len(shape) == 1 and shape[0] == 1):\n label_field_desc.shape = []\n\n return NumericColumn(label_field_desc)\n\n\ndef infer_feature_columns(conn, select, features, label, n=1000):\n \"\"\"\n Infer the FeatureColumns.\n\n Args:\n conn: the database connection object.\n select (str): the select SQL statement.\n features (dict[str -> list[FeatureColumn]]): the input feature\n columns. The key of the dict is the target name, e.g.\n \"feature_columns\".\n label (FeatureColumn): the FeatureColumn object of the label.\n n (int): the sample number to be fetched in the table. 
Default\n 1000.\n\n Returns:\n A tuple of (new_features, new_label), which can be accepted by IR.\n \"\"\"\n if features is None:\n features = {}\n\n fc_map = make_feature_column_map(features)\n fd_map = make_field_desc_map(features)\n\n generator = fetch_samples(conn, select, n)\n if generator is None:\n raise ValueError(\"empty dataset\")\n\n selected_field_names = generator.field_names\n assert len(set(selected_field_names)) == len(selected_field_names), \\\n \"duplicate selected field names\"\n\n for name in selected_field_names:\n if name not in fd_map:\n fd_map[name] = new_default_field_desc(name)\n\n fill_field_descs(generator, fd_map)\n label_name = label.get_field_desc()[0].name if label is not None else None\n\n targets = list(features.keys())\n if not targets:\n targets.append(\"feature_columns\")\n\n derive_feature_columns(targets, fc_map, fd_map, selected_field_names,\n label_name)\n features = update_ir_feature_columns(features, fc_map,\n selected_field_names, label_name)\n label = derive_label(label, fd_map)\n return features, label\n\n\ndef get_ordered_field_descs(features):\n assert isinstance(features, dict)\n fd_list = []\n for target in features:\n for fc in features[target]:\n for fd in fc.get_field_desc():\n fd_list.append(fd)\n return fd_list\n" ]
[ [ "numpy.prod" ] ]
kaihsin/Tor10
[ "81194d3a65f328752fd25a1ce15cb1bf38e99457" ]
[ "tor10/UniTensor.py" ]
[ "## [DEBUG] >>> \n## Note, set this to True to enable debug section\nDEBUG = False\n## <<<\n\n\nimport os\nimport pickle as pkl\n\nimport torch\n\nfrom . import linalg\nfrom .Bond import *\nfrom .Bond import _fx_GetCommRows\n\n\n## Developer Note:\n## [KHW]\n## from v0.3+, we deprecate dense Symmetry. \n## Using a is_symm as master switch. \n## Find \"[Fusion tree]\" keyword for future extend of non-abelian / fermion etc. \n## Find \"DEBUG\" keywork to comment the debug section when in release!!. \n\ndef _fx_decompress_idx(x, accu_offsets):\n y = []\n for i in range(len(accu_offsets)):\n y.append(np.array(x / accu_offsets[i]).astype(np.int))\n x = x % accu_offsets[i]\n return np.array(y).swapaxes(0, 1)\n\n\nclass UniTensor:\n\n def _mac(self, torch_tensor=None, braket=None, sym_mappers=None):\n \"\"\"\n Memory Allocation and Check (_mac)\n\n torch_tensor :\n This is the internal arguments in current version. It should not be directly use, otherwise may cause inconsistence with Bonds and memory layout.\n *For Developer:\n > The torch_tensor should have the same rank as len(label), and with each bond dimensions strictly the same as describe as in bond in self.bonds.\n\n check :\n If False, all the checking across bonds/labels/Storage.shape will be ignore.\n\n braket :\n If set, the braket -1 or +1 indicate the bond are BD_KET or BD_BRA.\n It is handy for calculating reverse quantum flow / blocks when bra-bond is permuted to col-space\n (unmatched braket)\n\n sym_mappers:\n A tuple, used to pass the shallow permute informations / block mapping information.\n \"\"\"\n if braket is not None:\n self.braket = copy.deepcopy(braket)\n self._check_braket()\n\n if sym_mappers is not None:\n self._mapper = copy.deepcopy(sym_mappers[0])\n self._inv_mapper = copy.deepcopy(sym_mappers[1])\n\n self._Ket_mapper_blks = copy.deepcopy(sym_mappers[2])\n self._Ket_invmapper_blks = copy.deepcopy(sym_mappers[3])\n self._Bra_mapper_blks = copy.deepcopy(sym_mappers[4])\n self._Bra_invmapper_blks = copy.deepcopy(sym_mappers[5])\n self._contiguous = copy.deepcopy(sym_mappers[6])\n self._accu_off_in = copy.deepcopy(sym_mappers[7])\n self._accu_off_out = copy.deepcopy(sym_mappers[8])\n self._block_qnums = copy.deepcopy(sym_mappers[9])\n # if torch_tensor is None:\n # raise TypeError(\"UniTensor.__init__\",\"[ERROR], pass the interface must accompany with torch_tensor\")\n\n if torch_tensor is not None:\n self.Storage = torch_tensor\n\n def __init__(self, bonds, rowrank=None, labels=None, device=torch.device(\"cpu\"), dtype=torch.float64, is_diag=False,\n requires_grad=False, name=\"\", check=True):\n \"\"\"\n This is the constructor of UniTensor.\n\n Public Args:\n\n bonds:\n List of bonds.\n It should be an list or np.ndarray with len(list) being the number of bonds.\n\n rowrank:\n The number of bonds in row-space.\n The first [rowrank] bonds will be define as the row-space (which means the row space when flatten as Matrix), and the other bonds will be defined as in the col-space (which is the column space when flatten as Matrix).\n When interprete the memory layout as Matrix, the combine of first rowrank bonds will be the row and the other bond will be column.\n\n\n labels:\n The label of each bond.\n 1. the number of elements should be the same as the total rank of the tensor, contain no duplicated elements.\n 2. all the label should be integer. if the label is specify as floating point, it will be rounded as integer.\n\n device:\n This should be a [torch.device]. 
When provided, the tensor will be put on the device (\"cpu\", \"cuda\", \"cuda:x\" with x is the GPU-id. See torch.device for further information.)\n\n dtype :\n This should be a [ torch.dtype ].\n *The default type is float with either float32 or float64 which follows the same internal rule of pytorch. For further information, see pytorch documentation.\n\n is_diag:\n This states if the current UniTensor is a diagonal matrix or not. If True, the Storage will only store diagonal elements.\n Note that if is_diag=True, then the UniTensor is strictly required to be a square rank-2 tensor.\n\n requires_grad:\n Activate the autograd function for UniTensor. This is the same as torch.Tensor\n\n name:\n This states the name of current UniTensor.\n\n \n Example for how to create a UniTensor:\n\n * create a rank-2 untagged UniTensor (matrix) with shape (3,4):\n >>> a = tor10.UniTensor(bonds=[tor10.Bond(3),tor10.Bond(4)],rowrank=1)\n >>> a.Print_diagram(bond_info=True)\n -----------------------\n tensor Name : \n tensor Rank : 2\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 0 ____| 3 4 |____ 1 \n \\ / \n ------------- \n lbl:0 Dim = 3 |\n REG :\n _\n lbl:1 Dim = 4 |\n REG :\n\n * create a rank-3 untagged UniTensor with one bond in row-space and two bonds in col-space, shape (3,4,5) and set labels [-3,4,1] for each bond:\n >>> c = tor10.UniTensor(bonds=[tor10.Bond(3),tor10.Bond(4),tor10.Bond(5)],rowrank=1,labels=[-3,4,1])\n >>> c.Print_diagram(bond_info=True)\n tensor Name : \n tensor Rank : 3\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n -3 ____| 3 4 |____ 4 \n | | \n | 5 |____ 1 \n \\ / \n ------------- \n lbl:-3 Dim = 3 |\n REG :\n _\n lbl:4 Dim = 4 |\n REG :\n _\n lbl:1 Dim = 5 |\n REG :\n\n\n * create a rank-0 UniTensor\n >>> rk0t = tor10.UniTensor(bonds=[])\n >>> rk0t.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 0\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n \\ / \n ------------- \n\n >>> print(rk0t)\n Tensor name: \n is_diag : False\n tensor(0., dtype=torch.float64) \n\n * create a rank-3 tagged UniTensor with two bonds in row-space and two bonds in col-space, shape (2,3,4,5)\n >>> bds = [tor10.Bond(2,tor10.BD_KET),tor10.Bond(3,tor10.BD_KET),tor10.Bond(4,tor10.BD_BRA),tor10.Bond(5,tor10.BD_BRA)]\n >>> o = tor10.UniTensor(bonds=bds,rowrank=2)\n >>> o.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 4\n has_symmetry: False\n on device : cpu\n is_diag : False\n braket_form : True\n |ket> <bra| \n --------------- \n | | \n 0 > __| 2 4 |__ < 2 \n | | \n 1 > __| 3 5 |__ < 3 \n | | \n --------------- \n\n\n * note that if the BRA bond is not in the col-space, or KET bond is not in the row-space, the tensor is in the so called \"non-braket_form, which will have a * symbol indicating the mismatch.\"\n >>> bd2 = [tor10.Bond(2,tor10.BD_KET),tor10.Bond(5,tor10.BD_BRA),tor10.Bond(4,tor10.BD_BRA),tor10.Bond(3,tor10.BD_KET)]\n >>> c_mismatch = tor10.UniTensor(bonds=bd2,rowrank=2)\n >>> c_mismatch.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 4\n has_symmetry: False\n on device : cpu\n is_diag : False\n braket_form : False\n |ket> <bra| \n --------------- \n | | \n 0 > __| 2 4 |__ < 2 \n | | \n 1 <*__| 5 3 |__*> 3 \n | | \n --------------- \n\n\n * create a rank-2 UniTensor with one inbond, one outbond, shape (3,4) on GPU-0:\n >>> d = 
tor10.UniTensor(bonds=[tor10.Bond(3),tor10.Bond(4)],rowrank=1,device=torch.device(\"cuda:0\"))\n\n * create a diagonal 6x6 rank-2 tensor(matrix):\n Note that if is_diag is True, rowrank must be 1.\n >>> e = tor10.UniTensor(bonds=[tor10.Bond(6),tor10.Bond(6)],rowrank=1,is_diag=True)\n\n Note that when is_diag is set to True, the UniTensor should be a square matrix.\n\n * create a rank-3 UniTensor with two bonds in row-space and one bond in col-space, and single precision:\n >>> f = tor10.UniTensor(bonds=[tor10.Bond(3),tor10.Bond(4),tor10.Bond(5)],rowrank=2,labels=[-3,4,1],dtype=torch.float32)\n\n * create a rank-3 UniTensor with U1 symmetry:\n >>> bd_sym_1 = tor10.Bond(3,tor10.BD_KET,qnums=[[0],[1],[2]])\n >>> bd_sym_2 = tor10.Bond(4,tor10.BD_KET,qnums=[[-1],[2],[0],[2]])\n >>> bd_sym_3 = tor10.Bond(5,tor10.BD_BRA,qnums=[[4],[2],[-1],[5],[1]])\n >>> symT = tor10.UniTensor(bonds=[bd_sym_1,bd_sym_2,bd_sym_3],rowrank=2,labels=[10,11,12])\n >>> symT.Print_diagram(bond_info=True)\n -----------------------\n tensor Name : \n tensor Rank : 3\n has_symmetry: True\n on device : cpu\n braket_form : True\n |ket> <bra| \n --------------- \n | | \n 10 > __| 3 5 |__ < 12 \n | | \n 11 > __| 4 | \n | | \n --------------- \n lbl:10 Dim = 3 |\n KET : U1:: +2 +1 +0\n _\n lbl:11 Dim = 4 |\n KET : U1:: +2 +2 +0 -1\n _ \n lbl:12 Dim = 5 |\n BRA : U1:: +5 +4 +2 +1 -1\n\n\n \"\"\"\n\n ## general property:---------------------------------\n self.name = name\n\n ## bonds:\n self.bonds = np.array([copy.deepcopy(bonds[i]) for i in range(len(bonds))])\n\n # labels: \n if labels is None:\n if len(self.bonds) == 0:\n self.labels = np.array([], dtype=np.int)\n else:\n self.labels = np.arange(len(self.bonds))\n else:\n self.labels = np.array(copy.deepcopy(labels), dtype=np.int)\n\n ## checking :\n if check:\n # check # of labels consist with bond.\n if not len(self.labels) == (len(self.bonds)):\n raise Exception(\"UniTensor.__init__\", \"labels size is not consistence with the rank\")\n # Bonds:\n if rowrank is not None:\n if rowrank < 0 or rowrank > len(self.bonds):\n raise Exception(\"UniTensor.__init__\", \"the rowrank should be >=0 and < # of bonds\")\n\n if len(self.bonds) != 0:\n\n ## check duplicate label\n if not len(np.unique(self.labels)) == len(self.labels):\n raise Exception(\"UniTensor.__init__\", \"labels contain duplicate element.\")\n\n ## check qnums:\n isSymm = np.unique([(bd.qnums is None) for bd in self.bonds])\n if len(isSymm) != 1:\n raise TypeError(\"UniTensor.__init__\",\n \"the bonds are not consistent. 
Cannot mix bonds with and without symmetry (qnums).\")\n else:\n if is_diag:\n raise Exception(\"UniTensor.__init__\", \"the scalar tensor (rank-0) cannot have is_diag=True.\")\n\n ## braket, is_braket:\n # is_tag = False if len(self.bonds)==0 else (self.bonds[0].bondType != BD_REG)\n self.is_braket = None\n self.braket = None\n self.rowrank = rowrank\n\n if check:\n if len(self.bonds) != 0:\n if self.bonds[0].bondType != BD_REG:\n self.braket = np.array([BondType[self.bonds[i].bondType] for i in range(len(self.bonds))],\n dtype=np.int)\n\n if self.rowrank is None:\n if len(self.bonds) == 0:\n self.rowrank = 0\n else:\n if self.braket is not None:\n self.rowrank = len(np.argwhere(self.braket == BondType[BD_KET]))\n else:\n raise Exception(\n \"[ERROR] when initializing a UniTensor whose bonds are all regular, rowrank should be provided\")\n else:\n self.rowrank = int(rowrank)\n\n self._check_braket()\n\n ## check is_symm:\n self.is_symm = False if len(self.bonds) == 0 else (self.bonds[0].qnums is not None)\n self.is_diag = is_diag\n\n if not self.is_symm:\n ## non-symmetry properties:----------------------------\n self.is_diag = is_diag\n if check:\n if is_diag:\n if not len(self.labels) == 2:\n raise TypeError(\"UniTensor.__init__\", \"is_diag=True requires Tensor rank==2\")\n\n if not self.rowrank == 1:\n raise TypeError(\"UniTensor.__init__\",\n \"is_diag=True requires Tensor rank==2, with 1 inbond and 1 outbond (rowrank=1)\")\n\n if not self.bonds[0].dim == self.bonds[1].dim:\n raise TypeError(\"UniTensor.__init__\", \"is_diag=True requires Tensor to be square rank-2\")\n\n if self.is_diag:\n self.Storage = torch.zeros(self.bonds[0].dim, device=device, dtype=dtype)\n else:\n if len(self.bonds) != 0:\n DALL = [self.bonds[i].dim for i in range(len(self.bonds))]\n self.Storage = torch.zeros(tuple(DALL), device=device, dtype=dtype)\n del DALL\n else:\n self.Storage = torch.tensor(0, device=device, dtype=dtype)\n\n # self.Storage = torch_tensor\n\n else:\n ## Symmetry properties-------------------------------:\n if check:\n if self.bonds[0].qnums is not None:\n if len(np.unique([bd.nsym for bd in self.bonds])) != 1:\n raise TypeError(\"UniTensor.__init__\",\n \"the number of symmetry types for the symmetry bonds doesn't match.\")\n if self.rowrank < 1 or self.rowrank >= len(self.bonds):\n raise TypeError(\"UniTensor.__init__\",\n \"[ERROR] tensor with symmetry must have at least one rank for row space and one rank for column space\")\n\n nbra = len(np.argwhere(self.braket == BondType[BD_BRA]).flatten())\n if nbra < 1 or nbra >= len(self.bonds):\n raise TypeError(\"UniTensor.__init__\",\n \"[ERROR] tensor with symmetry must have at least one bra-bond and one ket-bond\")\n\n ## only activated when symmetry is on.\n self._Ket_mapper_blks = None ## this follows memory\n self._Bra_mapper_blks = None ## this follows memory\n self._Ket_invmapper_blks = None ## this follows memory\n self._Bra_invmapper_blks = None ## this follows memory\n self._mapper = None ## memory idx to real idx\n self._inv_mapper = None ## real idx to memory index\n self._contiguous = True\n self._accu_off_in = None ## this follows memory\n self._accu_off_out = None ## this follows memory\n self._block_qnums = None ## this follows the real Tensor, not memory!!!\n\n ## memory contiguous mapper\n if check:\n\n # calc offsets\n accu_off = []\n tmp = 1\n for i in range(len(self.bonds)):\n accu_off.append(tmp)\n tmp *= self.bonds[-1 - i].dim\n accu_off = np.array(accu_off[::-1])\n self._accu_off_in = (accu_off[:self.rowrank] / 
accu_off[self.rowrank - 1]).astype(np.int)
                self._accu_off_out = accu_off[self.rowrank:]
                del accu_off

                ## mapper
                self._mapper = np.arange(len(self.bonds)).astype(np.int)
                self._inv_mapper = copy.copy(self._mapper)

                ## Get common qnums for in and out bonds
                b_tqin, b_tqout = self.GetTotalQnums(physical=False)
                tqin_uni = b_tqin.GetUniqueQnums()
                tqout_uni = b_tqout.GetUniqueQnums()
                C = _fx_GetCommRows(tqin_uni, tqout_uni)
                if len(C.flatten()) == 0:
                    raise TypeError("UniTensor.__init__",
                                    "[ERROR] no valid block in current Tensor. Please check that the total qnums of the bra bonds and of the ket bonds share at least one common set of qnums.")

                self.Storage = []
                self._Ket_invmapper_blks = []
                self._Bra_invmapper_blks = []
                self._Ket_mapper_blks = -np.ones((b_tqin.dim, 2)).astype(np.int)
                self._Bra_mapper_blks = -np.ones((b_tqout.dim, 2)).astype(np.int)
                self._block_qnums = []

                for b in range(len(C)):
                    comm = tuple(C[b])
                    idx_in = np.argwhere((b_tqin.qnums == comm).all(axis=1)).flatten()
                    idx_out = np.argwhere((b_tqout.qnums == comm).all(axis=1)).flatten()
                    self.Storage.append(torch.zeros((len(idx_in), len(idx_out)), device=device, dtype=dtype))

                    ## interface: ket side
                    self._Ket_invmapper_blks.append(_fx_decompress_idx(idx_in, self._accu_off_in))
                    self._Ket_mapper_blks[idx_in, 0] = b
                    self._Ket_mapper_blks[idx_in, 1] = np.arange(len(idx_in)).astype(np.int)

                    ## interface: bra side
                    self._Bra_invmapper_blks.append(_fx_decompress_idx(idx_out, self._accu_off_out))
                    self._Bra_mapper_blks[idx_out, 0] = b
                    self._Bra_mapper_blks[idx_out, 1] = np.arange(len(idx_out)).astype(np.int)
                self._block_qnums = C

        if check:
            if requires_grad:
                self.requires_grad(True)

    def tag_braket(self, tags=None):
        if self.braket is None:
            if tags is None:
                self.braket = []
                for b_in in range(self.rowrank):
                    self.bonds[b_in].bondType = BD_KET
                    self.braket.append(BondType[BD_KET])
                for b_out in range(len(self.bonds) - self.rowrank):
                    self.bonds[b_out + self.rowrank].bondType = BD_BRA
                    self.braket.append(BondType[BD_BRA])
            else:
                # check:
                if len(tags) != len(self.bonds):
                    raise ValueError("[ERROR] tags must match the rank of bonds.")
                if any([x == BD_REG for x in tags]):
                    raise ValueError("[ERROR] tags cannot contain BD_REG")
                self.braket = []
                for b in range(len(self.bonds)):
                    self.bonds[b].bondType = tags[b]
                    self.braket.append(BondType[tags[b]])

            self.braket = np.array(self.braket)
            self._check_braket()

    def untag_braket(self):
        if self.is_symm:
            raise Exception("[ERROR]", "Cannot untag bra/ket on the bonds of a symmetry tensor.")

        if self.braket is not None:
            self.is_braket = None
            self.braket = None
            for b in range(len(self.bonds)):
                self.bonds[b].bondType = BD_REG

    def _check_braket(self):
        """
        This is an internal function!!
        """
        if self.braket is not None:
            if (self.braket[:self.rowrank] == BondType[BD_KET]).all() and (
                    self.braket[self.rowrank:] == BondType[BD_BRA]).all():
                self.is_braket = True
            else:
                self.is_braket = False

    def is_braket_form(self):
        """
        Return whether the current tensor is in braket_form. It can only be called on a tagged UniTensor
        (with or without symmetries).

        Return:

            bool.

        """
        if self.braket is None:
            raise Exception("[ERROR] for a tensor with regular bonds, there is no property of braket.")

        return self.is_braket

    def braket_form(self):
        """
        Permute the UniTensor to bra-ket form.

        [Tech.Note] the permuted UniTensor can be non-contiguous depending on the underlying memory layout. 
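
        A minimal usage sketch (tags set via tag_braket(), then the braket order broken by a Permute):

        >>> T = tor10.UniTensor(bonds=[tor10.Bond(3),tor10.Bond(4)],rowrank=1)
        >>> T.tag_braket()
        >>> T.Permute([1,0],rowrank=1)
        >>> print(T.is_braket_form())
        False

        >>> T.braket_form()
        >>> print(T.is_braket_form())
        True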
\n\n Return :\n self\n\n \"\"\"\n if self.braket is None:\n raise Exception(\"[ERROR] for a tensor with regular bonds, there is no property of barket.\")\n x = np.argsort(self.braket)\n Nin = len(np.argwhere(self.braket == BondType[BD_KET]))\n self.Permute(x, rowrank=Nin, by_label=False)\n return self\n\n def SetLabel(self, newLabel, idx):\n \"\"\"\n Set a new label for the bond at index :idx:\n\n Args:\n\n newLabel: The new label, it should be an integer.\n\n idx : The index of the bond. when specified, the label of the bond at this index will be changed.\n\n Example:\n\n >>> g = tor10.UniTensor(bonds=[tor10.Bond(3),tor10.Bond(4)],rowrank=1,labels=[5,6])\n >>> g.labels\n [5 6]\n\n\n Set \"-1\" to replace the original label \"6\" at index 1\n\n >>> g.SetLabel(-1,1)\n >>> g.labels\n [5 -1]\n\n \"\"\"\n if not type(newLabel) is int or not type(idx) is int:\n raise TypeError(\"UniTensor.SetLabel\", \"newLabel and idx must be int.\")\n\n if not idx < len(self.labels):\n raise ValueError(\"UniTensor.SetLabel\", \"idx exceed the number of bonds.\")\n\n if newLabel in self.labels:\n raise ValueError(\"UniTensor.SetLabel\", \"newLabel [%d] already exists in the current UniTensor.\" % newLabel)\n\n self.labels[idx] = newLabel\n\n def SetLabels(self, newlabels):\n \"\"\"\n Set new labels for all the bonds.\n\n Args:\n\n newLabels: The list of new labels, it should be a list or numpy array with size equal to the number of bonds of the UniTensor.\n\n Example:\n\n >>> g = tor10.UniTensor(bonds=[tor10.Bond(3),tor10.Bond(4)],rowrank=1,labels=[5,6])\n >>> g.labels\n [5 6]\n\n Set new_label=[-1,-2] to replace the original label [5,6].\n\n >>> new_label=[-1,-2]\n >>> g.SetLabels(new_label)\n >>> g.labels\n [-1 -2]\n\n \"\"\"\n if isinstance(newlabels, list):\n newlabels = np.array(newlabels)\n\n if not len(newlabels) == len(self.labels):\n raise ValueError(\"UniTensor.SetLabels\",\n \"the length of newlabels does not match with the rank of UniTensor.\")\n\n if len(np.unique(newlabels)) != len(newlabels):\n raise ValueError(\"UniTensor.SetLabels\", \"the newlabels contain duplicate entries.\")\n\n self.labels = copy.copy(newlabels)\n\n def SetName(self, name):\n \"\"\"\n Set the name of the UniTensor\n\n Args:\n\n name:\n a string.\n\n \"\"\"\n if not isinstance(name, str):\n raise TypeError(\"UniTensor.str\", \"the name should be a string.\")\n\n self.name = name\n\n return self\n\n def SetElem(self, elem):\n \"\"\"\n Given 1D array of elements, set the elements stored in tensor as the same as the given ones. Note that elem can only be python-list or numpy\n\n Args:\n\n elem:\n The elements to be replace the content of the current UniTensor. 
It should be a 1D array.\n **Note** if the UniTensor is a tensor with symmetry, one should use UniTensor.PutBlock to set the elements.\n\n Example:\n ::\n Sz = tor10.UniTensor(bonds=[tor10.Bond(2),tor10.Bond(2)],rowrank=1,\n dtype=torch.float64,\n device=torch.device(\"cpu\"))\n Sz.SetElem([1, 0,\n 0,-1 ])\n\n\n >>> print(Sz)\n Tensor name: \n is_diag : False\n tensor([[ 1., 0.],\n [ 0., -1.]], dtype=torch.float64)\n\n \"\"\"\n if not isinstance(elem, list) and not isinstance(elem, np.ndarray):\n raise TypeError(\"UniTensor.SetElem\", \"[ERROR] elem can only be python-list or numpy\")\n\n ## Qnum_ipoint [OKv03]\n if self.is_symm:\n raise Exception(\"UniTensor.SetElem\", \"[ERROR] the TN that has symm should use PutBlock.\")\n\n if not len(elem) == self.Storage.numel():\n raise ValueError(\"UniTensor.SetElem\", \"[ERROR] number of elem is not equal to the # of elem in the tensor.\")\n\n raw_elems = np.array(elem)\n if len(raw_elems.shape) != 1:\n raise Exception(\"UniTensor.SetElem\", \"[ERROR] can only accept 1D array of elements.\")\n\n my_type = self.Storage.dtype\n my_shape = self.Storage.shape\n my_device = self.Storage.device\n self.Storage = torch.from_numpy(raw_elems).type(my_type).reshape(my_shape).to(my_device)\n\n def SetRowRank(self, new_rowrank):\n \"\"\"\n Set the RowRank while keep the tensor indices.\n \n Args:\n \n new_rowrank: \n should be a unsigned int. \n \n [Note] for UniTensor with symmetry, it should have at least one bond in row-space and one bond in col-space. which means rowrank must >=1 and <= (rank of UniTensor)-1 \n\n Return:\n\n self\n\n \"\"\"\n if self.is_symm:\n ##check:\n if new_rowrank < 1 or len(self.labels) - new_rowrank < 1:\n raise Exception(\"[ERROR]\",\n \"SetRowRank for a tensor with symmetry must have at least 1 bond in row-space and 1 bond in col-space\")\n self.rowrank = int(new_rowrank)\n else:\n if new_rowrank < 0 or new_rowrank > len(self.labels):\n raise Exception(\"[ERRROR]\",\n \"Invalid Rowrank. Must >=0 and <= rank of tensor for non-symmetry UniTensor\")\n self.rowrank = int(new_rowrank)\n\n self._check_braket()\n return self\n\n def Todense_(self):\n \"\"\"\n Set the current UniTensor to dense matrix.\n [v0.3+] This only affect on UniTensor with non-symmetry with diag=True.\n\n Return:\n self\n\n Example:\n\n >>> a = tor10.UniTensor(bonds=[tor10.Bond(3),tor10.Bond(3)],rowrank=1,is_diag=True)\n >>> a.SetElem([1,2,3])\n >>> print(a.is_diag)\n True\n\n >>> print(a)\n Tensor name: \n is_diag : True\n tensor([1., 2., 3.], dtype=torch.float64)\n\n >>> a.Todense_()\n >>> print(a.is_diag)\n False\n\n >>> print(a)\n Tensor name: \n is_diag : False\n tensor([[1., 0., 0.],\n [0., 2., 0.],\n [0., 0., 3.]], dtype=torch.float64)\n\n \"\"\"\n if self.is_symm:\n raise Exception(\"UniTensor.Todense()\", \"[ERROR] cannot transform to dense for UniTensor with symmetry\")\n\n if self.is_diag:\n self.Storage = torch.diag(self.Storage)\n self.is_diag = False\n\n return self\n\n def Todense(self):\n \"\"\"\n Return a dense version of current UniTensor. 
This only affect on non-symmetric UniTensor with is_diag=True.\n \n [Note] for symmetric UniTensor, Todense cannot be called.\n\n Return:\n new UniTensor if current tensor is_diag=True, otherwise return self.\n\n Example:\n\n >>> a = tor10.UniTensor(bonds=[tor10.Bond(3),tor10.Bond(3)],rowrank=1,is_diag=True)\n >>> print(a.is_diag)\n True\n\n >>> dense_a = a.Todense()\n >>> print(dense_a.is_diag)\n False\n\n >>> print(a.is_diag)\n True\n\n\n \"\"\"\n if self.is_symm:\n raise Exception(\"UniTensor.Todense()\", \"[ERROR] cannot transform to dense form for symmetric UniTensor\")\n\n if self.is_diag:\n out = copy.deepcopy(self)\n out.Todense_()\n return out\n else:\n return self\n\n def to_(self, device):\n \"\"\"\n Set the current UniTensor to device\n\n Args:\n\n device:\n This should be an [torch.device]\n torch.device(\"cpu\") for put the tensor on host (cpu)\n torch.device(\"cuda:x\") for put the tensor on GPU with index x\n\n Return:\n \n self\n\n Example:\n\n Construct a tensor (default is on cpu)\n\n >>> a = tor10.UniTensor(bonds=[tor10.Bond(3),tor10.Bond(4)],rowrank=1)\n\n Set to GPU.\n\n >>> a.to_(torch.device(\"cuda:0\"))\n\n\n \"\"\"\n if not isinstance(device, torch.device):\n raise TypeError(\"[ERROR] UniTensor.to()\", \"only support device argument in this version as torch.device\")\n\n if self.device != device:\n if self.is_symm:\n for s in range(len(self.Storage)):\n self.Storage[s] = self.Storage[s].to(device)\n else:\n self.Storage = self.Storage.to(device)\n\n return self\n\n def to(self, device):\n \"\"\"\n Set the current UniTensor to device. If device is not the same with current tensor, return a new UniTensor,\n otherwise return self.\n\n Args:\n\n device:\n This should be an [torch.device]\n torch.device(\"cpu\") for put the tensor on host (cpu)\n torch.device(\"cuda:x\") for put the tensor on GPU with index x\n\n Return:\n \n Self if the device is the same as the current UniTensor. Otherwise, return a new UniTensor\n\n Example:\n\n Construct a tensor (default is on cpu)\n\n >>> a = tor10.UniTensor(bonds=[tor10.Bond(3),tor10.Bond(4)],rowrank=1)\n\n Set to GPU.\n\n >>> b = a.to(torch.device(\"cuda:0\"))\n >>> print(b is a)\n False\n\n >>> b = a.to(torch.device(\"cpu\"))\n >>> print(b is a)\n True\n\n\n \"\"\"\n if not isinstance(device, torch.device):\n raise TypeError(\"[ERROR] UniTensor.to()\", \"only support device argument in this version as torch.device\")\n\n if self.device != device:\n out = copy.deepcopy(self)\n out.to_(device)\n return out\n else:\n return self\n\n def Print_diagram(self, bond_info=False):\n \"\"\"\n This is the beauty print of the tensor diagram. Including the information of the current device\n ::\n 1.The left hand side is always the in-bonds,representing the row-space when flatten as Matrix;\n the right hand side is always the Out-bonds, representing the column-space when flatten as Matrix.\n 2.The number attached to the outside of each leg is the Bond-dimension.\n 3.The number attached to the inside of each leg is the label.\n 4.if all the bra-bonds are in row-space (in-bonds), and all ket-bonds are in col-space (out-bonds),\n the tensor is in \"braket_form\".\n 5.if one permute bra-bonds that should be in-bonds to out-bonds, this will put the UniTensor in a\n \"non-braket_form\". 
the bond will have a \"*\" symbol on it.\n\n\n Args:\n \n bond_info [default: False]\n\n if set to True, the info of each bond will be printed.\n\n \"\"\"\n print(\"-----------------------\")\n print(\"tensor Name : %s\" % self.name)\n print(\"tensor Rank : %d\" % (len(self.labels)))\n print(\"has_symmetry: %s\" % (\"True\" if self.is_symm else \"False\"))\n if self.is_symm:\n print(\"on device : %s\" % self.Storage[0].device)\n else:\n print(\"on device : %s\" % self.Storage.device)\n print(\"is_diag : %s\" % (\"True\" if self.is_diag else \"False\"))\n\n Nin = self.rowrank\n Nout = len(self.bonds) - self.rowrank\n if Nin > Nout:\n vl = Nin\n else:\n vl = Nout\n\n if self.braket is not None:\n print(\"braket_form : %s\" % (\"True\" if self.is_braket else \"False\"))\n print(\" |ket> <bra| \")\n print(\" --------------- \")\n for i in range(vl):\n print(\" | | \")\n if i < Nin:\n if self.braket[i] == BondType[BD_KET]:\n bks = \"> \"\n else:\n bks = \"<*\"\n l = \"%3d %s__\" % (self.labels[i], bks)\n llbl = \"%-3d\" % self.bonds[i].dim\n else:\n l = \" \"\n llbl = \" \"\n if i < Nout:\n if self.braket[Nin + i] == BondType[BD_KET]:\n bks = \"*>\"\n else:\n bks = \" <\"\n r = \"__%s %-3d\" % (bks, self.labels[Nin + i])\n rlbl = \"%3d\" % self.bonds[Nin + i].dim\n else:\n r = \" \"\n rlbl = \" \"\n print(\" %s| %s %s |%s\" % (l, llbl, rlbl, r))\n print(\" | | \")\n print(\" --------------- \")\n else:\n print(\" ------------- \")\n for i in range(vl):\n if i == 0:\n print(\" / \\ \")\n else:\n print(\" | | \")\n if i < Nin:\n bks = \"__\"\n l = \"%3d %s__\" % (self.labels[i], bks)\n llbl = \"%-3d\" % self.bonds[i].dim\n else:\n l = \" \"\n llbl = \" \"\n if i < Nout:\n bks = \"__\"\n r = \"__%s %-3d\" % (bks, self.labels[Nin + i])\n rlbl = \"%3d\" % self.bonds[Nin + i].dim\n else:\n r = \" \"\n rlbl = \" \"\n print(\" %s| %s %s |%s\" % (l, llbl, rlbl, r))\n print(\" \\ / \")\n print(\" ------------- \")\n\n if bond_info:\n for i in range(len(self.bonds)):\n print(\"lbl:%d \" % (self.labels[i]), end=\"\")\n print(self.bonds[i])\n\n def __str__(self):\n print(\"Tensor name: %s\" % self.name)\n if self.braket is not None:\n print(\"braket_form : %s\" % (\"True\" if self.is_braket else \"False\"))\n\n if self.is_symm:\n print(\"[Symmetry]\")\n if self._contiguous:\n for b in range(len(self.Storage)):\n print(self.Storage[b])\n else:\n\n out = self.Contiguous()\n for b in range(len(out.Storage)):\n print(out.Storage[b])\n del out\n\n ## DEBUG >>>\n if DEBUG:\n print(\"xxxxxxxxxxxxxxxxxxxxxx\")\n print(\"[DEBUG]\")\n print(\"Real memory:\")\n for b in range(len(self.Storage)):\n print(self.Storage[b])\n print(\"xxxxxxxxxxxxxxxxxxxxxx\")\n ## <<<\n\n else:\n print(\"is_diag : %s\" % (\"True\" if self.is_diag else \"False\"))\n print(self.Storage)\n\n return \"\"\n\n def __repr__(self):\n print(\"Tensor name: %s\" % self.name)\n if self.braket is not None:\n print(\"braket_form : %s\" % (\"True\" if self.is_braket else \"False\"))\n\n if self.is_symm:\n print(\"[Symmetry]\")\n if self._contiguous:\n for b in range(len(self.Storage)):\n print(self.Storage[b])\n else:\n out = self.Contiguous()\n for b in range(len(out.Storage)):\n print(out.Storage[b])\n del out\n\n ## DEBUG >>>\n if DEBUG:\n print(\"xxxxxxxxxxxxxxxxxxxxxx\")\n print(\"Real memory:\")\n for b in range(len(self.Storage)):\n print(self.Storage[b])\n print(\"xxxxxxxxxxxxxxxxxxxxxx\")\n ## <<<\n\n else:\n print(\"is_diag : %s\" % (\"True\" if self.is_diag else \"False\"))\n print(self.Storage)\n\n return \"\"\n\n def __len__(self):\n if 
self.is_symm:
            raise Exception("[ERROR]", "UniTensor with symmetry doesn't have the property len")
        else:
            return len(self.Storage)

    def __eq__(self, rhs):
        """
        Compare two UniTensors.
        ::
            a == b

        where a & b are UniTensors.

        Note that this only compares the meta data (bonds, labels, rowrank and braket tags)
        and the shape of Storage, not the content of the torch tensor.


        """
        if isinstance(rhs, self.__class__):
            if self.is_symm != rhs.is_symm:
                return False

            if not (len(self.bonds) == len(rhs.bonds)):
                return False

            if not (all(self.bonds[i] == rhs.bonds[i] for i in range(len(self.bonds))) and all(
                    self.labels[i] == rhs.labels[i] for i in range(len(self.labels)))):
                return False

            if not self.rowrank == rhs.rowrank:
                return False

            if (self.braket is None) != (rhs.braket is None):
                return False

            if self.braket is not None:
                if not (self.braket == rhs.braket).all():
                    return False

            if self.is_symm:
                iss = True
            else:
                iss = (self.is_diag == rhs.is_diag)
                iss = iss and (self.Storage.shape == rhs.Storage.shape)

            return iss

        else:
            raise ValueError("UniTensor.__eq__",
                             "[ERROR] invalid comparison between a UniTensor object and another type.")

    def __ne__(self, other):
        return not (self == other)

    @property
    def device(self):
        """
        Return the device of UniTensor

        Return:

            torch.device

        """

        if self.is_symm:
            return self.Storage[0].device
        else:
            return self.Storage.device

    @property
    def dtype(self):
        """
        Return the dtype of UniTensor

        Return:
            torch.dtype
        """
        if self.is_symm:
            return self.Storage[0].dtype
        else:
            return self.Storage.dtype

    @property
    def shape(self):
        """
        Return the shape of UniTensor

        Return:

            torch.Size
        """
        if self.is_symm:
            ## for a symmetric tensor, return the dense shape spanned by the bonds.
            return torch.Size([self.bonds[z].dim for z in range(len(self.bonds))])
        else:
            if self.is_diag:
                return torch.Size([self.bonds[0].dim, self.bonds[0].dim])
            else:
                return self.Storage.shape

    ## Fill :
    def __getitem__(self, key):
        if self.is_symm:
            raise Exception("UniTensor.__getitem__",
                            "[ERROR] cannot use [] to getitem from a block-form tensor. Use get block first.")
        return From_torch(self.Storage[key], rowrank=0)

    def __setitem__(self, key, value):
        if self.is_symm:
            raise Exception("UniTensor.__setitem__",
                            "[ERROR] cannot use [] to setitem from a block-form tensor. 
Use get block first.\")\n\n self.Storage[key] = value\n\n def item(self):\n \"\"\"\n Get the python scalar from a UniTensor with one element\n\n Return:\n python scalar\n\n \"\"\"\n if self.is_symm:\n raise TypeError(\"UniTensor.item\", \"[ERROR] cannot operate item() on symmetry tensor\")\n else:\n if self.Storage.numel() != 1:\n raise TypeError(\"UniTensor.item\",\n \"[ERROR] only one-element tensors can be converted to Python scalars.\")\n\n return self.Storage.item()\n\n ## Math ::\n def __add__(self, other):\n if isinstance(other, self.__class__):\n if self.is_symm != other.is_symm:\n raise TypeError(\"[ERROR]\", \"Cannot + two symm and non-symm UniTensor \")\n\n if self.is_symm:\n if self != other:\n raise TypeError(\"[ERROR]\", \"Cannot + two symm tensors that have different symmetry structure.\")\n if self.is_contiguous() and other.is_contiguous():\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(torch_tensor=[self.Storage[b] + other.Storage[b] for b in range(len(self.Storage))],\n braket=self.braket,\n sym_mappers=(self._mapper, self._inv_mapper,\n self._Ket_mapper_blks, self._Ket_invmapper_blks,\n self._Bra_mapper_blks, self._Bra_invmapper_blks,\n self._contiguous,\n self._accu_off_in,\n self._accu_off_out,\n self._block_qnums))\n\n\n else:\n raise Exception(\"[ERROR]\",\n \"Two symmetry tensors can only add when both are contiguous.\\n suggestion: Call .Contiguous() or .Contiguous_() before add\")\n\n\n\n else:\n if not (self.is_braket is None) == (other.is_braket is None):\n raise Exception(\"[ERROR]\", \"Cannot add non-braket-tag tensor with tagged tensor\")\n\n if self.is_diag and other.is_diag:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False,\n is_diag=True)\n\n tmp._mac(torch_tensor=self.Storage + other.Storage,\n braket=self.braket)\n\n\n elif self.is_diag == False and other.is_diag == False:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n tmp._mac(torch_tensor=self.Storage + other.Storage,\n braket=self.braket)\n\n else:\n if self.is_diag:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n tmp._mac(torch_tensor=torch.diag(self.Storage) + other.Storage,\n braket=self.braket)\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n tmp._mac(torch_tensor=self.Storage + torch.diag(other.Storage),\n braket=self.braket)\n else:\n if self.is_symm:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(torch_tensor=[self.Storage[b] + other for b in range(len(self.Storage))],\n braket=self.braket,\n sym_mappers=(self._mapper, self._inv_mapper,\n self._Ket_mapper_blks, self._Ket_invmapper_blks,\n self._Bra_mapper_blks, self._Bra_invmapper_blks,\n self._contiguous,\n self._accu_off_in,\n self._accu_off_out,\n self._block_qnums))\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False,\n is_diag=self.is_diag)\n tmp._mac(torch_tensor=self.Storage + other,\n braket=self.braket)\n return tmp\n\n def __radd__(self, other):\n ## U + U is handled by __add__, so we only need to process x + U here.\n if self.is_symm:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(braket=self.braket,\n torch_tensor=[other + self.Storage[b] for b in range(len(self.Storage))],\n 
sym_mappers=(self._mapper, self._inv_mapper,\n self._Ket_mapper_blks, self._Ket_invmapper_blks,\n self._Bra_mapper_blks, self._Bra_invmapper_blks,\n self._contiguous,\n self._accu_off_in,\n self._accu_off_out,\n self._block_qnums))\n\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False,\n is_diag=self.is_diag)\n tmp._mac(torch_tensor=other + self.Storage,\n braket=self.braket)\n\n return tmp\n\n def __sub__(self, other):\n if isinstance(other, self.__class__):\n if self.is_symm != other.is_symm:\n raise TypeError(\"[ERROR]\", \"[Cannot subtract symmetric and non-symmetric UniTensors]\")\n\n if self.is_symm:\n if self != other:\n raise TypeError(\"[ERROR]\", \"Cannot subtract symmetric tensors with different symmetry structure.\")\n if self.is_contiguous() and other.is_contiguous():\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(braket=self.braket,\n torch_tensor=[self.Storage[b] - other.Storage[b] for b in range(len(self.Storage))],\n sym_mappers=(self._mapper, self._inv_mapper,\n self._Ket_mapper_blks, self._Ket_invmapper_blks,\n self._Bra_mapper_blks, self._Bra_invmapper_blks,\n self._contiguous,\n self._accu_off_in,\n self._accu_off_out,\n self._block_qnums))\n\n else:\n raise Exception(\"[ERROR]\",\n \"Two symmetry tensors can only sub when both are contiguous.\\n suggestion: Call .Contiguous() or .Contiguous_() before sub\")\n\n else:\n if not (self.is_braket is None) == (other.is_braket is None):\n raise Exception(\"[ERROR]\", \"Cannot sub non-braket-tag tensor with tagged tensor\")\n\n if self.is_diag and other.is_diag:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False,\n is_diag=True)\n\n tmp._mac(torch_tensor=self.Storage - other.Storage,\n braket=self.braket)\n\n elif self.is_diag == False and other.is_diag == False:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n tmp._mac(torch_tensor=self.Storage - other.Storage,\n braket=self.braket)\n else:\n if self.is_diag:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n tmp._mac(torch_tensor=torch.diag(self.Storage) - other.Storage,\n braket=self.braket)\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n tmp._mac(braket=self.braket,\n torch_tensor=self.Storage - torch.diag(other.Storage))\n\n else:\n if self.is_symm:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n tmp._mac(braket=self.braket,\n torch_tensor=[self.Storage[b] - other for b in range(len(self.Storage))],\n sym_mappers=(self._mapper, self._inv_mapper,\n self._Ket_mapper_blks, self._Ket_invmapper_blks,\n self._Bra_mapper_blks, self._Bra_invmapper_blks,\n self._contiguous,\n self._accu_off_in,\n self._accu_off_out,\n self._block_qnums))\n\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False,\n is_diag=self.is_diag)\n\n tmp._mac(torch_tensor=self.Storage - other,\n braket=self.braket)\n\n return tmp\n\n def __rsub__(self, other):\n if self.is_symm:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(braket=self.braket,\n torch_tensor=[other - self.Storage[b] for b in range(len(self.Storage))],\n sym_mappers=(self._mapper, self._inv_mapper,\n self._Ket_mapper_blks, self._Ket_invmapper_blks,\n 
self._Bra_mapper_blks, self._Bra_invmapper_blks,\n self._contiguous,\n self._accu_off_in,\n self._accu_off_out,\n self._block_qnums))\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False,\n is_diag=self.is_diag)\n tmp._mac(braket=self.braket,\n torch_tensor=other - self.Storage)\n return tmp\n\n \"\"\n\n def Whole_transpose(self):\n \"\"\"\n If the UniTensor is tagged, exchange the bra/ket tags on each bond, and transpose (rowspace and colspace) by referencing to the \"rowrank\".\n \n Return:\n UniTensor, shared the same type with each bond's tag bra <-> ket exchanged.\n\n \"\"\"\n out = copy.deepcopy(self)\n if self.is_symm:\n ## symmetry Tensor:\n for b in range(len(self.bonds)):\n if out.bonds[b].bondType == BD_KET:\n out.bonds[b].bondType = BD_BRA\n else:\n out.bonds[b].bondType = BD_KET\n out.braket *= -1\n tmp = np.roll(np.arange(len(out.bonds)).astype(np.int), -out.rowrank)\n out.Permute(tmp, rowrank=len(out.bonds) - out.rowrank)\n\n else:\n if self.braket is None:\n ## untagged tensor\n tmp = np.roll(np.arange(len(out.bonds)).astype(np.int), -out.rowrank)\n out.Permute(tmp, rowrank=len(out.bonds) - out.rowrank)\n return out\n else:\n ## tagged nonsymm Tensor: \n for b in range(len(self.bonds)):\n if out.bonds[b].bondType == BD_KET:\n out.bonds[b].bondType = BD_BRA\n else:\n out.bonds[b].bondType = BD_KET\n out.braket *= -1\n tmp = np.roll(np.arange(len(out.bonds)).astype(np.int), -out.rowrank)\n out.Permute(tmp, rowrank=len(out.bonds) - out.rowrank)\n\n return out\n\n \"\"\n\n def __mul__(self, other):\n if isinstance(other, self.__class__):\n if self.is_symm != other.is_symm:\n raise TypeError(\"[ERROR]\", \"Cannot * two symm and non-symm UniTensor\")\n if self.is_symm:\n if self != other:\n raise TypeError(\"[ERROR]\", \"Cannot * two symm tensors that have different symmetry structure.\")\n if self.is_contiguous() and other.is_contiguous():\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(braket=self.braket,\n torch_tensor=[self.Storage[b] * other.Storage[b] for b in range(len(self.Storage))],\n sym_mappers=(self._mapper, self._inv_mapper,\n self._Ket_mapper_blks, self._Ket_invmapper_blks,\n self._Bra_mapper_blks, self._Bra_invmapper_blks,\n self._contiguous,\n self._accu_off_in,\n self._accu_off_out,\n self._block_qnums))\n else:\n raise Exception(\"[ERROR]\",\n \"Two symmetry tensors can only mul when both are contiguous.\\n suggestion: Call .Contiguous() or .Contiguous_() before mul\")\n else:\n if not (self.is_braket is None) == (other.is_braket is None):\n raise Exception(\"[ERROR]\", \"Cannot mul non-braket-tag tensor with tagged tensor\")\n\n if self.is_diag and other.is_diag:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False,\n is_diag=True)\n tmp._mac(torch_tensor=self.Storage * other.Storage,\n braket=self.braket)\n elif self.is_diag == False and other.is_diag == False:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n tmp._mac(torch_tensor=self.Storage * other.Storage,\n braket=self.braket)\n else:\n if self.is_diag:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n tmp._mac(torch_tensor=torch.diag(self.Storage) * other.Storage,\n braket=self.braket)\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n tmp._mac(torch_tensor=self.Storage * 
torch.diag(other.Storage),\n braket=self.braket)\n else:\n if self.is_symm:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(braket=self.braket,\n torch_tensor=[self.Storage[b] * other for b in range(len(self.Storage))],\n sym_mappers=(self._mapper, self._inv_mapper,\n self._Ket_mapper_blks, self._Ket_invmapper_blks,\n self._Bra_mapper_blks, self._Bra_invmapper_blks,\n self._contiguous,\n self._accu_off_in,\n self._accu_off_out,\n self._block_qnums))\n\n\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False,\n is_diag=self.is_diag)\n tmp._mac(braket=self.braket,\n torch_tensor=self.Storage * other)\n return tmp\n\n def __rmul__(self, other):\n if self.is_symm:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(braket=self.braket,\n torch_tensor=[other * self.Storage[b] for b in range(len(self.Storage))],\n sym_mappers=(self._mapper, self._inv_mapper,\n self._Ket_mapper_blks, self._Ket_invmapper_blks,\n self._Bra_mapper_blks, self._Bra_invmapper_blks,\n self._contiguous,\n self._accu_off_in,\n self._accu_off_out,\n self._block_qnums))\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False,\n is_diag=self.is_diag)\n tmp._mac(torch_tensor=other * self.Storage,\n braket=self.braket)\n return tmp\n\n def __pow__(self, other):\n if self.is_symm:\n # raise Exception(\"[Develope][check impl]\")\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(braket=self.braket,\n torch_tensor=[self.Storage[b] ** other for b in range(len(self.Storage))],\n sym_mappers=(self._mapper, self._inv_mapper,\n self._Ket_mapper_blks, self._Ket_invmapper_blks,\n self._Bra_mapper_blks, self._Bra_invmapper_blks,\n self._contiguous, self._accu_off_in, self._accu_off_out, self._block_qnums))\n return tmp\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False,\n is_diag=self.is_diag)\n\n tmp._mac(braket=self.braket, torch_tensor=self.Storage ** other)\n return tmp\n\n def __truediv__(self, other):\n if isinstance(other, self.__class__):\n if self.is_symm != other.is_symm:\n raise TypeError(\"[ERROR]\", \"Cannot / two symm and non-symm UniTensor.\")\n\n if self.is_symm:\n if self != other:\n raise TypeError(\"[ERROR]\", \"Cannot / two symm tensors that have different symmetry structure.\")\n if self.is_contiguous() and other.is_contiguous():\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(braket=self.braket,\n torch_tensor=[self.Storage[b] / other.Storage[b] for b in range(len(self.Storage))],\n sym_mappers=(self._mapper, self._inv_mapper,\n self._Ket_mapper_blks, self._Ket_invmapper_blks,\n self._Bra_mapper_blks, self._Bra_invmapper_blks,\n self._contiguous,\n self._accu_off_in,\n self._accu_off_out,\n self._block_qnums))\n else:\n raise Exception(\"[ERROR]\",\n \"Two symmetry tensors can only mul when both are contiguous.\\n suggestion: Call .Contiguous() or .Contiguous_() before mul\")\n else:\n if not (self.is_braket is None) == (other.is_braket is None):\n raise Exception(\"[ERROR]\", \"Cannot / non-braket-tag tensor with tagged tensor\")\n\n if self.is_diag:\n if other.is_diag:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False,\n is_diag=True)\n\n tmp._mac(braket=self.braket,\n 
torch_tensor=self.Storage / other.Storage)\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(torch_tensor=torch.diag(self.Storage) / other.Storage,\n braket=self.braket)\n else:\n if other.is_diag:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(torch_tensor=self.Storage / torch.diag(other.Storage),\n braket=self.braket)\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(braket=self.braket,\n torch_tensor=self.Storage / other.Storage)\n\n else:\n if self.is_symm:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(braket=self.braket,\n torch_tensor=[self.Storage[b] / other for b in range(len(self.Storage))],\n sym_mappers=(self._mapper, self._inv_mapper,\n self._Ket_mapper_blks, self._Ket_invmapper_blks,\n self._Bra_mapper_blks, self._Bra_invmapper_blks,\n self._contiguous,\n self._accu_off_in,\n self._accu_off_out,\n self._block_qnums))\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False,\n is_diag=self.is_diag)\n tmp._mac(braket=self.braket,\n torch_tensor=self.Storage / other)\n\n return tmp\n\n ## This is the same function that behaves as the memberfunction.\n def Svd(self):\n \"\"\"\n This is the member function of Svd, see tor10.linalg.Svd()\n \"\"\"\n if self.is_symm:\n raise Exception(\"UniTensor.Svd\",\n \"[ERROR] cannot perform Svd on a symmetry,block-form tensor. use GetBlock() first and perform svd on the Block.\")\n\n if self.braket is not None:\n raise Exception(\"UniTensor.Svd\", \"[ERROR] cannot perform Svd on a bra-ket tagged tensor.\")\n\n return linalg.Svd(self)\n\n def Svd_truncate(self,keepdim=None):\n \"\"\"\n This is the member function of Svd_truncate, see tor10.linalg.Svd_truncate()\n \"\"\"\n if self.is_symm:\n raise Exception(\"UniTensor.Svd_truncate\",\n \"[ERROR] cannot perform Svd on a symmetry,block-form tensor. use GetBlock() first and perform svd on the Block.\")\n\n return linalg.Svd_truncate(self,keepdim)\n\n def Norm(self):\n \"\"\"\n This is the member function of Norm, see tor10.linalg.Norm\n \"\"\"\n if self.is_symm:\n raise Exception(\"UniTensor.Norm\",\n \"[ERROR] cannot perform Norm on a symmetry,block-form tensor. use GetBlock() first and perform svd on the Block.\")\n\n return linalg.Norm(self)\n\n def Det(self):\n \"\"\"\n This is the member function of Det, see tor10.linalg.Det\n \"\"\"\n if self.is_symm:\n raise Exception(\"UniTensor.Det\",\n \"[ERROR] cannot perform Det on a symmetry, block-form tensor. use GetBlock() first and perform det on the Block.\")\n\n return linalg.Det(self)\n\n def Matmul(self, b):\n \"\"\"\n This is the member function of Matmul, see tor10.linalg.Matmul\n \"\"\"\n if self.is_symm:\n raise Exception(\"UniTensor.Matmul\",\n \"[ERROR] cannot perform MatMul on a symmetry, block-form tensor. 
use GetBlock() first and perform matmul on the Block.")

        return linalg.Matmul(self, b)

    ## Extended Assignment:
    def __iadd__(self, other):
        if isinstance(other, self.__class__):
            if self.is_symm != other.is_symm:
                raise TypeError("[ERROR]", "cannot += symm and non-symm UniTensors")

            if self.is_symm:
                if self != other:
                    raise TypeError("[ERROR]", "Cannot += two symm tensors that have different symmetry structure.")
                if self.is_contiguous() and other.is_contiguous():
                    for b in range(len(self.Storage)):
                        self.Storage[b] += other.Storage[b]
                else:
                    raise Exception("[ERROR]",
                                    "Two symmetry tensors can only add when both are contiguous.\n suggestion: Call .Contiguous() or .Contiguous_() before add")
            else:
                if (self.braket is None) != (other.braket is None):
                    raise Exception("[ERROR]", "cannot += non-braket-tag tensor with tagged tensor")

                if self.is_diag == other.is_diag:
                    self.Storage += other.Storage
                else:
                    if self.is_diag:
                        self.Storage = torch.diag(self.Storage) + other.Storage
                        self.is_diag = False
                    else:
                        self.Storage += torch.diag(other.Storage)

        else:
            if self.is_symm:
                for b in range(len(self.Storage)):
                    self.Storage[b] += other
            else:
                self.Storage += other

        return self

    def __isub__(self, other):
        if isinstance(other, self.__class__):
            if self.is_symm != other.is_symm:
                raise TypeError("[ERROR]", "cannot -= symm and non-symm UniTensors")

            if self.is_symm:
                if self != other:
                    raise TypeError("[ERROR]", "Cannot -= two symm tensors that have different symmetry structure.")
                if self.is_contiguous() and other.is_contiguous():
                    for b in range(len(self.Storage)):
                        self.Storage[b] -= other.Storage[b]
                else:
                    raise Exception("[ERROR]",
                                    "Two symmetry tensors can only sub when both are contiguous.\n suggestion: Call .Contiguous() or .Contiguous_() before sub")
            else:
                if (self.braket is None) != (other.braket is None):
                    raise Exception("[ERROR]", "cannot -= non-braket-tag tensor with tagged tensor")

                if self.is_diag == other.is_diag:
                    self.Storage -= other.Storage
                else:
                    if self.is_diag:
                        ## bug fix: this branch previously added instead of subtracting.
                        self.Storage = torch.diag(self.Storage) - other.Storage
                        self.is_diag = False
                    else:
                        self.Storage -= torch.diag(other.Storage)

        else:
            if self.is_symm:
                for b in range(len(self.Storage)):
                    self.Storage[b] -= other
            else:
                self.Storage -= other

        return self

    def __imul__(self, other):
        if isinstance(other, self.__class__):
            if self.is_symm != other.is_symm:
                raise TypeError("[ERROR]", "cannot *= symm and non-symm UniTensors")

            if self.is_symm:
                if self != other:
                    raise TypeError("[ERROR]", "Cannot *= two symm tensors that have different symmetry structure.")
                if self.is_contiguous() and other.is_contiguous():
                    for b in range(len(self.Storage)):
                        self.Storage[b] *= other.Storage[b]
                else:
                    raise Exception("[ERROR]",
                                    "Two symmetry tensors can only mul when both are contiguous.\n suggestion: Call .Contiguous() or .Contiguous_() before mul")
            else:
                if (self.braket is None) != (other.braket is None):
                    raise Exception("[ERROR]", "cannot *= non-braket-tag tensor with tagged tensor")

                if self.is_diag == other.is_diag:
                    self.Storage *= other.Storage
                else:
                    if self.is_diag:
                        self.Storage = torch.diag(self.Storage) * other.Storage
                        self.is_diag = False
                    else:
                        self.Storage *= torch.diag(other.Storage)
        else:
            if self.is_symm:
                for b in range(len(self.Storage)):
                    self.Storage[b] *= other
            else:
                self.Storage *= other

        return 
self\n\n ## Miscellaneous\n def Rand(self):\n \"\"\"\n Randomize the UniTensor.\n\n Note that in current version, only a UniTensor without symmetry quantum numbers can be randomized.\n\n Return:\n self\n\n \"\"\"\n # v0.3+ OK.\n _Randomize(self)\n\n return self\n\n def CombineBonds(self, X_to_combine, new_label=None, permute_back=False, by_label=True):\n \"\"\"\n This function combines the bonds in input UniTensor [a] by the specified labels [label].\n\n [Note][v0.3+] that ket-bonds can only be combine with ket-bonds, bra-bonds can only combine with bra-bonds.\n\n Args:\n\n labels_to_combine:\n labels that to be combined. It should be a int list / numpy array of the label. All the bonds with specified labels in the current UniTensor will be combined\n\n new_label [default=None]\n This should be an integer, for floating point number, it will be truncated to integer.\n\n if new_label is set to None, the combined bond will have label as the bond in the to-be-combined bonds that has the smallest LABEL in input tensor.\n\n if new_label is set, the combined bond will have label [new_label]\n \n permuted_back[False]:\n this state if the combine bond should be permuted back or not. If false, the combined bond will always be presented as the first bond.\n\n\n Example:\n\n 1. Combine Bond for an non-symmetric tensor.\n\n >>> bds_x = [tor10.Bond(5),tor10.Bond(5),tor10.Bond(3)]\n >>> x = tor10.UniTensor(bonds=bds_x, rowrank=2, labels=[4,3,5])\n >>> y = tor10.UniTensor(bonds=bds_x, rowrank=2, labels=[4,3,5])\n >>> x.Print_diagram()\n tensor Name : \n tensor Rank : 3\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 4 ____| 5 3 |____ 5 \n | | \n 3 ____| 5 | \n \\ / \n ------------- \n lbl:4 Dim = 5 |\n REG :\n _\n lbl:3 Dim = 5 |\n REG :\n _\n lbl:5 Dim = 3 |\n REG :\n\n\n * combine bond with label \"3\" into \"5\"\n \n >>> x.CombineBonds([5,3])\n >>> x.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 2\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 4 ____| 5 15 |____ 5 \n \\ / \n ------------- \n lbl:4 Dim = 5 |\n REG :\n _\n lbl:5 Dim = 15 |\n REG :\n\n\n * combine bond with label \"5\" into \"3\"\n\n >>> y.CombineBonds([3,5])\n >>> y.Print_diagram()\n tensor Name : \n tensor Rank : 2\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 4 ____| 5 | \n | | \n 3 ____| 15 | \n \\ / \n ------------- \n lbl:4 Dim = 5 |\n REG :\n _\n lbl:3 Dim = 15 |\n REG :\n\n \n >>> z = tor10.UniTensor(bonds=bds_x*2, rowrank=3, labels=[4,3,5,6,7,8])\n >>> z2 = tor10.UniTensor(bonds=bds_x*2, rowrank=3, labels=[4,3,5,6,7,8])\n >>> z.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 6\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 4 ____| 5 5 |____ 6 \n | | \n 3 ____| 5 5 |____ 7 \n | | \n 5 ____| 3 3 |____ 8 \n \\ / \n ------------- \n \n >>> z.CombineBonds([4,5,6])\n >>> z.Print_diagram()\n tensor Name : \n tensor Rank : 4\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 4 ____| 225 5 |____ 3 \n | | \n | 5 |____ 7 \n | | \n | 3 |____ 8 \n \\ / \n ------------- \n\n >>> z2.CombineBonds([4,5,6],permute_back=True)\n >>> z2.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 4\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 4 ____| 225 5 |____ 7 \n | | \n 3 ____| 5 3 |____ 8 \n \\ / \n -------------\n \"\"\"\n if len(X_to_combine) < 2:\n 
raise ValueError("CombineBonds", "[ERROR] the number of bonds to combine should be greater than one.")

        # checking :
        if by_label:
            same_lbls, x_ind, _ = np.intersect1d(self.labels, X_to_combine, return_indices=True)

            if len(same_lbls) != len(X_to_combine):
                raise Exception("[ERROR] not all the labels appear in the current tensor.")

            idxs_to_combine = []
            for l in X_to_combine:
                idxs_to_combine.append(np.argwhere(self.labels == l).flatten()[0])

            idxs_to_combine = np.array(idxs_to_combine, dtype=np.int)
        else:
            if not np.all(np.array(X_to_combine) < len(self.labels)):
                raise Exception("[ERROR] index out of bound")

            idxs_to_combine = np.array(X_to_combine, dtype=np.int)

        _CombineBonds(self, idxs_to_combine, new_label, permute_back)

    def Contiguous_(self):
        """
        Make the memory contiguous. This is similar to pytorch's contiguous_().
        Permute does not change the memory layout; after a permute, only the shape of the UniTensor
        is changed, while the underlying memory stays the same.
        A UniTensor in this condition is called "non-contiguous".
        When Contiguous_() is called, the memory will be rearranged to match the shape of the UniTensor.

        *Note* Normally, it is not necessary to call Contiguous_() by hand. Most of the linalg functions
        implicitly make the UniTensor contiguous. If one calls a function that requires a contiguous
        tensor, an error will be raised telling you to put a UniTensor.Contiguous() or
        UniTensor.Contiguous_() there.

        Return:
            self

        Example:

        >>> bds_x = [tor10.Bond(5),tor10.Bond(5),tor10.Bond(3)]
        >>> x = tor10.UniTensor(bonds=bds_x,rowrank=1, labels=[4,3,5])
        >>> print(x.is_contiguous())
        True

        >>> x.Permute([0,2,1],rowrank=1)
        >>> print(x.is_contiguous())
        False

        >>> x.Contiguous_()
        >>> print(x.is_contiguous())
        True

        """
        if self.is_symm:
            if self._contiguous:
                return self
            else:
                out = self.Contiguous()
                out.name = self.name
                self.__dict__.update(out.__dict__)
                return self

        else:
            self.Storage = self.Storage.contiguous()

        return self

    def Contiguous(self):
        """
        Make the memory contiguous. This is similar to pytorch's contiguous().
        Permute does not move the memory; after a permute, only the shape of the UniTensor is changed,
        while the underlying memory stays the same. A UniTensor in this status is called a
        "non-contiguous" tensor.
        When Contiguous() is called, the memory will be rearranged to match the shape of the UniTensor.

        if the current tensor is already contiguous, return self. 
Otherwise, return a new tensor.\n\n\n Return:\n self\n\n Example:\n\n >>> bds_x = [tor10.Bond(5),tor10.Bond(5),tor10.Bond(3)]\n >>> x = Tt.UniTensor(bonds=bds_x,rowrank=1, labels=[4,3,5])\n >>> print(x.is_contiguous())\n True\n\n >>> x.Permute([0,2,1],rowrank=1)\n >>> print(x.is_contiguous())\n False\n\n >>> y = x.Contiguous()\n >>> print(y.is_contiguous())\n True\n\n >>> print(x.is_contiguous())\n False\n\n \"\"\"\n if self.is_symm:\n # raise Exception(\"[Develope]\")\n if self._contiguous:\n return self\n else:\n out = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n device=self.device,\n dtype=self.dtype)\n\n out._mac(braket=self.braket)\n\n out_bd_dims = np.array([out.bonds[x].dim for x in range(out.rowrank)], dtype=np.int)\n\n ## copy elemenets: \n for b in range(len(self.Storage)):\n oldshape = self.Storage[b].shape\n for i in range(oldshape[0]):\n for j in range(oldshape[1]):\n oldidx = np.concatenate((self._Ket_invmapper_blks[b][i], self._Bra_invmapper_blks[b][j]))\n newidx = oldidx[self._mapper]\n #\n new_row = int(np.sum(out._accu_off_in * newidx[:out.rowrank]))\n new_col = int(np.sum(out._accu_off_out * newidx[out.rowrank:]))\n b_id_in = out._Ket_mapper_blks[new_row]\n b_id_out = out._Bra_mapper_blks[new_col]\n\n ## [DEBUG] >>>>\n if DEBUG:\n if b_id_in[0] < 0 or b_id_out[0] < 0:\n raise Exception(\"[ERROR][DEBUG][Internal check neg pos]\")\n if b_id_in[0] != b_id_out[0]:\n print(b_id_in[0], b_id_out[0])\n print(\"[ERROR!][DEBUG][Internal check un-matched block]\")\n exit(1)\n ## <<<<\n out.Storage[b_id_in[0]][b_id_in[1], b_id_out[1]] = self.Storage[b][i, j]\n # out._contiguous = True\n return out\n\n else:\n if self.is_contiguous():\n return self\n else:\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n is_diag=self.is_diag,\n rowrank=self.rowrank,\n check=False)\n\n tmp._mac(braket=self.braket,\n torch_tensor=self.Storage.contiguous())\n return tmp\n\n def is_contiguous(self):\n \"\"\"\n Return the status of memory contiguous.\n\n Return:\n bool, if True, then the Storage of UniTensor is contiguous. if False, then the Storage of UiTensor is non-contiguous.\n\n \"\"\"\n if self.is_symm:\n return self._contiguous\n else:\n return self.Storage.is_contiguous()\n\n def Permute(self, mapper, rowrank=None, by_label=False):\n \"\"\"\n Permute the bonds of the UniTensor.\n \n [Note] the computation complexity of Permute is O(1) which is very fast. The permute will not change the underlying memory layout. It will put the tensor into a \"non-contiguous\" status. Call Contiguous() or Contiguous_() when actually need to move memory.\n\n\n Args:\n mapper:\n a python list or 1d numpy array with integer type elements that the UniTensor permute accordingly.\n If by_label=False, the in_mapper will use index as mapper.\n\n by_label: [default False]\n bool, when True, the mapper using the labels. When False, the mapper using the index.\n\n rowrank: [default: current rowrank]\n uint, the rank of row space. 
If not set, it is equal to the current Tensor's rank of row space.\n\n Return:\n\n self\n\n Example:\n\n >>> bds_x = [tor10.Bond(6),tor10.Bond(5),tor10.Bond(4),tor10.Bond(3),tor10.Bond(2)]\n >>> x = tor10.UniTensor(bonds=bds_x, rowrank=3,labels=[1,3,5,7,8])\n >>> y = copy.deepcopy(x)\n >>> z = copy.deepcopy(x)\n >>> x.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 5\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 1 ____| 6 3 |____ 7 \n | | \n 3 ____| 5 2 |____ 8 \n | | \n 5 ____| 4 | \n \\ / \n ------------- \n\n >>> x.Permute([0,2,1,4,3])\n >>> x.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 5\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 1 ____| 6 2 |____ 8 \n | | \n 5 ____| 4 3 |____ 7 \n | | \n 3 ____| 5 | \n \\ / \n -------------\n \n >>> y.Permute([3,1,5,7,8],by_label=True)\n >>> y.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 5\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 3 ____| 5 3 |____ 7 \n | | \n 1 ____| 6 2 |____ 8 \n | | \n 5 ____| 4 | \n \\ / \n -------------\n\n >>> z.Permute([3,1,5,7,8],rowrank=2,by_label=True)\n >>> z.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 5\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 3 ____| 5 4 |____ 5 \n | | \n 1 ____| 6 3 |____ 7 \n | | \n | 2 |____ 8 \n \\ / \n -------------\n\n\n \n \"\"\"\n ## check\n if not (isinstance(mapper, list) or isinstance(mapper, np.ndarray)):\n raise TypeError(\"UniTensor.Permute\", \"[ERROR] mapper should be an 1d python list or numpy array.\")\n if len(mapper) != len(self.bonds):\n raise ValueError(\"UniTensor.Permute\", \"[ERROR] len(mapper) should equal to Tensor rank\")\n\n ## check duplicate:\n if len(mapper) != len(np.unique(mapper)):\n raise ValueError(\"UniTensor.Permute\", \"[ERROR] mapper contain duplicate elements.\")\n\n if by_label:\n DD = dict(zip(self.labels, np.arange(len(self.labels))))\n\n if not all(lbl in self.labels for lbl in mapper):\n raise Exception(\"UniTensor.Permute\",\n \"[ERROR] by_label=True but mapper contain invalid labels not appear in the UniTensor label\")\n idx_mapper = np.array([DD[x] for x in mapper])\n else:\n idx_mapper = np.array(mapper).astype(np.int)\n\n self.labels = self.labels[idx_mapper]\n self.bonds = self.bonds[idx_mapper]\n if self.braket is not None:\n self.braket = self.braket[idx_mapper]\n\n if rowrank is not None:\n if rowrank < 0:\n raise ValueError(\"UniTensor.Permute\", \"rowrank must >=0\")\n\n self.rowrank = rowrank\n\n ## check braket_form:\n self._check_braket()\n\n ## master switch\n if self.is_symm:\n # raise Exception(\"[Developing]\")\n self._mapper = self._mapper[idx_mapper]\n Arr_range = np.arange(len(self._mapper)).astype(np.int)\n if (self._mapper == Arr_range).all():\n self._contiguous = True\n else:\n self._contiguous = False\n\n self._inv_mapper = np.zeros(len(self._mapper)).astype(np.int)\n self._inv_mapper[self._mapper] = Arr_range\n self._inv_mapper = self._inv_mapper.astype(np.int)\n\n b_tqin, b_tqout = self.GetTotalQnums(physical=False)\n tqin_uni = b_tqin.GetUniqueQnums()\n tqout_uni = b_tqout.GetUniqueQnums()\n self._block_qnums = _fx_GetCommRows(tqin_uni, tqout_uni)\n\n else:\n\n if self.is_diag:\n if self.rowrank != 1:\n raise Exception(\"UniTensor.Permute\",\n \"[ERROR] UniTensor.is_diag=True must have rowrank==1\\n\" + \"Suggest, call Todense()\")\n\n else:\n # 
print(idx_mapper)\n # print(self.Storage)\n self.Storage = self.Storage.permute(tuple(idx_mapper))\n\n return self\n\n def Reshape(self, dimer, rowrank, new_labels=None):\n \"\"\"\n Return a new reshaped UniTensor into the shape specified as [dimer], with the first [rowrank] Bonds as bra-bond and other bonds as ket-bond.\n\n [Note] \n\n 1.Reshaping a UniTensor physically re-define the new basis, which construct a new physical definition tensor that has the same element.\n\n 2.Reshape can only operate on an untagged tensor.\n\n Args:\n\n dimer:\n The new shape of the UniTensor. This should be a python list.\n\n rowrank:\n The number of bonds in row space.\n\n new_labels:\n The new labels that will be set for new bonds after reshape.\n\n reture:\n\n UniTensor\n\n Example:\n\n >>> bds_x = [tor10.Bond(6),tor10.Bond(5),tor10.Bond(3)]\n >>> x = tor10.UniTensor(bonds=bds_x, rowrank=1,labels=[4,3,5])\n >>> x.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 3\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 4 ____| 6 5 |____ 3 \n | | \n | 3 |____ 5 \n \\ / \n ------------- \n\n >>> y = x.Reshape([2,3,5,3],new_labels=[1,2,3,-1],rowrank=2)\n >>> y.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 4\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 1 ____| 2 5 |____ 3 \n | | \n 2 ____| 3 3 |____ -1 \n \\ / \n ------------- \n\n\n \"\"\"\n if self.is_symm:\n raise TypeError(\"UniTensor.Reshape\", \"[ERROR] Cannot perform Reshape on a symmetry Tensor\")\n\n if self.is_diag:\n raise Exception(\"UniTensor.Reshape\", \"[ERROR] UniTensor.is_diag=True cannot be Reshape.\\n\" +\n \"[Suggest] Call UniTensor.Todense()\")\n\n if self.braket is not None:\n raise Exception(\"UniTensor.Reshape\",\n \"[ERROR] UniTensor.Reshape can only operate on a [untagged] tensor with regular bonds (BD_REG).\")\n\n if not isinstance(dimer, list):\n raise TypeError(\"UniTensor.Reshape\", \"[ERROR] mapper should be an python list.\")\n\n new_Storage = copy.deepcopy(self.Storage)\n\n new_Storage = new_Storage.reshape(dimer)\n if new_labels is None:\n new_labels = np.arange(len(dimer))\n\n tmp = UniTensor(bonds=np.array([Bond(dimer[i]) for i in range(len(dimer))]),\n labels=new_labels,\n rowrank=rowrank,\n check=False)\n\n tmp._mac(torch_tensor=new_Storage)\n\n return tmp\n\n def Reshape_(self, dimer, rowrank, new_labels=None):\n \"\"\"\n Inplace version of Reshape. \n Reshape UniTensor into the shape specified as [dimer], with the first [rowrank] Bonds as bra-bond and other bonds as ket-bond.\n\n [Note] \n\n 1.Reshapeing a UniTensor physically re-define the bra-ket basis space, which construct a new physical definition tensor that has the same element.\n\n 2.Reshape can only operate on an untagged tensor.\n\n Args:\n\n dimer:\n The new shape of the UniTensor. This should be a python list.\n\n rowrank:\n The number of bonds in row space.\n\n new_labels [option]:\n The new labels that will be set for new bonds after reshape. 
If not set, the label will be initialize using default enumerate rule.\n\n Return:\n\n self\n \n Example:\n\n >>> bds_x = [tor10.Bond(6),tor10.Bond(5),tor10.Bond(3)]\n >>> x = tor10.UniTensor(bonds=bds_x, rowrank=1,labels=[4,3,5])\n >>> x.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 3\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 4 ____| 6 5 |____ 3 \n | | \n | 3 |____ 5 \n \\ / \n ------------- \n\n >>> x.Reshape_([2,3,5,3],new_labels=[1,2,3,-1],rowrank=2)\n >>> x.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 4\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 1 ____| 2 5 |____ 3 \n | | \n 2 ____| 3 3 |____ -1 \n \\ / \n -------------\n\n\n \"\"\"\n if self.is_symm:\n raise TypeError(\"UniTensor.Reshape\", \"[ERROR] Cannot perform Reshape on a symmetry Tensor\")\n\n if self.is_diag:\n raise Exception(\"UniTensor.Reshape\", \"[ERROR] UniTensor.is_diag=True cannot be Reshape.\\n\" +\n \"[Suggest] Call UniTensor.Todense()\")\n\n if self.braket is not None:\n raise Exception(\"UniTensor.Reshape\",\n \"[ERROR] UniTensor.Reshape can only operate on a [untagged] tensor with regular bonds (BD_REG).\")\n\n if not isinstance(dimer, list):\n raise TypeError(\"UniTensor.Reshape\", \"[ERROR] mapper should be an python list.\")\n\n self.Storage = self.Storage.reshape(dimer)\n\n if new_labels is None:\n new_labels = np.arange(len(dimer))\n\n self.labels = new_labels\n self.bonds = np.array([Bond(dimer[i]) for i in range(len(dimer))])\n self.rowrank = rowrank\n\n return self\n\n def View(self, dimer, rowrank, new_labels=None):\n \"\"\"\n Return a new view of UniTensor into the shape specified as [dimer], with the first [rowrank] Bonds as bra-bond and other bonds as ket-bond.\n\n The View() can only operate on a contiguous tensor, otherwise, Contiguous_() or Contiguous() need to be called before the tensor can be viewed. This is the same as pytorch.view().\n\n [Note] \n\n 1.View a UniTensor physically re-define the new basis, which construct a new physical definition tensor that has the same element.\n\n 2.View can only operate on an untagged tensor.\n \n 3.View requires a contiguous tensor. \n\n Args:\n\n dimer:\n The new shape of the UniTensor. This should be a python list.\n\n rowrank:\n The number of bonds in row space.\n\n new_labels:\n The new labels that will be set for new bonds after reshape.\n\n reture:\n\n UniTensor\n\n Example:\n\n >>> bds_x = [tor10.Bond(6),tor10.Bond(5),tor10.Bond(3)]\n >>> x = tor10.UniTensor(bonds=bds_x, rowrank=1,labels=[4,3,5])\n >>> x.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 3\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 4 ____| 6 5 |____ 3 \n | | \n | 3 |____ 5 \n \\ / \n ------------- \n\n >>> x.Permute([0,2,1])\n >>> x.Contiguous_() # this is needed. 
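
        (sanity check; after Contiguous_() the memory layout matches the shape again, cf. the Contiguous_() example)
        >>> print(x.is_contiguous())
        True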
\n >>> y = x.View([2,3,5,3],new_labels=[1,2,3,-1],rowrank=2)\n >>> y.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 4\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 1 ____| 2 5 |____ 3 \n | | \n 2 ____| 3 3 |____ -1 \n \\ / \n ------------- \n\n\n \"\"\"\n if self.is_symm:\n raise TypeError(\"UniTensor.View\", \"[ERROR] Cannot perform View on a symmetry Tensor\")\n\n if self.is_diag:\n raise Exception(\"UniTensor.View\", \"[ERROR] UniTensor.is_diag=True cannot be View.\\n\" +\n \"[Suggest] Call UniTensor.Todense()\")\n\n if not self.is_contiguous():\n raise Exception(\"UniTensor.View\",\n \"[ERROR] UniTensor is not contiguous. Call Contiguous_() or Contiguous() before .View()\")\n\n if self.braket is not None:\n raise Exception(\"UniTensor.View\",\n \"[ERROR] UniTensor.View can only operate on a [untagged] tensor with regular bonds (BD_REG).\")\n\n if not isinstance(dimer, list):\n raise TypeError(\"UniTensor.View\", \"[ERROR] mapper should be an python list.\")\n\n new_Storage = copy.deepcopy(self.Storage)\n\n new_Storage = new_Storage.view(dimer)\n if new_labels is None:\n new_labels = np.arange(len(dimer))\n\n tmp = UniTensor(bonds=np.array([Bond(dimer[i]) for i in range(len(dimer))]),\n labels=new_labels,\n rowrank=rowrank,\n check=False)\n\n tmp._mac(torch_tensor=new_Storage)\n\n return tmp\n\n def View_(self, dimer, rowrank, new_labels=None):\n \"\"\"\n Inplace version of View. \n View UniTensor into the shape specified as [dimer], with the first [rowrank] Bonds as bra-bond and other bonds as ket-bond.\n\n The View_() can only operate on a contiguous tensor, otherwise, Contiguous_() or Contiguous() need to be called before the tensor can be viewed. This is the inplace version of pytorch.view().\n \n\n [Note] \n\n 1.Viewing a UniTensor physically re-define the bra-ket basis space, which construct a new physical definition tensor that has the same element.\n\n 2.Viewing can only operate on an untagged tensor.\n\n 3. View_() requires a contiguous tensor.\n\n Args:\n\n dimer:\n The new shape of the UniTensor. This should be a python list.\n\n rowrank:\n The number of bonds in row space.\n\n new_labels [option]:\n The new labels that will be set for new bonds after reshape. 
If not set, the labels will be initialized using the default enumeration rule.\n\n        Return:\n\n            self\n        \n        Example:\n\n            >>> bds_x = [tor10.Bond(6),tor10.Bond(5),tor10.Bond(3)]\n            >>> x = tor10.UniTensor(bonds=bds_x, rowrank=1,labels=[4,3,5])\n            >>> x.Print_diagram()\n            -----------------------\n            tensor Name : \n            tensor Rank : 3\n            has_symmetry: False\n            on device : cpu\n            is_diag : False\n            ------------- \n            / \\ \n            4 ____| 6 5 |____ 3 \n            | | \n            | 3 |____ 5 \n            \\ / \n            -------------\n            \n            >>> x.Permute([0,2,1])\n            >>> x.Contiguous_() # this is needed\n            >>> x.View_([2,3,5,3],new_labels=[1,2,3,-1],rowrank=2)\n            >>> x.Print_diagram()\n            -----------------------\n            tensor Name : \n            tensor Rank : 4\n            has_symmetry: False\n            on device : cpu\n            is_diag : False\n            ------------- \n            / \\ \n            1 ____| 2 5 |____ 3 \n            | | \n            2 ____| 3 3 |____ -1 \n            \\ / \n            -------------\n\n\n        \"\"\"\n        if self.is_symm:\n            raise TypeError(\"UniTensor.View_()\", \"[ERROR] Cannot perform View_ on a symmetry Tensor\")\n\n        if self.is_diag:\n            raise Exception(\"UniTensor.View_()\", \"[ERROR] UniTensor.is_diag=True cannot be View_.\\n\" +\n                            \"[Suggest] Call UniTensor.Todense()\")\n\n        ## View_ requires a contiguous tensor (see [Note] 3); this check mirrors the one in View().\n        if not self.is_contiguous():\n            raise Exception(\"UniTensor.View_()\",\n                            \"[ERROR] UniTensor is not contiguous. Call Contiguous_() or Contiguous() before .View_()\")\n\n        if self.braket is not None:\n            raise Exception(\"UniTensor.View_()\",\n                            \"[ERROR] UniTensor.View_ can only operate on a [untagged] tensor with regular bonds (BD_REG).\")\n\n        if not isinstance(dimer, list):\n            raise TypeError(\"UniTensor.View_()\", \"[ERROR] dimer should be a python list.\")\n\n        self.Storage = self.Storage.view(dimer)\n\n        if new_labels is None:\n            new_labels = np.arange(len(dimer))\n\n        self.labels = new_labels\n        self.bonds = np.array([Bond(dimer[i]) for i in range(len(dimer))])\n        self.rowrank = rowrank\n\n        return self\n\n    ## Symmetric Tensor function\n    def GetTotalQnums(self, physical=False):\n        \"\"\"\n        Return two combined bond objects that hold the information of the total qnums on the bra and ket bonds.\n\n        Args:\n\n            physical [default: False]:\n\n                Return the physical total qnums.\n                \n                If True, the returned qnums_brabonds will be the physical qnums of all bonds tagged by BD_BRA, and qnums_ketbonds will be the physical qnums of all bonds tagged by BD_KET. \n                \n                If False, the returned qnums will be the qnums of all bonds in row-space; mismatched bonds will have reversed qnums when combined. This will match the layout of the current blocks. 
\n\n\n Return:\n qnums_brabonds, qnums_ketbonds:\n\n qnums_brabonds:\n a tor10.Bond, the combined bra-bond\n\n qnums_ketbonds:\n a tor10.Bond, the combined ket-bond.\n\n\n Example:\n\n * Multiple Symmetry::\n\n ## multiple Qnum:\n ## U1 x U1 x U1 x U1\n ## U1 = {-2,-1,0,1,2}\n ## U1 = {-1,1}\n ## U1 = {0,1,2,3}\n bd_sym_1 = tor10.Bond(3,tor10.BD_KET,qnums=[[0, 2, 1, 0],\n [1, 1,-1, 1],\n [2,-1, 1, 0]])\n bd_sym_2 = tor10.Bond(4,tor10.BD_KET,qnums=[[-1, 0,-1, 3],\n [ 0, 0,-1, 2],\n [ 1, 0, 1, 0],\n [ 2,-2,-1, 1]])\n bd_sym_3 = tor10.Bond(2,tor10.BD_BRA,qnums=[[-4, 3, 0,-1],\n [ 1, 1, -2,3]])\n\n sym_T = tor10.UniTensor(bonds=[bd_sym_1,bd_sym_2,bd_sym_3],rowrank=2,labels=[1,2,3],dtype=torch.float64)\n >>> sym_T.Pring_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 3\n has_symmetry: True\n on device : cpu\n braket_form : True\n |ket> <bra| \n --------------- \n | | \n 1 > __| 3 2 |__ < 3 \n | | \n 2 > __| 4 | \n | | \n --------------- \n\n \n >>> tqin, tqout = sym_T.GetTotalQnums()\n >>> print(tqin)\n Dim = 12 |\n KET : U1:: +4 +3 +2 +1 +3 +2 +1 +0 +2 +1 +0 -1\n U1:: -3 -1 -1 -1 -1 +1 +1 +1 +0 +2 +2 +2\n U1:: +0 +2 +0 +0 -2 +0 -2 -2 +0 +2 +0 +0\n U1:: +1 +0 +2 +3 +2 +1 +3 +4 +1 +0 +2 +3\n\n >>> print(tqout)\n Dim = 2 |\n BRA : U1:: +1 -4\n U1:: +1 +3\n U1:: -2 +0\n U1:: +3 -1\n\n\n >>> sym_T.SetRowRank(1)\n >>> sym_T.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 3\n has_symmetry: True\n on device : cpu\n braket_form : False\n |ket> <bra| \n --------------- \n | | \n 1 > __| 3 4 |__*> 2 \n | | \n | 2 |__ < 3 \n | | \n --------------- \n\n >>> tqin2,tqout2 = sym_T.GetTotalQnums()\n >>> print(tqin2)\n Dim = 2 |\n KET : U1:: -1 +1\n U1:: -2 +1\n U1:: -1 -2\n U1:: +2 +3\n\n >>> print(tqout2)\n Dim = 8 |\n BRA : U1:: -1 -6 +0 -5 +1 -4 +2 -3\n U1:: +3 +5 +1 +3 +1 +3 +1 +3\n U1:: -1 +1 -3 -1 -1 +1 -1 +1\n U1:: +2 -2 +3 -1 +1 -3 +0 -4\n \n >>> tqin2_phy, tqout2_phy = sym_T.GetTotalQnums(physical=True)\n >>> print(tqin2_phy)\n Dim = 12 |\n KET : U1:: +4 +3 +2 +1 +3 +2 +1 +0 +2 +1 +0 -1\n U1:: -3 -1 -1 -1 -1 +1 +1 +1 +0 +2 +2 +2\n U1:: +0 +2 +0 +0 -2 +0 -2 -2 +0 +2 +0 +0\n U1:: +1 +0 +2 +3 +2 +1 +3 +4 +1 +0 +2 +3\n\n >>> print(tqout2_phy)\n Dim = 2 |\n BRA : U1:: +1 -4\n U1:: +1 +3\n U1:: -2 +0\n U1:: +3 -1\n \n >>> print(tqin2 == tqin ) ## this should be False\n False\n\n >>> print(tqout2 == tqout) ## this should be False\n False\n\n >>> print(tqin2_phy == tqin) ## this should be true\n True\n \n >>> print(tqout2_phy == tqout) ## this should be true\n True\n\n \"\"\"\n if not self.is_symm:\n raise TypeError(\"UniTensor.GetTotalQnums\", \"[ERROR] GetTotal Qnums from a non-symm tensor\")\n\n # if (self.rowrank==0) or (self.rowrank==len(self.bonds)):\n # raise Exception(\"UniTensor.GetTotalQnums\",\"[ERROR] The TN symmetry structure is incorrect, without either any in-bond or any-outbond\")\n if physical:\n # virtual_cb-in\n cb_inbonds = copy.deepcopy(self.bonds[np.argwhere(self.braket == BondType[BD_KET]).flatten()])\n in_all = cb_inbonds[0]\n if len(cb_inbonds) > 1:\n in_all.combine(cb_inbonds[1:])\n\n cb_outbonds = copy.deepcopy(self.bonds[np.argwhere(self.braket == BondType[BD_BRA]).flatten()])\n out_all = cb_outbonds[0]\n if len(cb_outbonds) > 1:\n out_all.combine(cb_outbonds[1:])\n else:\n # virtual_cb-in\n cb_inbonds = copy.deepcopy(self.bonds[:self.rowrank]) * self.braket[:self.rowrank] * BondType[BD_KET]\n in_all = cb_inbonds[0]\n if len(cb_inbonds) > 1:\n in_all.combine(cb_inbonds[1:])\n cb_outbonds = copy.deepcopy(self.bonds[self.rowrank:]) * 
self.braket[self.rowrank:] * BondType[BD_BRA]\n out_all = cb_outbonds[0]\n if len(cb_outbonds) > 1:\n out_all.combine(cb_outbonds[1:])\n\n in_all.bondType = BD_KET\n out_all.bondType = BD_BRA\n\n return in_all, out_all\n\n def GetValidQnums(self, physical=False, return_shape=False):\n \"\"\"\n Return the quantum number set that has a valid block.\n\n Args:\n \n physical [default: False]:\n \n if set to True, return the unique quantum number sets defined by BD_BRA and BD_KET.\n\n The return 2D array has shape (# of blocks,qnum set)\n\n return_shape [default: False]:\n \n if set to True, return a 2D array with shape (# of blocks, size of each block)\n\n Return \n \n if return_shape == False: return [qnum sets, 2D ndarray] \n if return_shape == True : return [qnum sets, 2D ndarray], [shape (2D ndarray)]\n \n \n\n \"\"\"\n if physical:\n b_tqin, b_tqout = self.GetTotalQnums(physical=True)\n tqin_uni = b_tqin.GetUniqueQnums()\n tqout_uni = b_tqout.GetUniqueQnums()\n comm = _fx_GetCommRows(tqin_uni, tqout_uni)\n shap = []\n if return_shape:\n for q in comm:\n shap.append(np.array([b_tqin.GetDegeneracy(*q), b_tqout.GetDegeneracy(*q)]))\n return comm, np.array(shap)\n else:\n return comm\n else:\n comm = copy.deepcopy(self._block_qnums)\n if return_shape:\n b_tqin, b_tqout = self.GetTotalQnums(physical=False)\n shap = []\n for q in comm:\n shap.append(np.array([b_tqin.GetDegeneracy(*q), b_tqout.GetDegeneracy(*q)]))\n return comm, np.array(shap)\n else:\n return comm\n\n def PutBlock(self, block, *qnum):\n \"\"\"\n Put the block into the UniTensor. If the UniTensor is symmetry tensor, the block should be specify by the quantum number. \n \n Args:\n block:\n A UniTensor with rank-2\n \n *qnum:\n The quantum number set that specify the block.\n\n \n \"\"\"\n if not isinstance(block, self.__class__):\n raise TypeError(\"[ERROR] PutBlock can only accept a untagged UniTensor \")\n\n ## Note, block should be a UniTensor:\n if block.braket is not None:\n raise Exception(\"[ERROR] PutBlock can only accept a untagged UniTensor \")\n\n if not self.is_symm:\n ## check:\n\n if self.is_diag:\n if not block.is_diag:\n raise Exception(\n \"[ERROR] PutBlock for a is_diag=True tensor can only accept a block with is_diag=True\")\n\n if self.rowrank == 0:\n curr_shape_2d = torch.Size([self.Storage.numel()])\n if block.shape != curr_shape_2d:\n raise Exception(\"[ERROR] the shape of input Block\", block.shape,\n \"does not match the shape of current block\", curr_shape_2d)\n\n elif len(self.bonds) - self.rowrank == 0:\n curr_shape_2d = torch.Size([self.Storage.numel()])\n if block.shape != curr_shape_2d:\n raise Exception(\"[ERROR] the shape of input Block\", block.shape,\n \"does not match the shape of current block\", curr_shape_2d)\n else:\n curr_shape_2d = torch.Size([np.prod([x.dim for x in self.bonds[:self.rowrank]]),\n np.prod([x.dim for x in self.bonds[self.rowrank:]])])\n if block.shape != curr_shape_2d:\n raise Exception(\"[ERROR] the shape of input Block\", block.shape,\n \"does not match the shape of current block\", curr_shape_2d)\n\n ## memcpy low-lv-api\n #self.Storage.storage().copy_(block.Storage.storage())\n shp = self.Storage.shape\n self.Storage = block.Storage.clone().reshape(shp)\n # raise Exception(\"[Warning] PutBlock cannot be use for non-symmetry TN. 
Use SetElem instead.\")\n\n else:\n\n # raise Exception(\"Developing\")\n\n if len(qnum) != self.bonds[0].nsym:\n raise ValueError(\"UniTensor.PutBlock\", \"[ERROR] The quantum numbers do not match the number of types.\")\n\n ## check contiguous:\n if self._contiguous:\n is_set = False\n ## search if the tn has block of that qnums:\n for s in range(len(self._block_qnums)):\n if (np.array(qnum) == self._block_qnums[s]).all():\n ##check if shape is correct:\n if self.Storage[s].shape != block.shape:\n raise TypeError(\"UniTensor.PutBlock\", \"[ERROR] the input block with shape\", block.shape,\n \"does not match the current block's shape\", self.Storage[s].shape)\n #self.Storage[s].storage().copy_(block.Storage.storage())\n shp = self.Storage[s].shape\n self.Storage[s] = block.Storage.clone().reshape(shp)\n is_set = True\n break\n if not is_set:\n raise TypeError(\"UniTensor.PutBlock\", \"[ERROR] no block has qnums:\", qnum)\n\n else:\n\n ## search the current valid blocks :\n is_set = False\n for s in range(len(self._block_qnums)):\n if (np.array(qnum) == self._block_qnums[s]).all():\n ## get Nrowrank for the memory\n old_rowrank = len(self._Ket_invmapper_blks[0][0])\n\n accu_off = []\n tmp = 1\n for i in range(len(self.bonds)):\n accu_off.append(tmp)\n tmp *= self.bonds[-1 - i].dim\n accu_off = np.array(accu_off[::-1])\n\n new_accu_off_in = (accu_off[:self.rowrank] / accu_off[self.rowrank - 1]).astype(np.int)\n new_accu_off_out = accu_off[self.rowrank:]\n del accu_off\n\n ## copy from the right address.\n b_tqin, b_tqout = self.GetTotalQnums(physical=False)\n idx_in = np.argwhere((b_tqin.qnums == self._block_qnums[s]).all(axis=1)).flatten()\n idx_out = np.argwhere((b_tqout.qnums == self._block_qnums[s]).all(axis=1)).flatten()\n\n ## interface\n new_Ket_invmapper_blks = _fx_decompress_idx(idx_in, new_accu_off_in)\n # self._Ket_mapper_blks[idx_in,0] = b\n # self._Ket_mapper_blks[idx_in,1] = np.arange(len(idx_in)).astype(np.int)\n\n ## interface\n new_Bra_invmapper_blks = _fx_decompress_idx(idx_out, new_accu_off_out)\n # self._Bra_mapper_blks[idx_out,0] = b\n # self._Bra_mapper_blks[idx_out,1] = np.arange(len(idx_out)).astype(np.int)\n\n ## Get element only for this block from the right memory place:\n # old_rowrank = self._Ket_invmapper_blks[0].\n for i in range(len(idx_in)):\n for j in range(len(idx_out)):\n newidx = np.concatenate((new_Ket_invmapper_blks[i], new_Bra_invmapper_blks[j]))\n oldidx = newidx[self._inv_mapper]\n\n old_row = int(np.sum(self._accu_off_in * oldidx[:old_rowrank]))\n old_col = int(np.sum(self._accu_off_out * oldidx[old_rowrank:]))\n\n b_id_in = self._Ket_mapper_blks[old_row]\n b_id_out = self._Bra_mapper_blks[old_col]\n\n if b_id_in[0] != b_id_out[0]:\n raise Exception(\"[ERROR] internal FATAL\")\n\n if b_id_in[0] >= 0 and b_id_out[0] >= 0:\n self.Storage[b_id_in[0]][b_id_in[1], b_id_out[1]] = block.Storage[i, j].clone()\n else:\n print(\"[unphys pos]\")\n\n is_set = True\n break\n\n ## if there is no block with qnum:\n if not is_set:\n raise TypeError(\"UniTensor.PutBlock\", \"[ERROR] No block has qnums:\", qnum)\n\n def GetBlock(self, *qnum):\n \"\"\"\n Return the Block specify by the quantum number(s). If the UniTensor is non-symmetry, return self.\n\n Args:\n *qnum:\n The quantum number(s). 
Note that when get-block on a High-rank tensor, the quantum number represent the total quantum number of all the in(out)-bonds.\n\n Return:\n * UniTensor, rank-2 (for symmetry tensor)\n * a new rank-2 flattened UniTensor (for non-symmetry tensor)\n\n Example:\n * Single Symmetry::\n\n bd_sym_1 = tor10.Bond(3,tor10.BD_KET,qnums=[[0],[1],[2]])\n bd_sym_2 = tor10.Bond(4,tor10.BD_KET,qnums=[[-1],[2],[0],[2]])\n bd_sym_3 = tor10.Bond(5,tor10.BD_BRA,qnums=[[4],[2],[2],[5],[1]])\n sym_T = tor10.UniTensor(bonds=[bd_sym_1,bd_sym_2,bd_sym_3],rowrank=2,labels=[10,11,12],dtype=torch.float64)\n\n >>> sym_T.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 3\n has_symmetry: True\n on device : cpu\n braket_form : True\n |ket> <bra| \n --------------- \n | | \n 10 > __| 3 5 |__ < 12 \n | | \n 11 > __| 4 | \n | | \n --------------- \n\n >>> q_in, q_out = sym_T.GetTotalQnums()\n >>> print(q_in)\n Dim = 12 |\n KET : U1:: +4 +4 +2 +1 +3 +3 +1 +0 +2 +2 +0 -1\n\n >>> print(q_out)\n Dim = 5 |\n BRA : U1:: +5 +4 +2 +2 +1\n\n >>> bk2 = sym_T.GetBlock(2)\n >>> bk2.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 2\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 0 ____| 3 2 |____ 1 \n \\ / \n ------------- \n \n >>> print(bk2)\n Tensor name: \n is_diag : False\n tensor([[0., 0.],\n [0., 0.],\n [0., 0.]], dtype=torch.float64)\n\n * Multiple Symmetry::\n\n ## multiple Qnum:\n ## U1 x U1 x U1 x U1\n bd_sym_1 = tor10.Bond(3,tor10.BD_KET,qnums=[[0, 2, 1, 0],\n [1, 1,-1, 1],\n [2,-1, 1, 0]])\n bd_sym_2 = tor10.Bond(4,tor10.BD_KET,qnums=[[-1, 0,-1, 3],\n [ 0, 0,-1, 2],\n [ 1, 0, 1, 0],\n [ 2,-2,-1, 1]])\n bd_sym_3 = tor10.Bond(2,tor10.BD_BRA,qnums=[[-1,-2,-1,2],\n [ 1, 1, -2,3]])\n\n sym_T = tor10.UniTensor(bonds=[bd_sym_1,bd_sym_2,bd_sym_3],rowrank=2,labels=[1,2,3],dtype=torch.float64)\n\n >>> tqin, tqout = sym_T.GetTotalQnums()\n >>> print(tqin)\n Dim = 12 |\n KET : U1:: +4 +3 +2 +1 +3 +2 +1 +0 +2 +1 +0 -1\n U1:: -3 -1 -1 -1 -1 +1 +1 +1 +0 +2 +2 +2\n U1:: +0 +2 +0 +0 -2 +0 -2 -2 +0 +2 +0 +0\n U1:: +1 +0 +2 +3 +2 +1 +3 +4 +1 +0 +2 +3\n\n >>> print(tqout)\n Dim = 2 |\n BRA : U1:: +1 -1\n U1:: +1 -2\n U1:: -2 -1\n U1:: +3 +2\n\n >>> block_1123 = sym_T.GetBlock(1,1,-2,3)\n >>> print(block_1123)\n Tensor name: \n is_diag : False\n tensor([[0.]], dtype=torch.float64)\n\n\n\n\n \"\"\"\n if not self.is_symm:\n\n if self.is_diag:\n bds = [Bond(self.Storage.bonds[0].dim), Bond(self.Storage.bonds[0].dim)]\n tmp = UniTensor(bonds=bds, rowrank=1, check=False, is_diag=True)\n tmp._mac(torch_tensor=self.Storage.clone())\n return tmp\n else:\n\n if self.rowrank == 0:\n bds = [Bond(self.Storage.numel())]\n tmp = UniTensor(bonds=bds, rowrank=0, check=False)\n tmp._mac(torch_tensor=self.Storage.flatten())\n return tmp\n\n elif len(self.bonds) - self.rowrank == 0:\n bds = [Bond(self.Storage.numel())]\n tmp = UniTensor(bonds=bds, rowrank=1, check=False)\n tmp._mac(torch_tensor=self.Storage.flatten())\n return tmp\n else:\n bds = [Bond(np.prod([x.dim for x in self.bonds[:self.rowrank]])),\n Bond(np.prod([x.dim for x in self.bonds[self.rowrank:]]))]\n\n tmp = UniTensor(bonds=bds, rowrank=1, check=False)\n tmp._mac(torch_tensor=self.Storage.reshape(bds[0].dim, -1))\n return tmp\n else:\n # raise Exception(\"[Developing]\")\n\n # if not self.is_braket:\n # raise Exception(\"[ERROR] Can only get block from a symmetry Tensor in it's bra-ket form\\n Suggestion: call to_braket_form() or manually permute the tensor to the braket form. 
before get-block\")\n\n if len(qnum) != self.bonds[0].nsym:\n raise ValueError(\"UniTensor.GetBlock\", \"[ERROR] The qnumtum numbers not match the number of type.\")\n\n ## check contiguous:\n if self._contiguous:\n ## search if the tn has block of that qnums:\n for s in range(len(self._block_qnums)):\n if (np.array(qnum) == self._block_qnums[s]).all():\n tmp = UniTensor(bonds=[Bond(self.Storage[s].shape[0]), Bond(self.Storage[s].shape[1])],\n rowrank=1,\n check=False)\n tmp._mac(torch_tensor=self.Storage[s].clone())\n return tmp\n ## if there is no block with qnum:\n raise TypeError(\"UniTensor.GetBlock\", \"[ERROR] No block has qnums:\", qnum)\n else:\n ## search the current valid blocks :\n for s in range(len(self._block_qnums)):\n if (np.array(qnum) == self._block_qnums[s]).all():\n ## get Nrowrank for the memory\n old_rowrank = len(self._Ket_invmapper_blks[0][0])\n\n accu_off = []\n tmp = 1\n for i in range(len(self.bonds)):\n accu_off.append(tmp)\n tmp *= self.bonds[-1 - i].dim\n accu_off = np.array(accu_off[::-1])\n\n new_accu_off_in = (accu_off[:self.rowrank] / accu_off[self.rowrank - 1]).astype(np.int)\n new_accu_off_out = accu_off[self.rowrank:]\n del accu_off\n\n ## copy from the right address.\n b_tqin, b_tqout = self.GetTotalQnums(physical=False)\n idx_in = np.argwhere((b_tqin.qnums == self._block_qnums[s]).all(axis=1)).flatten()\n idx_out = np.argwhere((b_tqout.qnums == self._block_qnums[s]).all(axis=1)).flatten()\n\n ## Create only the block:\n Block = torch.zeros((len(idx_in), len(idx_out)), device=self.device, dtype=self.dtype)\n\n ## interface\n new_Ket_invmapper_blks = _fx_decompress_idx(idx_in, new_accu_off_in)\n # self._Ket_mapper_blks[idx_in,0] = b\n # self._Ket_mapper_blks[idx_in,1] = np.arange(len(idx_in)).astype(np.int)\n\n ## interface\n new_Bra_invmapper_blks = _fx_decompress_idx(idx_out, new_accu_off_out)\n # self._Bra_mapper_blks[idx_out,0] = b\n # self._Bra_mapper_blks[idx_out,1] = np.arange(len(idx_out)).astype(np.int)\n\n ## Get element only for this block from the right memory place:\n # old_rowrank = self._Ket_invmapper_blks[0].\n for i in range(len(idx_in)):\n for j in range(len(idx_out)):\n newidx = np.concatenate((new_Ket_invmapper_blks[i], new_Bra_invmapper_blks[j]))\n oldidx = newidx[self._inv_mapper]\n\n old_row = int(np.sum(self._accu_off_in * oldidx[:old_rowrank]))\n old_col = int(np.sum(self._accu_off_out * oldidx[old_rowrank:]))\n\n b_id_in = self._Ket_mapper_blks[old_row]\n b_id_out = self._Bra_mapper_blks[old_col]\n\n ## [DEBUG] >>>\n if DEBUG:\n if b_id_in[0] != b_id_out[0]:\n raise Exception(\"[ERROR] internal FATAL\")\n ## <<<\n\n if b_id_in[0] >= 0 and b_id_out[0] >= 0:\n Block[i, j] = self.Storage[b_id_in[0]][b_id_in[1], b_id_out[1]]\n else:\n ## [DEBUG] >>>\n if DEBUG:\n print(\"[ERROR] unphys pos!\")\n ## <<<<\n\n tmp = UniTensor(bonds=[Bond(Block.shape[0]), Bond(Block.shape[1])],\n check=False,\n rowrank=1)\n tmp._mac(torch_tensor=Block)\n return tmp\n ## if there is no block with qnum:\n raise TypeError(\"UniTensor.GetBlock\", \"[ERROR] No block has qnums:\", qnum)\n\n def torch(self):\n \"\"\"\n Transform a UniTensor to torch.Tensor. \n\n [Note]\n \n 1. this cannot be operate on a UniTensor with symmetry.\n 2. the return tensor will not share the same memory with the UniTensor.\n\n Return:\n \n torch.Tensor\n\n\n \"\"\"\n if self.is_symm:\n raise Exception(\"[ERROR] cannot transform the UniTensor with symmetry to torch.Tensor. 
GetBlock first.\")\n else:\n return self.Storage.clone()\n\n ## Autograd feature:\n def requires_grad(self, is_grad=None):\n \"\"\"\n The status for the autograd property.\n\n Args:\n is_grad:\n bool, if the autograd mechanism should be activate on this UniTensor.\n If the argument is not set, it will return the current autograd status.\n\n Return:\n bool, return only when is_grad argument is ignored.\n\n Example:\n ::\n bds_x = [tor10.Bond(5),tor10.Bond(5),tor10.Bond(3)]\n x = tor10.UniTensor(bonds=bds_x, rowrank=2, labels=[4,3,5])\n\n\n >>> print(x.requires_grad())\n False\n\n >>> x.requires_grad(True)\n >>> print(x.requires_grad())\n True\n\n >>> x.requires_grad(False)\n >>> print(x.requires_grad())\n False\n\n\n \"\"\"\n if is_grad is None:\n if self.is_symm:\n return self.Storage[0].requires_grad\n else:\n return self.Storage.requires_grad\n else:\n if self.is_symm:\n for s in range(len(self.Storage)):\n self.Storage[s].requires_grad_(bool(is_grad))\n else:\n self.Storage.requires_grad_(bool(is_grad))\n\n def grad(self):\n \"\"\"\n Return the gradient tensors subject to x where x is the current UniTensor. The return is None by default and becomes a UniTensor the first time a call backward(). The future calls to backward() will accumulate (add) gradient into it.\n\n This is the same as torch.Tensor.grad\n\n\n :math:`d/dx`\n\n Return:\n UniTensor, the shape of the return UniTensor and it's bonds are the same as the original UniTensor, but with default labels.\n\n Example:\n\n >>> x = tor10.UniTensor(bonds=[tor10.Bond(2),tor10.Bond(2)],rowrank=1,requires_grad=True)\n >>> print(x)\n Tensor name:\n is_diag : False\n tensor([[0., 0.],\n [0., 0.]], dtype=torch.float64, requires_grad=True)\n\n >>> y = (x + 4)**2\n >>> print(y)\n Tensor name:\n is_diag : False\n tensor([[16., 16.],\n [16., 16.]], dtype=torch.float64, grad_fn=<PowBackward0>)\n\n >>> out = tor10.Mean(y)\n >>> print(out)\n Tensor name:\n is_diag : False\n tensor(16., dtype=torch.float64, grad_fn=<MeanBackward1>)\n\n >>> out.backward()\n >>> print(x.grad())\n Tensor name:\n is_diag : False\n tensor([[2., 2.],\n [2., 2.]], dtype=torch.float64)\n\n \"\"\"\n if not self.requires_grad():\n return None\n else:\n if self.is_symm:\n\n tmp = UniTensor(bonds=self.bonds,\n labels=self.labels,\n rowrank=self.rowrank,\n check=False)\n tmp._mac(torch_tensor=[self.Storage[s].grad for s in range(len(self.Storage))],\n braket=self.braket,\n sym_mappers=(self._mapper, self._inv_mapper,\n self._Ket_mapper_blks, self._Ket_invmapper_blks,\n self._Bra_mapper_blks, self._Bra_invmapper_blks,\n self._contiguous, self._accu_off_in, self._accu_off_out))\n # raise Exception(\"Developing\")\n else:\n tmp = UniTensor(bonds=copy.deepcopy(self.bonds),\n rowrank=self.rowrank,\n check=False)\n tmp._mac(torch_tensor=self.Storage.grad)\n return tmp\n\n def backward(self):\n \"\"\"\n Backward the gradient flow in the constructed autograd graph. This is the same as torch.Tensor.backward\n \"\"\"\n if self.is_symm:\n for s in range(len(self.Storage)):\n self.Storage[s].backward()\n\n else:\n self.Storage.backward()\n\n def detach(self):\n \"\"\"\n Detach the current tensor from the current graph, making it a leaf. 
This is the same as torch.Tensor.detach_()\n\n Return:\n self\n \"\"\"\n if self.is_symm:\n for s in range(len(self.Storage)):\n self.Storage[s].detach_()\n else:\n self.Storage.detach_()\n return self\n\n\n###############################################################\n#\n# Action function\n#\n##############################################################\n## I/O\ndef Save(a, filename):\n \"\"\"\n Save a UniTensor to the file\n\n Args:\n a:\n The UniTensor that to be saved.\n\n filename:\n The saved file path\n\n Example:\n ::\n a = tor10.UniTensor(bonds=[tor10.Bond(3),tor10.Bond(4)],rowrank=1)\n tor10.Save(a,\"a.uniT\")\n\n \"\"\"\n if not isinstance(filename, str):\n raise TypeError(\"Save\", \"[ERROR] Invalid filename.\")\n if not isinstance(a, UniTensor):\n raise TypeError(\"Save\", \"[ERROR] input must be the UniTensor\")\n f = open(filename, \"wb\")\n pkl.dump(a, f)\n f.close()\n\n\ndef Load(filename):\n \"\"\"\n Load a UniTensor from the file.\n\n Args:\n filename:\n The path of the file to be loaded\n\n Return:\n UniTensor\n\n Example:\n ::\n a = tor10.Load(\"a.uniT\")\n\n \"\"\"\n if not isinstance(filename, str):\n raise TypeError(\"UniTensor.Save\", \"[ERROR] Invalid filename.\")\n if not os.path.exists(filename):\n raise Exception(\"UniTensor.Load\", \"[ERROR] file not exists\")\n\n f = open(filename, 'rb')\n tmp = pkl.load(f)\n f.close()\n if not isinstance(tmp, UniTensor):\n raise TypeError(\"Load\", \"[ERROR] loaded object is not the UniTensor\")\n\n return tmp\n\n\ndef Contract(a, b):\n \"\"\"\n Contract two tensors with the same labels.\n\n 1. two tensors must be the same type, if \"a\" is a symmetry/untagged/tagged tensor, \"b\" must also be a symmetry/untagged/tagged tensor.\n 2. When contract two symmetry tensor, the bonds that to be contracted must have the same qnums.\n\n 3. For tagged tensor, Each bra-bond can only contract with ket-bond, in terms of physical meaning, this means the contract traceing out the matched bra-ket.\n\n [Note] the argument \"a\" and \"b\" tensor defines the order of the out-come bond. 
After contract, the order of remaining bonds (both in-bond(bra) and out-bond(ket)) that appears in the new-tensor will follows the rule: a's in-bond will appears first, then the b's in-bond; a's out-bond will appears first, then b's out-bond (see example in below)\n\n\n Args:\n a:\n UniTensor\n\n b:\n UniTensor\n\n\n Return:\n UniTensor\n\n Example:\n ::\n x = tor10.UniTensor(bonds=[tor10.Bond(5),tor10.Bond(2),tor10.Bond(4),tor10.Bond(3)], rowrank=2,labels=[6,1,7,8])\n y = tor10.UniTensor(bonds=[tor10.Bond(4),tor10.Bond(2),tor10.Bond(3),tor10.Bond(6)], rowrank=2,labels=[7,2,10,9])\n\n\n >>> x.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 4\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 6 ____| 5 4 |____ 7 \n | | \n 1 ____| 2 3 |____ 8 \n \\ / \n ------------- \n\n >>> y.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 4\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 7 ____| 4 3 |____ 10 \n | | \n 2 ____| 2 6 |____ 9 \n \\ / \n ------------- \n\n >>> c = tor10.Contract(x,y)\n >>> c.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 6\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 6 ____| 5 3 |____ 8 \n | | \n 1 ____| 2 3 |____ 10 \n | | \n 2 ____| 2 6 |____ 9 \n \\ / \n ------------- \n\n >>> d = tor10.Contract(y,x)\n >>> d.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 6\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 2 ____| 2 3 |____ 10 \n | | \n 6 ____| 5 6 |____ 9 \n | | \n 1 ____| 2 3 |____ 8 \n \\ / \n ------------- \n\n\n Note that you can also contract for UniTensor with symmetry, even when they are not in the bra-ket form. 
As long as the quantum number on the to-be-contract bonds and the bond type matches (bra can only contract with ket)\n ::\n bd_sym_1a = tor10.Bond(3,tor10.BD_KET,qnums=[[0],[1],[2]])\n bd_sym_2a = tor10.Bond(4,tor10.BD_KET,qnums=[[-1],[2],[0],[2]])\n bd_sym_3a = tor10.Bond(5,tor10.BD_BRA,qnums=[[4],[2],[-1],[5],[1]])\n\n bd_sym_1b = tor10.Bond(3,tor10.BD_BRA,qnums=[[0],[1],[2]])\n bd_sym_2b = tor10.Bond(4,tor10.BD_BRA,qnums=[[-1],[2],[0],[2]])\n bd_sym_3b = tor10.Bond(7,tor10.BD_KET,qnums=[[1],[3],[-2],[2],[2],[2],[0]])\n\n sym_A = tor10.UniTensor(bonds=[bd_sym_1a,bd_sym_2a,bd_sym_3a],rowrank=2,labels=[10,11,12])\n sym_B = tor10.UniTensor(bonds=[bd_sym_2b,bd_sym_1b,bd_sym_3b],rowrank=1,labels=[11,10,7])\n\n\n >>> sym_A.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 3\n has_symmetry: True\n on device : cpu\n braket_form : True\n |ket> <bra| \n --------------- \n | | \n 10 > __| 3 5 |__ < 12 \n | | \n 11 > __| 4 | \n | | \n --------------- \n\n\n >>> sym_B.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 3\n has_symmetry: True\n on device : cpu\n braket_form : False\n |ket> <bra| \n --------------- \n | | \n 11 <*__| 4 3 |__ < 10 \n | | \n | 7 |__*> 7 \n | | \n --------------- \n\n >>> sym_AB = tor10.Contract(sym_A,sym_B)\n >>> sym_BA = tor10.Contract(sym_B,sym_A)\n >>> sym_AB.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 2\n has_symmetry: True\n on device : cpu\n braket_form : False\n |ket> <bra| \n --------------- \n | | \n 12 <*__| 5 7 |__*> 7 \n | | \n --------------- \n \n >>> sym_BA.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 2\n has_symmetry: True\n on device : cpu\n braket_form : True\n |ket> <bra| \n --------------- \n | | \n 7 > __| 7 5 |__ < 12 \n | | \n --------------- \n\n \"\"\"\n if isinstance(a, UniTensor) and isinstance(b, UniTensor):\n\n ## check:\n if (a.is_symm != b.is_symm) or ((a.braket is None) != (b.braket is None)):\n raise TypeError(\"Contract(a,b)\", \"[ERROR] the tensors should be the same type to be contracted\")\n\n ## get same vector:\n same, a_ind, b_ind = np.intersect1d(a.labels, b.labels, return_indices=True)\n\n aind_no_combine = np.setdiff1d(np.arange(len(a.labels)), a_ind)\n bind_no_combine = np.setdiff1d(np.arange(len(b.labels)), b_ind)\n\n ## master switch\n if a.is_symm:\n ## contract symm tensor > \n if len(same):\n\n for i in range(len(a_ind)):\n if not a.bonds[a_ind[i]].qnums.all() == b.bonds[b_ind[i]].qnums.all():\n raise ValueError(\"Contact(a,b)\", \"[ERROR] contract Bonds that has qnums mismatch.\")\n\n if False in np.unique((a.braket[a_ind] + b.braket[b_ind]) == 0):\n raise Exception(\"Contract(a,b)\", \"[ERROR] bra-bond can only contract with ket-bond\")\n\n tmpa = copy.deepcopy(a)\n tmpb = copy.deepcopy(b)\n tmpa.Permute(np.append(aind_no_combine, a_ind), rowrank=len(a.labels) - len(a_ind),\n by_label=False).Contiguous_()\n tmpb.Permute(np.append(b_ind, bind_no_combine), rowrank=len(b_ind), by_label=False).Contiguous_()\n\n # tmpa.Print_diagram()\n # print(tmpa)\n # print(tmpa.GetValidQnums(return_shape=True))\n # tmpb.Print_diagram()\n # print(tmpb)\n # print(tmpb.GetValidQnums(return_shape=True))\n # tmpa._Bra_mapper_blks\n\n aQ = tmpa.GetValidQnums()\n bQ = tmpb.GetValidQnums()\n\n out = UniTensor(bonds=np.append(tmpa.bonds[:tmpa.rowrank], tmpb.bonds[tmpb.rowrank:]),\n labels=np.append(tmpa.labels[:tmpa.rowrank], tmpb.labels[tmpb.rowrank:]),\n dtype=a.dtype,\n device=a.device)\n\n oQ = out.GetValidQnums()\n\n for obid in 
range(len(oQ)):\n ab = None\n for abid in range(len(aQ)):\n if (oQ[obid] == aQ[abid]).all():\n ab = abid\n break\n\n bb = None\n for bbid in range(len(bQ)):\n if (oQ[obid] == bQ[bbid]).all():\n bb = bbid\n break\n\n if (ab is not None) and (bb is not None):\n out.Storage[obid] = torch.matmul(tmpa.Storage[ab], tmpb.Storage[bb])\n\n return out\n\n else:\n ## product!!\n raise Exception(\"Developing\")\n\n\n\n\n else:\n ## contract non-sym tensor > \n\n if len(same):\n if a.is_diag:\n\n tmpa = torch.diag(a.Storage).to(a.Storage.device)\n else:\n tmpa = a.Storage\n\n if b.is_diag:\n tmpb = torch.diag(b.Storage).to(b.Storage.device)\n else:\n tmpb = b.Storage\n\n if a.braket is None:\n ## contract untagged \n tmp = torch.tensordot(tmpa, tmpb, dims=(a_ind.tolist(), b_ind.tolist()))\n\n new_bonds = np.concatenate(\n [copy.deepcopy(a.bonds[aind_no_combine]), copy.deepcopy(b.bonds[bind_no_combine])])\n new_io = [(aind_no_combine[x] >= a.rowrank) for x in range(len(aind_no_combine))] + [\n (bind_no_combine[x] >= b.rowrank) for x in range(len(bind_no_combine))]\n new_labels = np.concatenate(\n [copy.copy(a.labels[aind_no_combine]), copy.copy(b.labels[bind_no_combine])])\n\n new_io = np.array(new_io)\n # print(new_io)\n if len(new_bonds) > 0:\n mapper = np.argsort(new_io)\n new_bonds = new_bonds[mapper]\n new_labels = new_labels[mapper]\n tmp = tmp.permute(*mapper)\n\n out = UniTensor(bonds=new_bonds,\n labels=new_labels,\n rowrank=len(np.argwhere(new_io == 0)),\n check=False)\n out._mac(torch_tensor=tmp)\n\n return out\n else:\n ## tagged\n if False in np.unique((a.braket[a_ind] + b.braket[b_ind]) == 0):\n raise Exception(\"Contract(a,b)\", \"[ERROR] in-bond(bra) can only contract with out-bond (ket)\")\n\n tmp = torch.tensordot(tmpa, tmpb, dims=(a_ind.tolist(), b_ind.tolist()))\n\n new_bonds = np.concatenate(\n [copy.deepcopy(a.bonds[aind_no_combine]), copy.deepcopy(b.bonds[bind_no_combine])])\n new_io = [(aind_no_combine[x] >= a.rowrank) for x in range(len(aind_no_combine))] + [\n (bind_no_combine[x] >= b.rowrank) for x in range(len(bind_no_combine))]\n new_labels = np.concatenate(\n [copy.copy(a.labels[aind_no_combine]), copy.copy(b.labels[bind_no_combine])])\n new_braket = np.concatenate(\n [copy.copy(a.braket[aind_no_combine]), copy.copy(b.braket[bind_no_combine])])\n\n new_io = np.array(new_io)\n # print(new_io)\n if len(new_bonds) > 0:\n mapper = np.argsort(new_io)\n new_bonds = new_bonds[mapper]\n new_labels = new_labels[mapper]\n new_braket = new_braket[mapper]\n tmp = tmp.permute(*mapper)\n\n out = UniTensor(bonds=new_bonds,\n labels=new_labels,\n rowrank=len(np.argwhere(new_io == 0)),\n check=False)\n out._mac(braket=new_braket, torch_tensor=tmp)\n\n return out\n else:\n ## product!!\n if a.is_diag:\n tmpa = torch.diag(a.Storage)\n else:\n tmpa = a.Storage\n\n if b.is_diag:\n tmpb = torch.diag(b.Storage)\n else:\n tmpb = b.Storage\n\n if a.braket is None:\n ## untagged \n tmp = torch.tensordot(tmpa, tmpb, dims=0)\n new_bonds = np.concatenate([copy.deepcopy(a.bonds), copy.deepcopy(b.bonds)])\n new_labels = np.concatenate([copy.copy(a.labels), copy.copy(b.labels)])\n new_io = [(x >= a.rowrank) for x in range(len(a.bonds))] + [(x >= b.rowrank) for x in\n range(len(b.bonds))]\n\n if len(new_bonds) > 0:\n mapper = np.argsort(new_io)\n new_bonds = new_bonds[mapper]\n new_labels = new_labels[mapper]\n tmp = tmp.permute(*mapper)\n\n out = UniTensor(bonds=new_bonds,\n labels=new_labels,\n rowrank=a.rowrank + b.rowrank,\n check=False)\n out._mac(torch_tensor=tmp)\n return out\n\n else:\n ## tagged\n 
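## note: dims=0 contracts no axes -- torch.tensordot then returns the outer (direct) product, so the result carries all bonds of a followed by all bonds of b.\n                    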
tmp = torch.tensordot(tmpa, tmpb, dims=0)\n new_bonds = np.concatenate([copy.deepcopy(a.bonds), copy.deepcopy(b.bonds)])\n new_labels = np.concatenate([copy.copy(a.labels), copy.copy(b.labels)])\n new_braket = np.concatenate([copy.copy(a.braket), copy.copy(b.braket)])\n new_io = [(x >= a.rowrank) for x in range(len(a.bonds))] + [(x >= b.rowrank) for x in\n range(len(b.bonds))]\n\n if len(new_bonds) > 0:\n mapper = np.argsort(new_io)\n new_bonds = new_bonds[mapper]\n new_labels = new_labels[mapper]\n new_braket = new_braket[mapper]\n tmp = tmp.permute(*mapper)\n\n out = UniTensor(bonds=new_bonds,\n labels=new_labels,\n rowrank=a.rowrank + b.rowrank,\n check=False)\n out._mac(braket=new_braket,\n torch_tensor=tmp)\n return out\n if len(same):\n\n # if(a.is_symm):\n # for i in range(len(a_ind)):\n # if not a.bonds[a_ind[i]].qnums.all() == b.bonds[b_ind[i]].qnums.all():\n # raise ValueError(\"Contact(a,b)\",\"[ERROR] contract Bonds that has qnums mismatch.\")\n\n ## check bra-ket\n if a.braket is None:\n pass\n else:\n if False in np.unique((a.braket[a_ind] + b.braket[b_ind]) == 0):\n raise Exception(\"Contract(a,b)\", \"[ERROR] in-bond(bra) can only contract with out-bond (ket)\")\n\n aind_no_combine = np.setdiff1d(np.arange(len(a.labels)), a_ind)\n bind_no_combine = np.setdiff1d(np.arange(len(b.labels)), b_ind)\n\n if a.is_diag:\n tmpa = torch.diag(a.Storage).to(a.Storage.device)\n else:\n tmpa = a.Storage\n\n if b.is_diag:\n tmpb = torch.diag(b.Storage).to(b.Storage.device)\n else:\n tmpb = b.Storage\n\n tmp = torch.tensordot(tmpa, tmpb, dims=(a_ind.tolist(), b_ind.tolist()))\n\n new_bonds = np.concatenate(\n [copy.deepcopy(a.bonds[aind_no_combine]), copy.deepcopy(b.bonds[bind_no_combine])])\n new_io = [(aind_no_combine[x] >= a.rowrank) for x in range(len(aind_no_combine))] + [\n (bind_no_combine[x] >= b.rowrank) for x in range(len(bind_no_combine))]\n new_labels = np.concatenate([copy.copy(a.labels[aind_no_combine]), copy.copy(b.labels[bind_no_combine])])\n\n new_io = np.array(new_io)\n # print(new_io)\n if len(new_bonds) > 0:\n mapper = np.argsort(new_io)\n new_bonds = new_bonds[mapper]\n new_labels = new_labels[mapper]\n tmp = tmp.permute(*mapper)\n\n out = UniTensor(bonds=new_bonds,\n labels=new_labels,\n rowrank=len(np.argwhere(new_io == 0)),\n check=False)\n out._mac(torch_tensor=tmp)\n\n return out\n else:\n ## direct product\n\n if a.is_diag:\n tmpa = torch.diag(a.Storage)\n else:\n tmpa = a.Storage\n\n if b.is_diag:\n tmpb = torch.diag(b.Storage)\n else:\n tmpb = b.Storage\n\n tmp = torch.tensordot(tmpa, tmpb, dims=0)\n new_bonds = np.concatenate([copy.deepcopy(a.bonds), copy.deepcopy(b.bonds)])\n new_labels = np.concatenate([copy.copy(a.labels), copy.copy(b.labels)])\n new_io = [(x >= a.rowrank) for x in range(len(a.bonds))] + [(x >= b.rowrank) for x in range(len(b.bonds))]\n\n if len(new_bonds) > 0:\n mapper = np.argsort(new_io)\n new_bonds = new_bonds[mapper]\n new_labels = new_labels[mapper]\n tmp = tmp.permute(*mapper)\n\n out = UniTensor(bonds=new_bonds,\n labels=new_labels,\n rowrank=a.rowrank + b.rowrank,\n check=False)\n out._mac(torch_tensor=tmp)\n return out\n else:\n raise Exception('Contract(a,b)', \"[ERROR] a and b both have to be UniTensor\")\n\n\n## The functions that start with \"_\" are the private functions\n\ndef _CombineBonds(a, idxs, new_label, permute_back):\n \"\"\"\n [Private function, should not be called directly by user]\n\n This function combines the bonds in input UniTensor [a] by the specified labels [label]. 
The bondType of the combined bond always follows the bondType of the bond in [a] whose label is the largest index element in [label]\n\n    Args:\n\n        a:\n            UniTensor\n\n        idxs:\n\n            indices to be combined. It should be an int list / numpy array of labels. All the bonds with specified labels in the current UniTensor will be combined\n\n        new_label:\n            the new label of the combined bond \n\n        permute_back:\n            Set if the bonds should be permuted back\n\n\n    \"\"\"\n    if isinstance(a, UniTensor):\n\n        idx_no_combine = np.setdiff1d(np.arange(len(a.labels)),\n                                      idxs)  ## idx_no_combine will be from small to large, sorted!\n        old_shape = np.array(a.shape)\n\n        combined_dim = old_shape[idxs]\n        combined_dim = np.prod(combined_dim)\n        no_combine_dims = old_shape[idx_no_combine]\n\n        ## Set new label if appears.\n        if new_label is not None:\n            newlbl = int(new_label)\n            if newlbl in a.labels[idx_no_combine] or newlbl in a.labels[idxs[1:]]:\n                raise Exception(\"_CombineBonds\",\n                                \"[ERROR], cannot set new_label to %d as there will be duplicate bond with this label after combined\" % newlbl)\n\n            a.labels[idxs[0]] = newlbl\n\n        ##------------------------------------\n        ## master switch \n        if a.is_symm:\n            ## symmetry\n            # raise Exception(\"[Develope]\")\n\n            ## check if the combine are BRA or KET\n            contype_inout = np.unique(a.braket[idxs])\n            if len(contype_inout) != 1:\n                raise Exception(\"_CombineBonds\",\n                                \"[ERROR], label_to_combine should be all bra-bond or all ket-bond for Tensor with symmetry\")\n\n            if idxs[0] < a.rowrank:\n                a.Permute(np.concatenate((idxs, idx_no_combine)), rowrank=len(idxs), by_label=False)\n                ## put it on the contiguous form:\n                a.Contiguous_()\n\n                ## DEBUG >>>\n                if DEBUG:\n                    if not a.is_contiguous():\n                        raise Exception(\"[ERROR][DEBUG][internal] non-contiguous!!\")\n                ## <<<\n\n                ##[Fusion tree] >>>\n                # new_Nin = a.rowrank\n                for i in range(len(idxs) - 1):\n                    # if idxs[1+i]<a.rowrank:\n                    #     new_Nin-=1\n                    a.bonds[0].combine(a.bonds[1 + i])\n                ## <<<\n\n                del_pos = np.arange(1, len(idxs), 1).astype(np.int)\n                a.labels = np.delete(a.labels, del_pos)\n                a.braket = np.delete(a.braket, del_pos)\n                a.bonds = np.delete(a.bonds, del_pos)\n\n                ##house keeping mappers \n                for b in range(len(a.Storage)):\n                    a._Ket_invmapper_blks[b] = np.sum(a._Ket_invmapper_blks[b] * a._accu_off_in, axis=1)\n\n                a._accu_off_in = np.array([1], dtype=np.int)\n                a._mapper = np.arange(len(a.labels), dtype=np.int)  ## contiguous, so we just init\n                a._inv_mapper = copy.copy(a._mapper)\n\n                a.rowrank = 1\n\n            else:\n                a.Permute(np.concatenate((idx_no_combine, idxs[::-1])), rowrank=len(a.labels) - len(idxs),\n                          by_label=False)\n                ## put it on the contiguous form:\n                a.Contiguous_()\n                # print(a.labels)\n                ## DEBUG >>>\n                if DEBUG:\n                    if not a.is_contiguous():\n                        raise Exception(\"[ERROR][DEBUG][internal] non-contiguous!!\")\n                ## <<<\n\n                ##[Fusion tree] >>>\n                # new_Nin = a.rowrank\n                for i in range(len(idxs) - 1):\n                    # if idxs[1+i]<a.rowrank:\n                    #     new_Nin-=1\n                    a.bonds[-1].combine(a.bonds[-2 - i])\n                ## <<<\n\n                del_pos = np.arange(len(a.labels) - len(idxs), len(a.labels) - 1, 1).astype(np.int)\n                # print(del_pos)\n                a.labels = np.delete(a.labels, del_pos)\n                a.braket = np.delete(a.braket, del_pos)\n                a.bonds = np.delete(a.bonds, del_pos)\n\n                ##house keeping mappers \n                for b in range(len(a.Storage)):\n                    a._Bra_invmapper_blks[b] = np.sum(a._Bra_invmapper_blks[b] * a._accu_off_out, axis=1)\n\n                a._accu_off_out = np.array([1], dtype=np.int)\n                a._mapper = np.arange(len(a.labels), dtype=np.int)  ## contiguous, so we just init\n                a._inv_mapper = copy.copy(a._mapper)\n\n                a.rowrank 
= len(a.labels) - 1\n\n            if permute_back:\n                a.braket_form()\n\n\n        else:\n            ## non-symm\n            if a.is_diag:\n                raise TypeError(\"_CombineBonds\", \"[ERROR] CombineBonds doesn't support diagonal matrix.\")\n\n            if a.braket is None:\n                ## untagged type:\n\n                if permute_back:\n\n                    ##[Fusion tree] >>>\n                    new_Nin = a.rowrank\n                    for i in range(len(idxs) - 1):\n                        if idxs[1 + i] < a.rowrank:\n                            new_Nin -= 1\n                        a.bonds[idxs[0]].combine(a.bonds[idxs[1 + i]])\n                    ## <<<\n\n                    mapper = np.concatenate([idxs, idx_no_combine])\n                    a.Storage = a.Storage.permute(mapper.tolist()).contiguous().view(\n                        np.append(combined_dim, no_combine_dims).tolist())\n\n                    f_label = a.labels[idxs[0]]\n                    a.bonds = np.delete(a.bonds, idxs[1:])\n                    a.labels = np.delete(a.labels, idxs[1:])\n\n                    x = np.argwhere(a.labels == f_label)\n                    final_mapper = np.insert(np.arange(1, len(a.bonds), 1).astype(np.int), x[0], 0)\n                    a.Storage = a.Storage.permute(final_mapper.tolist())\n\n                    a.rowrank = new_Nin\n\n                else:\n                    ##[Fusion tree] >>>\n                    for i in range(len(idxs) - 1):\n                        a.bonds[idxs[0]].combine(a.bonds[idxs[1 + i]])\n                    ## <<<\n                    if idxs[0] >= a.rowrank:\n                        mapper = np.concatenate([idx_no_combine, idxs])\n                        a.bonds = np.append(a.bonds[idx_no_combine], a.bonds[idxs[0]])\n                        a.labels = np.append(a.labels[idx_no_combine], a.labels[idxs[0]])\n                        a.Storage = a.Storage.permute(mapper.tolist()).contiguous().view(\n                            np.append(no_combine_dims, combined_dim).tolist())\n                        a.rowrank = len(a.labels) - 1\n                    else:\n                        mapper = np.concatenate([idxs, idx_no_combine])\n                        a.bonds = np.append(a.bonds[idxs[0]], a.bonds[idx_no_combine])\n                        a.labels = np.append(a.labels[idxs[0]], a.labels[idx_no_combine])\n                        a.Storage = a.Storage.permute(mapper.tolist()).contiguous().view(\n                            np.append(combined_dim, no_combine_dims).tolist())\n                        a.rowrank = 1\n            else:\n\n                ## if the combine are BRA or KET\n                contype_inout = np.unique(a.braket[idxs])\n                if len(contype_inout) != 1:\n                    raise Exception(\"_CombineBonds\",\n                                    \"[ERROR], label_to_combine should be all bra-bond or all ket-bond for tagged-nonsymm Tensor\")\n\n                if permute_back:\n\n                    ##[Fusion tree] >>>\n                    new_Nin = a.rowrank\n                    # print(a.bonds)\n                    # print(a.bonds.shape)\n                    for i in range(len(idxs) - 1):\n                        if idxs[1 + i] < a.rowrank:\n                            new_Nin -= 1\n                        a.bonds[idxs[0]].combine(a.bonds[idxs[1 + i]])\n                    ## <<<\n\n                    mapper = np.concatenate([idxs, idx_no_combine])\n                    a.Storage = a.Storage.permute(mapper.tolist()).contiguous().view(\n                        np.append(combined_dim, no_combine_dims).tolist())\n\n                    f_label = a.labels[idxs[0]]\n                    a.bonds = np.delete(a.bonds, idxs[1:])\n                    a.labels = np.delete(a.labels, idxs[1:])\n                    a.braket = np.delete(a.braket, idxs[1:])\n\n                    x = np.argwhere(a.labels == f_label)\n                    final_mapper = np.insert(np.arange(1, len(a.bonds), 1).astype(np.int), x[0], 0)\n                    a.Storage = a.Storage.permute(final_mapper.tolist())\n\n                    a.rowrank = new_Nin\n                else:\n\n                    ##[Fusion tree] >>>\n                    for i in range(len(idxs) - 1):\n                        a.bonds[idxs[0]].combine(a.bonds[idxs[1 + i]])\n                    ## <<<\n\n                    if idxs[0] >= a.rowrank:\n                        mapper = np.concatenate([idx_no_combine, idxs])\n                        a.bonds = np.append(a.bonds[idx_no_combine], a.bonds[idxs[0]])\n                        a.labels = np.append(a.labels[idx_no_combine], a.labels[idxs[0]])\n                        a.braket = np.append(a.braket[idx_no_combine], a.braket[idxs[0]])\n                        a.Storage = a.Storage.permute(mapper.tolist()).contiguous().view(\n                            np.append(no_combine_dims, combined_dim).tolist())\n                        a.rowrank = len(a.labels) - 1\n\n                    else:\n                        mapper = np.concatenate([idxs, idx_no_combine])\n                        a.bonds = np.append(a.bonds[idxs[0]], a.bonds[idx_no_combine])\n                        a.labels = np.append(a.labels[idxs[0]], a.labels[idx_no_combine])\n                        a.braket = 
np.append(a.braket[idxs[0]], a.braket[idx_no_combine])\n a.Storage = a.Storage.permute(mapper.tolist()).contiguous().view(\n np.append(combined_dim, no_combine_dims).tolist())\n a.rowrank = 1\n\n else:\n raise Exception(\"_CombineBonds(UniTensor,int_arr)\", \"[ERROR] )CombineBonds can only accept UniTensor\")\n\n\ndef _Randomize(a):\n \"\"\"\n @description: <private function> This function randomize a UniTensor.\n @params :\n a : UniTensor\n @return : N/A\n\n \"\"\"\n\n if isinstance(a, UniTensor):\n if a.is_symm:\n for s in range(len(a.Storage)):\n a.Storage[s] = torch.rand(a.Storage[s].shape, dtype=a.Storage[s].dtype, device=a.Storage[s].device)\n else:\n a.Storage = torch.rand(a.Storage.shape, dtype=a.Storage.dtype, device=a.Storage.device)\n\n\n else:\n raise Exception(\"_Randomize(UniTensor)\", \"[ERROR] _Randomize can only accept UniTensor\")\n\n\ndef From_torch(torch_tensor, rowrank=None, labels=None, is_tag=False):\n \"\"\"\n Construct UniTensor from torch.Tensor.\n\n If the input torch_tensor belongs to a autograd graph, the contructed UniTensor will preserve the role of the input torch_tensor in the computational graph.\n\n Args:\n torch_tensor:\n Torch.Tensor\n\n rowrank:\n int, The number of inbond. Note that the first [rowrank] bonds will be set to tor10.BD_IN, and the remaining bonds will be set to tor10.BD_OUT\n\n labels:\n python list or 1d numpy array, The labels for each bonds. If ignore, the constucted UniTensor will using the default labels for each bond.\n\n Return:\n UniTensor\n\n Example:\n\n >>> x = torch.ones(3,3)\n >>> print(x)\n tensor([[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]])\n\n >>> y = tor10.From_torch(x,rowrank=1,labels=[4,5])\n >>> y.Print_diagram()\n -----------------------\n tensor Name : \n tensor Rank : 2\n has_symmetry: False\n on device : cpu\n is_diag : False\n ------------- \n / \\ \n 4 ____| 3 3 |____ 5 \n \\ / \n ------------- \n\n >>> print(y)\n Tensor name:\n is_diag : False\n tensor([[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]])\n\n\n >>> x2 = torch.ones(3,4,requires_grad=True)\n >>> print(x2)\n tensor([[1., 1., 1., 1.],\n [1., 1., 1., 1.],\n [1., 1., 1., 1.]], requires_grad=True)\n\n >>> y2 = tor10.From_torch(x2,rowrank=1)\n >>> print(y2.requires_grad())\n True\n\n\n \"\"\"\n if not isinstance(torch_tensor, torch.Tensor):\n raise TypeError(\"From_torch\", \"[ERROR] can only accept torch.Tensor\")\n\n shape = torch_tensor.shape\n\n if rowrank is not None:\n if rowrank > len(shape) or rowrank < 0:\n raise ValueError(\"From_torch\", \"[ERROR] rowrank exceed the rank of input torch tensor.\")\n else:\n if len(shape) != 0:\n raise ValueError(\"From_torch\", \"[ERROR] rowrank must be set for a non rank-0 tensor\")\n\n if labels is not None:\n if len(labels) != len(shape):\n raise TypeError(\"From_torch\", \"[ERROR] # of labels should match the rank of torch.Tensor\")\n\n if is_tag:\n\n new_bonds = [Bond(shape[i], BD_KET) for i in range(rowrank)] + \\\n [Bond(shape[i], BD_BRA) for i in np.arange(rowrank, len(shape), 1)]\n else:\n new_bonds = [Bond(shape[i]) for i in range(len(shape))]\n\n if len(new_bonds) == 0:\n tmp = UniTensor(bonds=[], labels=[], rowrank=0, check=False)\n tmp._mac(torch_tensor=torch_tensor)\n return tmp\n else:\n tmp = UniTensor(bonds=new_bonds, labels=labels, rowrank=rowrank, check=False)\n tmp._mac(torch_tensor=torch_tensor)\n return tmp\n" ]
[ [ "torch.Size", "torch.device", "torch.rand", "torch.zeros", "torch.from_numpy", "torch.tensor", "torch.diag", "torch.matmul", "torch.tensordot" ] ]
cyx669521/Conv-TasNet-PyTorch
[ "64188ffa48971218fdd68b66906970f215d7eca2" ]
[ "steps/ipsm_oracle_statistic_log.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2019 Northwestern Polytechnical University (author: Ke Wang)\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport os\nimport sys\n\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nsys.path.append(os.path.dirname(sys.path[0]) + '/utils')\nfrom sigproc.dsp import get_phase, wavread\nfrom sigproc.mask import ipsm_spectrum\nfrom sigproc.spectrum import spectrum\n\nEPSILON = np.finfo(np.float32).eps\nMAX_FLOAT = np.finfo(np.float32).max\nMIN_MASK = 0 - 10\nMAX_MASK = 1\n\nmix_wav_scp = 'data/tt/mix.scp'\ns1_wav_scp = 'data/tt/s1.scp'\ns2_wav_scp = 'data/tt/s2.scp'\n\nori_dir = 'data/2speakers/wav8k/min/tt'\nstatistic_dir = 'exp/ipsm_oracle_statistic_log'\n\nsample_rate = 8000\nframe_length = 32\nframe_shift = 8\nwindow_type = \"hanning\"\npreemphasis = 0.0\nsquare_root_window = True\n# do not change\nuse_log = True\nuse_power = False\n# do not change\n\nif not os.path.exists(statistic_dir):\n os.makedirs(statistic_dir)\n\nf_mix_wav = open(mix_wav_scp, \"r\")\nf_s1_wav = open(s1_wav_scp, \"r\")\nf_s2_wav = open(s2_wav_scp, \"r\")\n\nmix_wav = f_mix_wav.readlines()\ns1_wav = f_s1_wav.readlines()\ns2_wav = f_s2_wav.readlines()\n\nassert len(mix_wav) == len(s1_wav)\nassert len(s1_wav) == len(s2_wav)\n\n\ndef readwav(line):\n key, path = line.strip().split()\n wav, frame_rate = wavread(path)\n return key, wav\n\n\ndef compute_spectrum(line):\n key, wav = readwav(line)\n feat = spectrum(wav, sample_rate, frame_length, frame_shift,\n window_type, preemphasis, use_log, use_power,\n square_root_window)\n return key, feat\n\n\ndef compute_phase(line):\n key, wav = readwav(line)\n phase = get_phase(wav, sample_rate, frame_length, frame_shift, window_type,\n preemphasis, square_root_window)\n return phase\n\n\nmask_pool = np.empty(shape=(0,))\nfor i in range(len(mix_wav)):\n key_mix, feat_mix = compute_spectrum(mix_wav[i])\n key_s1, feat_s1 = compute_spectrum(s1_wav[i])\n key_s2, feat_s2 = compute_spectrum(s2_wav[i])\n assert key_mix == key_s1 and key_s1 == key_s2\n phase_mix = compute_phase(mix_wav[i])\n phase_s1 = compute_phase(s1_wav[i])\n phase_s2 = compute_phase(s2_wav[i])\n mask_s1 = ipsm_spectrum(feat_s1, feat_mix, phase_s1, phase_mix, use_log, use_power)\n mask_s2 = ipsm_spectrum(feat_s2, feat_mix, phase_s2, phase_mix, use_log, use_power)\n mask_s1 = np.log(np.clip(mask_s1, a_min=EPSILON, a_max=MAX_FLOAT))\n mask_s2 = np.log(np.clip(mask_s2, a_min=EPSILON, a_max=MAX_FLOAT))\n mask_s1 = np.clip(mask_s1.reshape(-1), a_min=MIN_MASK, a_max=MAX_MASK)\n mask_s2 = np.clip(mask_s2.reshape(-1), a_min=MIN_MASK, a_max=MAX_MASK)\n mask_pool = np.concatenate((mask_pool, mask_s1, mask_s2), axis=0)\nplt.hist(mask_pool, int((MAX_MASK - MIN_MASK) * 200))\nplt.title('Log IPSM Magnitudes (trucated to [{:d}, {:d}])'.format(int(MIN_MASK), int(MAX_MASK)))\nplt.savefig('{}.pdf'.format(statistic_dir + '/distribution'),\n format='pdf', bbox_inches='tight')\n\nf_mix_wav.close()\nf_s1_wav.close()\nf_s2_wav.close()\n" ]
[ [ "matplotlib.use", "numpy.concatenate", "numpy.empty", "numpy.finfo", "numpy.clip" ] ]
akanametov/NeuralStyleTransfer
[ "06efd9d201d0681dbeec3a806a340fc95a7aebfa" ]
[ "model/__init__.py" ]
[ "import torch\nimport torch.nn as nn\nfrom .utils import NormalizationLayer, ContentLossLayer, StyleLossLayer\n\nclass Compiler():\n def __init__(self, baseModel, contentLayerNames, styleLayerNames, device='cuda:0'):\n self.baseModel = baseModel.to(device)\n self.contentLayerNames = contentLayerNames\n self.styleLayerNames = styleLayerNames\n \n \n def compile(self, contentImage, styleImage, device='cuda:0'):\n contentImage = contentImage.to(device)\n styleImage = styleImage.to(device)\n contentLayers=[]\n styleLayers=[]\n model = nn.Sequential()\n model.add_module('norm', NormalizationLayer())\n i = 0\n for layer in self.baseModel.children():\n if isinstance(layer, nn.Conv2d):\n i += 1\n name = 'conv{}'.format(i)\n elif isinstance(layer, nn.ReLU):\n name = 'relu{}'.format(i)\n layer = nn.ReLU(inplace=False)\n elif isinstance(layer, nn.MaxPool2d):\n name = 'pool{}'.format(i)\n elif isinstance(layer, nn.BatchNorm2d):\n name = 'bn{}'.format(i)\n else:\n raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))\n \n model.add_module(name, layer)\n \n if name in self.contentLayerNames:\n target = model(contentImage).detach()\n layer = ContentLossLayer(target)\n model.add_module(\"content{}\".format(i), layer)\n contentLayers.append(layer)\n\n if name in self.styleLayerNames:\n target = model(styleImage).detach()\n layer = StyleLossLayer(target)\n model.add_module(\"style{}\".format(i), layer)\n styleLayers.append(layer)\n for i in range(len(model) - 1, -1, -1):\n if isinstance(model[i], ContentLossLayer) or isinstance(model[i], StyleLossLayer):\n break\n model = model[:(i + 1)]\n return model, contentLayers, styleLayers\n" ]
[ [ "torch.nn.Sequential", "torch.nn.ReLU" ] ]
zhongsheng-chen/SDF_Converter
[ "1067981b1056c68c38182eb8a4c255b4f3ab7ebe" ]
[ "convert_sdf_utils.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# Author: Zhongsheng Chen\n# Date: 10/10/2019\n# Copyright: Copyright 2019, Beijing University of Chemical Technology\n# License: The MIT License (MIT)\n# Email: [email protected]\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\nr\"\"\"A helper function for processing SDF-like dataset files from MoNA.\n\nFor dataset from Massbank of North America (MoNA), they are SDF-like files but exact SDF files. In the SDF-like file,\nsome lines in header sections was missing. So, these files can not used be loaded as a standard SDF file using RDKit\nTool. Specifications of a standard SDF file are given at https://en.wikipedia.org/wiki/Chemical_table_file.\nMolecule block are loaded and then append 'M END' and other lines to make sure it can be loaded as sdf.\n\nExample:\n $ python convert_sdf_utils.py \\\n --path_to_bad_sdf=/sdf/like/file/path \\\n --failed_block_file_name=/save/failed/block/to/file \\\n --output_dir=/save/path/to/converted/sdf \\\n --alsologtostderr\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom openbabel import pybel\nfrom rdkit import Chem\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('path_to_bad_sdf',\n 'test_dataset/test_mona_vf_npl.sdf',\n 'specify a relative path of a SDF-like file to convert as SDF file')\nflags.DEFINE_string('failed_block_file_name',\n '',\n 'specify a file to store failed molecule blocks')\nflags.DEFINE_string('output_dir',\n '',\n 'specify a directory for SDF files converted.')\n\n# required tags in sdf files.\nSDF_TAG_MASS_SPEC_PEAKS = 'MASS SPECTRAL PEAKS'\nSDF_TAG_INCHIKEY = 'INCHIKEY'\nSDF_TAG_INCHI = 'INCHI'\nSDF_TAG_NAME = 'NAME'\nSDF_TAG_MOLECULE_MASS = 'EXACT MASS'\n\nexpected_props = [SDF_TAG_MASS_SPEC_PEAKS,\n SDF_TAG_INCHIKEY,\n SDF_TAG_INCHI,\n SDF_TAG_NAME,\n SDF_TAG_MOLECULE_MASS]\n\n\ndef _make_mol_block_from_string(mol_str_in_lines):\n \"\"\" Make a molecule block from a string read by Chem.SDMolSupplier\n\n A valid molecule header should have three separated lines: title line, program line and counts line.\n The missing lines in the original dataset files are filled up with new blank lines (''). To restored molecules\n from a SDF data file, it needs to include 'M END' as molecule reading end flag. 
Because Google's\n    Neural Electron-Ionization Mass Spectrometry (NEIMS) model requires an InChIKey, the InChI is extracted from\n    the COMMENT section of the SDF data files, converted to the corresponding InChIKey, and written back to the\n    molecule blocks in the SDF data files.\n\n    Args:\n        mol_str_in_lines: List of strings describing one molecule.\n\n    Returns:\n        A list of strings for the molecule block.\n    \"\"\"\n\n    block = []\n    for line in mol_str_in_lines:\n        if \"V2000\" in line:  # insert the required blank lines into the molecule header block.\n            block.extend(['', line])\n        elif \"> <NAME>\" in line:  # insert 'M END' as the token marking a valid molecule.\n            block.extend(['M END', line])\n        elif \"$$$$\" in line:  # append two properties, INCHIKEY and INCHI, before ending a molecule block.\n            inchikey = ''\n            inchi = _get_prop_value_from_mol_block(block, SDF_TAG_INCHI)\n            if inchi:\n                inchikey = Chem.inchi.InchiToInchiKey('InChI=' + inchi)\n                # logging.warning('InChI %s has an InChIKey %s', inchi, inchikey)\n                # uncomment for logging the InChI and its InChIKey\n            block.extend(['> <%s>' % 'INCHI', inchi, ''])\n            block.extend(['> <%s>' % SDF_TAG_INCHIKEY, inchikey, '', line])\n        else:\n            block.append(line)\n    return block\n\n\ndef _make_mol_block_rational(mol_block):\n    \"\"\" Repair molecule blocks so that they parse successfully.\n\n    Note that even after the missing lines and the end-of-molecule flag ('M END') have been filled in, a\n    considerable number of molecule blocks are still not recognized as expected, for reasons that are not\n    entirely clear. OpenBabel can calibrate those; the problem most likely lies in wrong values in their atom\n    blocks, which leave the molecule blocks corrupted.\n\n    Args:\n        mol_block: A raw mol_block (list of strings).\n\n    Returns:\n        A molecule block whose atom block is calibrated, if the block is successfully loaded as SDF;\n        otherwise, None.\n    \"\"\"\n\n    mol_str = '\\n'.join(mol_block)\n    try:\n        mol_obj = pybel.readstring('sdf', mol_str)\n    except IOError:\n        mol_obj = None\n\n    return mol_obj.write('sdf').splitlines() if mol_obj is not None else None\n\n\ndef _has_prop_on_mol_block(block, prop_key):\n    \"\"\" Check whether a specific property exists in the molecule block.\n\n    The properties stored in SDF data files include, but are not limited to,\n    SDF_TAG_MASS_SPEC_PEAKS, SDF_TAG_INCHIKEY,\n    SDF_TAG_INCHI, SDF_TAG_NAME and\n    SDF_TAG_MOLECULE_MASS.\n\n    Args:\n        block: A molecule block (list of strings).\n        prop_key: The key of a property of the molecule.\n\n    Returns:\n        True if the block has the specific property prop_key; otherwise, False.\n\n    Raises:\n        ValueError if prop_key does not match any element in the expected properties.\n    \"\"\"\n\n    if prop_key not in expected_props:\n        raise ValueError('%s is not a supported property type.' % prop_key)\n    has_prop = False\n    for line in block:\n        if line.strip() == ('> <%s>' % prop_key):\n            has_prop = True\n    return has_prop\n\n\ndef _get_prop_value_from_mol_block(block, prop_key):\n    \"\"\" Get the corresponding value for a specific property of a molecule.\n\n    Assumes that the InChI for a molecule is stored in the first line of the COMMENT section of an SDF data\n    file, starting with 'InChI='.\n\n    Args:\n        block: A list of strings specifying a molecule block.\n        prop_key: A string specifying the key of a property of the molecule.\n\n    Returns:\n        The value of the specific property prop_key. 
If the specified property is not found in the\n        molecule block, '' is returned.\n    \"\"\"\n\n    prop_value = ''\n    if _has_prop_on_mol_block(block, prop_key) or prop_key == SDF_TAG_INCHI:\n        prop_key_full_name = ('> <%s>' % prop_key.upper())\n        comment_key_full_name = '> <COMMENT>'\n        if prop_key_full_name == '> <INCHI>':\n            prop_key_full_name = '> <COMMENT>'\n        for ind, line in enumerate(block):\n            if prop_key_full_name == comment_key_full_name and prop_key_full_name == line.strip():\n                if block[ind + 1].startswith('InChI='):\n                    # slice off the leading 'InChI=' prefix; str.strip('InChI=') would remove any of those\n                    # characters from both ends and could corrupt the value.\n                    prop_value = block[ind + 1][len('InChI='):]\n                break\n            elif prop_key_full_name == line.strip():\n                prop_value = block[ind + 1].strip()\n                break\n    return prop_value\n\n\ndef _write_mol_block_to_file(save_to_path, mol_block_list):\n    \"\"\" Write molecule blocks to a file.\"\"\"\n\n    def __mol_block_writer(mode):\n        with tf.gfile.Open(save_to_path, mode) as writer:\n            for line in mol_block:\n                writer.write('%s\\n' % line)\n\n    for index, mol_block in enumerate(mol_block_list):\n        if index == 0:\n            __mol_block_writer(mode='w')  # overwrite with the first block ...\n        else:\n            __mol_block_writer(mode='a')  # ... and append the rest.\n\n\ndef _check_mol_block_has_all_prop(mol_block):\n    \"\"\" Check whether all the considered properties exist in the molecule block.\"\"\"\n    prop_check_status = []\n    for mol_prop_key in expected_props:\n        prop_check_status.append(_has_prop_on_mol_block(mol_block, mol_prop_key))\n\n    # check that the block of every molecule has all the tags:\n    # 'MASS SPECTRAL PEAKS', 'INCHIKEY', 'INCHI', 'NAME', 'EXACT MASS'\n    return np.all(prop_check_status)\n\n\ndef _max_atoms_in_mol_block(mol_block_list):\n    max_num_atoms = 0\n    for mol_block in mol_block_list:\n        mol_str = '\\n'.join(mol_block)\n        mol = pybel.readstring('sdf', mol_str)\n        if len(mol.atoms) > max_num_atoms:\n            max_num_atoms = len(mol.atoms)\n    return max_num_atoms\n\n\ndef convert_to_sdf(path_to_bad_sdf, failed_block_file_name=None, output_dir=None):\n    \"\"\" Convert an SDF-like data file to SDF.\n\n    Note that, to form proper molecule blocks, the lines missing from the headers of the original molecule\n    strings read from SDF-like data files are filled in. Moreover, each InChI is converted to its InChIKey;\n    both are appended as properties just before the four dollar signs ($$$$) that end each molecule string.\n\n    Args:\n        path_to_bad_sdf: Path of the SDF-like data file to load.\n        failed_block_file_name: A file to store failed molecule blocks, if set. By default (None), failed\n            molecule blocks are skipped and no failed molecule block is written to file.\n        output_dir: Directory for the converted SDF file, if set. 
By default (None), the directory of the\n            SDF-like data file is used as the output directory.\n\n    Returns:\n        valid_mol_block_list: A list of molecule blocks converted successfully.\n        failed_mol_block_list: A list of corrupted molecule blocks.\n        num_valid_mol_block: The number of molecule blocks converted successfully.\n        num_failed_mol_block: The number of molecule blocks too corrupted to be converted.\n        max_num_atoms: The maximum number of atoms over the molecules converted successfully.\n    \"\"\"\n\n    suppl = Chem.SDMolSupplier(path_to_bad_sdf)\n\n    mol_block_list = []\n    logging.warning('Converting started ...')\n    for index in range(len(suppl)):\n        # all text up to each set of four dollar signs ($$$$) is taken as one molecule's string.\n        mol_str = suppl.GetItemText(index).strip().splitlines()\n        mol_block = _make_mol_block_rational(_make_mol_block_from_string(mol_str))\n\n        if mol_block is not None:\n            mol_block_list.append(mol_block)\n\n    num_failed = 0\n    valid_mol_block_list = []\n    failed_mol_block_list = []\n    for mol_block in mol_block_list:\n        if _check_mol_block_has_all_prop(mol_block):\n            valid_mol_block_list.append(mol_block)\n        else:\n            num_failed += 1\n            failed_mol_block_list.append(mol_block)\n\n    out_dir, sdf_name = os.path.split(path_to_bad_sdf)\n    if not output_dir:  # covers both the None default and '' (the original 'is' comparison with a literal was a bug).\n        output_dir = os.path.abspath(out_dir)\n    output_dir = os.path.abspath(output_dir)\n    save_valid_mol_block_to_path = os.path.join(output_dir, ('converted_%s' % sdf_name))\n    # only build the failure path when a file name was given; os.path.join(dir, None) raises TypeError.\n    save_failed_mol_block_to_path = os.path.join(output_dir, failed_block_file_name) if failed_block_file_name else None\n\n    max_num_atoms = 0\n    if valid_mol_block_list:\n        _write_mol_block_to_file(save_valid_mol_block_to_path, valid_mol_block_list)\n        max_num_atoms = _max_atoms_in_mol_block(valid_mol_block_list)\n    if failed_block_file_name and failed_mol_block_list:\n        _write_mol_block_to_file(save_failed_mol_block_to_path, failed_mol_block_list)\n    num_valid_mol_block = len(valid_mol_block_list)\n    num_failed_mol_block = len(failed_mol_block_list)\n    logging.warning(('Processing on %s from MassBank of North America (MoNA) finished. '\n                     'Apart from %d failed molecule blocks, in total '\n                     '%d molecules have been converted to a read-friendly SDF saved in the path %s. '\n                     'The maximum number of atoms among these molecules is %d.'),\n                    sdf_name, num_failed_mol_block, num_valid_mol_block, save_valid_mol_block_to_path, max_num_atoms)\n    return valid_mol_block_list, failed_mol_block_list, num_valid_mol_block, num_failed_mol_block, max_num_atoms\n\n\ndef main(_):\n    tf.gfile.MkDir(FLAGS.output_dir)\n    convert_to_sdf(FLAGS.path_to_bad_sdf, FLAGS.failed_block_file_name, FLAGS.output_dir)\n\n\nif __name__ == '__main__':\n    app.run(main)\n" ]
[ [ "numpy.all", "tensorflow.gfile.MkDir", "tensorflow.gfile.Open" ] ]
rajivranjanbuff/CSE-555-Project-4-Neural-Networks-and-Deep-Learning
[ "719672defdec5bd8500d8b142316f93ed0d3fba3" ]
[ "Assign4_5/two_hidden_layer.py" ]
[ "import numpy\nimport tensorflow as tf\nimport numpy as np\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.utils import np_utils\nfrom keras.optimizers import SGD\nimport matplotlib.pyplot as plt\nimport keras\nimport keras.backend as K\nfrom keras.callbacks import LambdaCallback\n\n\n\nseed = 7\nnumpy.random.seed(seed)\n\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n#X_train= X_train.reshape(60000,784)\n#X_test= X_test.reshape(60000,784)\n\n\n# flatten 28*28 images to a 784 vector for each image\nnum_pixels = X_train.shape[1] * X_train.shape[2]\nX_train = X_train.reshape(X_train.shape[0], num_pixels).astype('float32')\nX_test = X_test.reshape(X_test.shape[0], num_pixels).astype('float32')\n\n#for i in range(0,9):\nX0=np.zeros(shape=(100,784))\nX00=X_train[y_train[:,]==0]\nX0=X00[0:100,:]\n\nX1=np.zeros(shape=(100,784))\nX11=X_train[y_train[:,]==1]\nX1=X11[0:100,:]\n\nX2=np.zeros(shape=(100,784))\nX22=X_train[y_train[:,]==2]\nX2=X22[0:100,:]\n\nX3=np.zeros(shape=(100,784))\nX33=X_train[y_train[:,]==3]\nX3=X33[0:100,:]\n\nX4=np.zeros(shape=(100,784))\nX44=X_train[y_train[:,]==4]\nX4=X44[0:100,:]\n\nX5=np.zeros(shape=(100,784))\nX55=X_train[y_train[:,]==5]\nX5=X55[0:100,:]\n\nX6=np.zeros(shape=(100,784))\nX66=X_train[y_train[:,]==6]\nX6=X66[0:100,:]\n\nX7=np.zeros(shape=(100,784))\nX77=X_train[y_train[:,]==7]\nX7=X77[0:100,:]\n\nX8=np.zeros(shape=(100,784))\nX88=X_train[y_train[:,]==8]\nX8=X88[0:100,:]\n\nX9=np.zeros(shape=(100,784))\nX99=X_train[y_train[:,]==9]\nX9=X99[0:100,:]\n\n#X_1000_train=[np.zeros(shape=(1000,784))\nX_train = np.concatenate((X0,X1,X2,X3,X4,X5,X6,X7,X8,X9))\n\n\nX0t=np.zeros(shape=(100,784))\nX00t=X_test[y_test[:,]==0]\nX0t=X00t[0:100,:]\n\nX1t=np.zeros(shape=(100,784))\nX11t=X_test[y_test[:,]==1]\nX1t=X11t[0:100,:]\n\nX2t=np.zeros(shape=(100,784))\nX22t=X_test[y_test[:,]==2]\nX2t=X22t[0:100,:]\n\nX3t=np.zeros(shape=(100,784))\nX33t=X_test[y_test[:,]==3]\nX3t=X33t[0:100,:]\n\nX4t=np.zeros(shape=(100,784))\nX44t=X_test[y_test[:,]==4]\nX4t=X44t[0:100,:]\n\nX5t=np.zeros(shape=(100,784))\nX55t=X_test[y_test[:,]==5]\nX5t=X55t[0:100,:]\n\nX6t=np.zeros(shape=(100,784))\nX66t=X_test[y_test[:,]==6]\nX6t=X66t[0:100,:]\n\nX7t=np.zeros(shape=(100,784))\nX77t=X_test[y_test[:,]==7]\nX7t=X77t[0:100,:]\n\nX8t=np.zeros(shape=(100,784))\nX88t=X_test[y_test[:,]==8]\nX8t=X88t[0:100,:]\n\nX9t=np.zeros(shape=(100,784))\nX99t=X_test[y_test[:,]==9]\nX9t=X99t[0:100,:]\n\n#X_1000_train=[np.zeros(shape=(1000,784))\n\nX_test = np.concatenate((X0t,X1t,X2t,X3t,X4t,X5t,X6t,X7t,X8t,X9t))\n\ny_test = np.zeros(1000,)\n\nfor i in range(0,10):\n for j in range(0,100):\n y_test[(100*i)+j,]=i\n \ny_train = np.zeros(1000,)\n\nfor i in range(0,10):\n for j in range(0,100):\n y_train[(100*i)+j,]=i\n\n\n# normalize inputs from 0-255 to 0-1\nX_train = X_train / 255\nX_test = X_test / 255\n\n# one hot encode outputs\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\nnum_classes = y_test.shape[1]\n\nlearning_rate=[]\nclass SGDLearningRateTracker(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs={}):\n optimizer = self.model.optimizer\n _lr = tf.to_float(optimizer.lr, name='ToFloat')\n _decay = tf.to_float(optimizer.decay, name='ToFloat')\n _iter = tf.to_float(optimizer.iterations, name='ToFloat')\n \n lr = K.eval(_lr * (1. / (1. 
+ _decay * _iter)))\n learning_rate.append(lr)\n print(' - LR: {:.6f}\\n'.format(lr))\n#collecting test loss and accuracy in an array \nloss_collected_test=[]\nacc_collected_test=[]\nclass TestCallback_test(keras.callbacks.Callback):\n def __init__(self, test_data):\n self.test_data = test_data\n\n def on_epoch_end(self, epoch, logs={}):\n x, y = self.test_data\n loss, acc = self.model.evaluate(x, y, verbose=0)\n loss_collected_test.append(loss)\n acc_collected_test.append(acc)\n \n#collecting train loss and accuracy in an array \nloss_collected_train=[]\nacc_collected_train=[]\nclass TestCallback_train(keras.callbacks.Callback):\n def __init__(self, test_data):\n self.test_data = test_data\n\n def on_epoch_end(self, epoch, logs={}):\n x, y = self.test_data\n loss, acc = self.model.evaluate(x, y, verbose=0)\n loss_collected_train.append(loss)\n acc_collected_train.append(acc)\n \n# define baseline model\ndef baseline_model():\n\t# create model\n model = Sequential()\n model.add(Dense(30, input_dim=num_pixels, kernel_initializer='normal', activation='sigmoid'))\n model.add(Dense(30, kernel_initializer='normal', activation='sigmoid'))\n model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))\n model.summary()\n model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.1,decay=0.00001), metrics=['accuracy'])\n return model\n\n# build the model\nmodel = baseline_model()\n# Fit the model\n\nprint_weights = LambdaCallback(on_epoch_end=lambda batch, logs: print(model.layers[0].get_weights()))\n\ncallbacks_list = [SGDLearningRateTracker(),TestCallback_test((X_test, y_test)),TestCallback_train((X_train, y_train))]\n\n\nhistory=model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=30, batch_size=10,\nverbose=2,callbacks = callbacks_list)\n\n\n# Final evaluation of the model\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Baseline Error: %.2f%%\" % (100-scores[1]*100))\n\n\n# list all data in history\nprint(history.history.keys())\n# summarize history for accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('Training/testing data accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n# summarize history for loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('criterion function on training/testing data set')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\n#plotting learning speed \nEpoch_graph=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]\nplt.plot(Epoch_graph,learning_rate)\nplt.xlabel('Epoch')\nplt.ylabel('Learning Speed')\nplt.title('learning speed of the hidden layer')\nplt.show()\n\n#plotting errors\n#test error\nplt.plot(Epoch_graph,loss_collected_test)\nplt.xlabel('Epoch')\nplt.ylabel('Test_error')\nplt.title('Test Error')\nplt.show()\n\n#train error\nplt.plot(Epoch_graph,loss_collected_train)\nplt.xlabel('Epoch')\nplt.ylabel('Train_error')\nplt.title('Train Error')\nplt.show()\n\n\n\n" ]
[ [ "numpy.concatenate", "numpy.zeros", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel", "tensorflow.to_float", "matplotlib.pyplot.show" ] ]
Pankaj-Baranwal/Reinforcement-learning-with-tensorflow
[ "cf738d3e975aa9d2384dcd1a65dbdd156ddd970f" ]
[ "contents/1_command_line_reinforcement_learning/treasure_on_right.py" ]
[ "\"\"\"\nA simple example for Reinforcement Learning using table lookup Q-learning method.\nAn agent \"o\" is on the left of a 1 dimensional world, the treasure is on the rightmost location.\nRun this program and to see how the agent will improve its strategy of finding the treasure.\n\nView more on my tutorial page: https://morvanzhou.github.io/tutorials/\n\nA better explanation is available at\nhttps://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0\n\"\"\"\n\n\"\"\"\nNotes:\nUnlike policy gradient methods, which attempt to learn functions which directly map an observation to an action, Q-Learning attempts to learn the value of being in a given state, and taking a specific action there.\n\nQ learning helps learn long term expected rewards\n\nIn itโ€™s simplest implementation, Q-Learning is a table of values for every state (row) and action (column) possible in the environment. Within each cell of the table, we learn a value for how good it is to take a given action within a given state. In the case of the FrozenLake environment (OpenAI), we have 16 possible states (one for each block), and 4 possible actions (the four directions of movement), giving us a 16x4 table of Q-values. We start by initializing the table to be uniform (all zeros), and then as we observe the rewards we obtain for various actions, we update the table accordingly.\nFor making updates to Q-table values, we use Bellman equation:\n Q(s,a) = r + ฮณ(max(Q(sโ€™,aโ€™))\nThis says that the Q-value for a given state (s) and action (a) should represent the current reward (r) plus the maximum discounted (ฮณ) future reward expected according to our own table for the next state (sโ€™) we would end up in. The discount variable allows us to decide how important the possible future rewards are compared to the present reward. By updating in this way, the table slowly begins to obtain accurate measures of the expected future reward for a given action in a given state.\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport time\n\nnp.random.seed(2) # reproducible\n\nN_STATES = 6 # the length of the 1 dimensional world\nACTIONS = ['left', 'right'] # available actions\nEPSILON = 0.9 # greedy police. There is 0.1 probability of randomness so that agent may be able to explore the world and find robust solutions\nALPHA = 0.1 # learning rate\nGAMMA = 0.9 # discount factor. 
discount variable allows us to decide how important the possible future rewards are compared to the present reward.\nMAX_EPISODES = 13 # maximum episodes\nFRESH_TIME = 0.2 # fresh time for one move\n\n\ndef build_q_table(n_states, actions):\n \"\"\"\n Initialize a zero-valued q-table of states and actions\n \"\"\"\n table = pd.DataFrame(\n np.zeros((n_states, len(actions))), # q_table initial values\n columns=actions, # actions's name\n )\n # print(table) # show table\n return table\n\n\ndef choose_action(state, q_table):\n \"\"\"\n Decide on the next move.\n Act non-greedily every now and then,\n or explore arena if unexplored,\n else choose the state with maximum reward\n \"\"\"\n # This is how to choose an action\n state_actions = q_table.iloc[state, :]\n if (np.random.uniform() > EPSILON) or ((state_actions == 0).all()): # act non-greedy or state-action have no value (unexplored arena)\n action_name = np.random.choice(ACTIONS)\n else: # act greedy\n action_name = state_actions.idxmax() # replace argmax to idxmax as argmax means a different function in newer version of pandas\n return action_name\n\n\ndef get_env_feedback(S, A):\n # This is how agent will interact with the environment\n if A == 'right': # move right\n if S == N_STATES - 2: # terminate\n S_ = 'terminal'\n R = 1\n else:\n S_ = S + 1\n R = 0\n else: # move left\n R = 0\n if S == 0:\n S_ = S # reach the wall\n else:\n S_ = S - 1\n # New state and reward obtained\n return S_, R\n\n\ndef update_env(S, episode, step_counter):\n # This is how environment is updated\n env_list = ['-']*(N_STATES-1) + ['T'] # '---------T' our environment\n if S == 'terminal':\n interaction = 'Episode %s: total_steps = %s' % (episode+1, step_counter)\n print('\\r{}'.format(interaction), end='')\n time.sleep(2)\n print('\\r ', end='')\n else:\n env_list[S] = 'o'\n interaction = ''.join(env_list)\n print('\\r{}'.format(interaction), end='')\n time.sleep(FRESH_TIME)\n\n\ndef update_q_table(q_table, S, A, S_, R):\n \"\"\"\n Bellman equation\n \"\"\"\n is_terminated = False\n q_predict = q_table.loc[S, A]\n if S_ != 'terminal':\n q_target = R + GAMMA * q_table.iloc[S_, :].max() # next state is not terminal\n else:\n q_target = R # next state is terminal\n is_terminated = True # terminate this episode\n\n q_table.loc[S, A] += ALPHA * (q_target - q_predict) # update\n return q_table, S_, is_terminated\n\n\ndef rl():\n # main part of RL loop\n q_table = build_q_table(N_STATES, ACTIONS)\n for episode in range(MAX_EPISODES):\n step_counter = 0\n S = 0\n is_terminated = False\n update_env(S, episode, step_counter)\n while not is_terminated:\n\n A = choose_action(S, q_table)\n S_, R = get_env_feedback(S, A) # take action & get next state and reward\n q_table, S, is_terminated = update_q_table(q_table, S, A, S_, R) # move to next state\n\n update_env(S, episode, step_counter+1)\n step_counter += 1\n return q_table\n\n\nif __name__ == \"__main__\":\n q_table = rl()\n print('\\r\\nQ-table:\\n')\n print(q_table)\n" ]
[ [ "numpy.random.seed", "numpy.random.uniform", "numpy.random.choice" ] ]
allesklarbeidir/rio-tiler-mvt
[ "8e77435640b02a6098902f63426fdfc72b2bbcb9" ]
[ "setup.py" ]
[ "\"\"\"Setup for rio-tiler-mvt.\"\"\"\n\nfrom setuptools.extension import Extension\nfrom setuptools import setup, find_packages\nfrom Cython.Build import cythonize\n\nimport numpy\n\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\ninst_reqs = [\"numpy\", \"vtzero\"]\n\nvt = \"vector-tile-base @ git+https://github.com/mapbox/vector-tile-base.git\"\nextra_reqs = {\n \"test\": [vt, \"rio-tiler\", \"pytest\", \"pytest-cov\"],\n \"dev\": [vt, \"rio-tiler\", \"pytest\", \"pytest-cov\", \"pre-commit\"],\n}\n\next_options = {\"include_dirs\": [numpy.get_include()]}\next_modules = cythonize(\n [Extension(\"rio_tiler_mvt.mvt\", [\"rio_tiler_mvt/mvt.pyx\"], **ext_options)]\n)\n\nsetup(\n name=\"rio-tiler-mvt\",\n version=\"0.0.1dev1\",\n description=u\"\"\"A rio-tiler plugin to encode tile array to MVT\"\"\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n python_requires=\">=3\",\n classifiers=[\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering :: GIS\",\n ],\n keywords=\"COG MVT mapbox vectortile GIS\",\n author=u\"Vincent Sarago\",\n author_email=\"[email protected]\",\n url=\"https://github.com/cogeotiff/rio-tiler-mvt\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"ez_setup\", \"examples\", \"tests\"]),\n include_package_data=True,\n zip_safe=False,\n install_requires=inst_reqs,\n extras_require=extra_reqs,\n ext_modules=ext_modules,\n)\n" ]
[ [ "numpy.get_include" ] ]
daniel-de-vries/OpenMDAO
[ "c75c0cc5d116cad7960ae17f5a20e3520398faca" ]
[ "openmdao/core/tests/test_distribcomp.py" ]
[ "from __future__ import print_function\n\nimport unittest\nimport time\n\nimport numpy as np\n\nimport openmdao.api as om\nfrom openmdao.utils.mpi import MPI\nfrom openmdao.utils.array_utils import evenly_distrib_idxs, take_nth\nfrom openmdao.utils.assert_utils import assert_rel_error, assert_warning\n\ntry:\n from openmdao.vectors.petsc_vector import PETScVector\nexcept ImportError:\n PETScVector = None\n\nif MPI:\n rank = MPI.COMM_WORLD.rank\n commsize = MPI.COMM_WORLD.size\nelse:\n rank = 0\n commsize = 1\n\n\nclass InOutArrayComp(om.ExplicitComponent):\n\n def initialize(self):\n self.options.declare('arr_size', types=int, default=10,\n desc=\"Size of input and output vectors.\")\n\n self.options.declare('delay', types=float, default=.01,\n desc=\"Time to sleep in compute function.\")\n\n def setup(self):\n arr_size = self.options['arr_size']\n\n self.add_input('invec', np.ones(arr_size, float))\n self.add_output('outvec', np.ones(arr_size, float))\n\n def compute(self, inputs, outputs):\n time.sleep(self.options['delay'])\n outputs['outvec'] = inputs['invec'] * 2.\n\n\nclass DistribCompSimple(om.ExplicitComponent):\n \"\"\"Uses 2 procs but takes full input vars\"\"\"\n\n def initialize(self):\n self.options['distributed'] = True\n\n self.options.declare('arr_size', types=int, default=10,\n desc=\"Size of input and output vectors.\")\n\n def setup(self):\n arr_size = self.options['arr_size']\n\n self.add_input('invec', np.ones(arr_size, float))\n self.add_output('outvec', np.ones(arr_size, float))\n\n def compute(self, inputs, outputs):\n if MPI and self.comm != MPI.COMM_NULL:\n if rank == 0:\n outvec = inputs['invec'] * 0.25\n elif rank == 1:\n outvec = inputs['invec'] * 0.5\n\n # now combine vecs from different processes\n both = np.zeros((2, len(outvec)))\n self.comm.Allgather(outvec, both)\n\n # add both together to get our output\n outputs['outvec'] = both[0, :] + both[1, :]\n else:\n outputs['outvec'] = inputs['invec'] * 0.75\n\n\nclass DistribInputComp(om.ExplicitComponent):\n \"\"\"Uses all procs and takes input var slices\"\"\"\n\n def initialize(self):\n self.options['distributed'] = True\n\n self.options.declare('arr_size', types=int, default=11,\n desc=\"Size of input and output vectors.\")\n\n def compute(self, inputs, outputs):\n if MPI:\n self.comm.Allgatherv(inputs['invec']*2.0,\n [outputs['outvec'], self.sizes,\n self.offsets, MPI.DOUBLE])\n else:\n outputs['outvec'] = inputs['invec'] * 2.0\n\n def setup(self):\n comm = self.comm\n rank = comm.rank\n\n arr_size = self.options['arr_size']\n\n self.sizes, self.offsets = evenly_distrib_idxs(comm.size, arr_size)\n start = self.offsets[rank]\n end = start + self.sizes[rank]\n\n self.add_input('invec', np.ones(self.sizes[rank], float),\n src_indices=np.arange(start, end, dtype=int))\n self.add_output('outvec', np.ones(arr_size, float), shape=np.int32(arr_size))\n\n\nclass DistribOverlappingInputComp(om.ExplicitComponent):\n \"\"\"Uses 2 procs and takes input var slices\"\"\"\n\n def initialize(self):\n self.options['distributed'] = True\n\n self.options.declare('arr_size', types=int, default=11,\n desc=\"Size of input and output vectors.\")\n\n def compute(self, inputs, outputs):\n outputs['outvec'][:] = 0\n if MPI:\n outs = self.comm.allgather(inputs['invec'] * 2.0)\n outputs['outvec'][:8] = outs[0]\n outputs['outvec'][4:11] += outs[1]\n else:\n outs = inputs['invec'] * 2.0\n outputs['outvec'][:8] = outs[:8]\n outputs['outvec'][4:11] += outs[4:11]\n\n def setup(self):\n \"\"\" component declares the local sizes and sets 
initial values\n for all distributed inputs and outputs\"\"\"\n\n comm = self.comm\n rank = comm.rank\n\n arr_size = self.options['arr_size']\n\n # need to initialize the input to have the correct local size\n if rank == 0:\n size = 8\n start = 0\n end = 8\n else:\n size = 7\n start = 4\n end = 11\n\n self.add_output('outvec', np.zeros(arr_size, float))\n self.add_input('invec', np.ones(size, float),\n src_indices=np.arange(start, end, dtype=int))\n\n\nclass DistribInputDistribOutputComp(om.ExplicitComponent):\n \"\"\"Uses 2 procs and takes input var slices.\"\"\"\n\n def initialize(self):\n self.options['distributed'] = True\n\n self.options.declare('arr_size', types=int, default=11,\n desc=\"Size of input and output vectors.\")\n\n def compute(self, inputs, outputs):\n outputs['outvec'] = inputs['invec']*2.0\n\n def setup(self):\n\n comm = self.comm\n rank = comm.rank\n\n arr_size = self.options['arr_size']\n\n sizes, offsets = evenly_distrib_idxs(comm.size, arr_size)\n self.sizes = sizes\n self.offsets = offsets\n\n start = offsets[rank]\n end = start + sizes[rank]\n\n self.add_input('invec', np.ones(sizes[rank], float),\n src_indices=np.arange(start, end, dtype=int))\n self.add_output('outvec', np.ones(sizes[rank], float))\n\n\nclass DistribInputDistribOutputDiscreteComp(DistribInputDistribOutputComp):\n\n def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):\n super(DistribInputDistribOutputDiscreteComp, self).compute(inputs, outputs)\n discrete_outputs['disc_out'] = discrete_inputs['disc_in'] + 'bar'\n\n def setup(self):\n super(DistribInputDistribOutputDiscreteComp, self).setup()\n self.add_discrete_input('disc_in', 'foo')\n self.add_discrete_output('disc_out', 'foobar')\n\n\n\nclass DistribNoncontiguousComp(om.ExplicitComponent):\n \"\"\"Uses 2 procs and takes non-contiguous input var slices and has output\n var slices as well\n \"\"\"\n\n def initialize(self):\n self.options['distributed'] = True\n\n self.options.declare('arr_size', types=int, default=11,\n desc=\"Size of input and output vectors.\")\n\n def compute(self, inputs, outputs):\n outputs['outvec'] = inputs['invec']*2.0\n\n def setup(self):\n\n comm = self.comm\n rank = comm.rank\n\n arr_size = self.options['arr_size']\n\n idxs = list(take_nth(rank, comm.size, range(arr_size)))\n\n self.add_input('invec', np.ones(len(idxs), float),\n src_indices=idxs)\n self.add_output('outvec', np.ones(len(idxs), float))\n\n\nclass DistribGatherComp(om.ExplicitComponent):\n \"\"\"Uses 2 procs gathers a distrib input into a full output\"\"\"\n\n def initialize(self):\n self.options['distributed'] = True\n\n self.options.declare('arr_size', types=int, default=11,\n desc=\"Size of input and output vectors.\")\n\n def compute(self, inputs, outputs):\n if MPI:\n self.comm.Allgatherv(inputs['invec'],\n [outputs['outvec'], self.sizes,\n self.offsets, MPI.DOUBLE])\n else:\n outputs['outvec'] = inputs['invec']\n\n def setup(self):\n\n comm = self.comm\n rank = comm.rank\n\n arr_size = self.options['arr_size']\n\n self.sizes, self.offsets = evenly_distrib_idxs(comm.size,\n arr_size)\n start = self.offsets[rank]\n end = start + self.sizes[rank]\n\n # need to initialize the variable to have the correct local size\n self.add_input('invec', np.ones(self.sizes[rank], float),\n src_indices=np.arange(start, end, dtype=int))\n self.add_output('outvec', np.ones(arr_size, float))\n\n\nclass NonDistribGatherComp(om.ExplicitComponent):\n \"\"\"Uses 2 procs gathers a distrib output into a full input\"\"\"\n\n def initialize(self):\n 
self.options.declare('size', types=int, default=1,\n desc=\"Size of input and output vectors.\")\n\n def setup(self):\n size = self.options['size']\n\n self.add_input('invec', np.ones(size, float))\n self.add_output('outvec', np.ones(size, float))\n\n def compute(self, inputs, outputs):\n outputs['outvec'] = inputs['invec']\n\n\[email protected](PETScVector is not None, \"Only runs when PETSc is not available\")\nclass NOMPITests(unittest.TestCase):\n\n def test_distrib_idx_in_full_out(self):\n size = 11\n\n p = om.Problem()\n top = p.model\n C1 = top.add_subsystem(\"C1\", InOutArrayComp(arr_size=size))\n C2 = top.add_subsystem(\"C2\", DistribInputComp(arr_size=size))\n top.connect('C1.outvec', 'C2.invec')\n\n msg = \"The 'distributed' option is set to True for Component C2, \" \\\n \"but there is no distributed vector implementation (MPI/PETSc) \" \\\n \"available. The default non-distributed vectors will be used.\"\n\n with assert_warning(UserWarning, msg):\n p.setup()\n\n # Conclude setup but don't run model.\n p.final_setup()\n\n C1._inputs['invec'] = np.array(range(size, 0, -1), float)\n\n p.run_model()\n\n self.assertTrue(all(C2._outputs['outvec'] == np.array(range(size, 0, -1), float)*4))\n\n\[email protected](PETScVector, \"PETSc is required.\")\nclass MPITests(unittest.TestCase):\n\n N_PROCS = 2\n\n def test_distrib_full_in_out(self):\n size = 11\n\n p = om.Problem()\n top = p.model\n C1 = top.add_subsystem(\"C1\", InOutArrayComp(arr_size=size))\n C2 = top.add_subsystem(\"C2\", DistribCompSimple(arr_size=size))\n top.connect('C1.outvec', 'C2.invec')\n\n p.setup()\n\n # Conclude setup but don't run model.\n p.final_setup()\n\n C1._inputs['invec'] = np.ones(size, float) * 5.0\n\n p.run_model()\n\n self.assertTrue(all(C2._outputs['outvec'] == np.ones(size, float)*7.5))\n\n def test_distrib_idx_in_full_out(self):\n size = 11\n\n p = om.Problem()\n top = p.model\n C1 = top.add_subsystem(\"C1\", InOutArrayComp(arr_size=size))\n C2 = top.add_subsystem(\"C2\", DistribInputComp(arr_size=size))\n top.connect('C1.outvec', 'C2.invec')\n\n p.setup()\n\n # Conclude setup but don't run model.\n p.final_setup()\n\n C1._inputs['invec'] = np.array(range(size, 0, -1), float)\n\n p.run_model()\n\n self.assertTrue(all(C2._outputs['outvec'] == np.array(range(size, 0, -1), float)*4))\n\n def test_distrib_1D_dist_output(self):\n size = 11\n\n p = om.Problem()\n top = p.model\n C1 = top.add_subsystem(\"C1\", InOutArrayComp(arr_size=size))\n C2 = top.add_subsystem(\"C2\", DistribInputComp(arr_size=size))\n C3 = top.add_subsystem(\"C3\", om.ExecComp(\"y=x\", x=np.zeros(size*commsize),\n y=np.zeros(size*commsize)))\n top.connect('C1.outvec', 'C2.invec')\n top.connect('C2.outvec', 'C3.x')\n p.setup()\n\n # Conclude setup but don't run model.\n p.final_setup()\n\n C1._inputs['invec'] = np.array(range(size, 0, -1), float)\n\n p.run_model()\n\n self.assertTrue(all(C2._outputs['outvec'] == np.array(range(size, 0, -1), float)*4))\n\n def test_distrib_idx_in_distrb_idx_out(self):\n # normal comp to distrib comp to distrb gather comp\n size = 3\n\n p = om.Problem()\n top = p.model\n C1 = top.add_subsystem(\"C1\", InOutArrayComp(arr_size=size))\n C2 = top.add_subsystem(\"C2\", DistribInputDistribOutputComp(arr_size=size))\n C3 = top.add_subsystem(\"C3\", DistribGatherComp(arr_size=size))\n top.connect('C1.outvec', 'C2.invec')\n top.connect('C2.outvec', 'C3.invec')\n p.setup()\n\n # Conclude setup but don't run model.\n p.final_setup()\n\n C1._inputs['invec'] = np.array(range(size, 0, -1), float)\n\n p.run_model()\n\n 
self.assertTrue(all(C3._outputs['outvec'] == np.array(range(size, 0, -1), float)*4))\n\n def test_noncontiguous_idxs(self):\n # take even input indices in 0 rank and odd ones in 1 rank\n size = 11\n\n p = om.Problem()\n top = p.model\n C1 = top.add_subsystem(\"C1\", InOutArrayComp(arr_size=size))\n C2 = top.add_subsystem(\"C2\", DistribNoncontiguousComp(arr_size=size))\n C3 = top.add_subsystem(\"C3\", DistribGatherComp(arr_size=size))\n top.connect('C1.outvec', 'C2.invec')\n top.connect('C2.outvec', 'C3.invec')\n p.setup()\n\n # Conclude setup but don't run model.\n p.final_setup()\n\n C1._inputs['invec'] = np.array(range(size), float)\n\n p.run_model()\n\n if MPI:\n if p.comm.rank == 0:\n self.assertTrue(all(C2._outputs['outvec'] ==\n np.array(list(take_nth(0, 2, range(size))), 'f')*4))\n else:\n self.assertTrue(all(C2._outputs['outvec'] ==\n np.array(list(take_nth(1, 2, range(size))), 'f')*4))\n\n full_list = list(take_nth(0, 2, range(size))) + list(take_nth(1, 2, range(size)))\n self.assertTrue(all(C3._outputs['outvec'] == np.array(full_list, 'f')*4))\n else:\n self.assertTrue(all(C2._outputs['outvec'] == C1._outputs['outvec']*2.))\n self.assertTrue(all(C3._outputs['outvec'] == C2._outputs['outvec']))\n\n @unittest.skipUnless(MPI, \"MPI is not active.\")\n def test_overlapping_inputs_idxs(self):\n # distrib comp with src_indices that overlap, i.e. the same\n # entries are distributed to multiple processes\n size = 11\n\n p = om.Problem()\n top = p.model\n C1 = top.add_subsystem(\"C1\", InOutArrayComp(arr_size=size))\n C2 = top.add_subsystem(\"C2\", DistribOverlappingInputComp(arr_size=size))\n top.connect('C1.outvec', 'C2.invec')\n p.setup()\n\n # Conclude setup but don't run model.\n p.final_setup()\n\n input_vec = np.array(range(size, 0, -1), float)\n C1._inputs['invec'] = input_vec\n\n # C1 (an InOutArrayComp) doubles the input_vec\n check_vec = input_vec * 2\n\n p.run_model()\n\n np.testing.assert_allclose(C2._outputs['outvec'][:4], check_vec[:4]*2)\n np.testing.assert_allclose(C2._outputs['outvec'][8:], check_vec[8:]*2)\n\n # overlapping part should be double size of the rest\n np.testing.assert_allclose(C2._outputs['outvec'][4:8], check_vec[4:8]*4)\n\n np.testing.assert_allclose(p.get_val('C2.invec', get_remote=True),\n np.hstack((check_vec[0:8], check_vec[4:11])))\n\n dist_out = p.get_val('C2.outvec', get_remote=True)\n np.testing.assert_allclose(dist_out[:11], dist_out[11:])\n np.testing.assert_allclose(dist_out[:4], check_vec[:4] * 2)\n np.testing.assert_allclose(dist_out[8:11], check_vec[8:] * 2)\n np.testing.assert_allclose(dist_out[4:8], check_vec[4:8] * 4)\n\n def test_nondistrib_gather(self):\n # regular comp --> distrib comp --> regular comp. 
last comp should\n # automagically gather the full vector without declaring src_indices\n size = 11\n\n p = om.Problem()\n top = p.model\n C1 = top.add_subsystem(\"C1\", InOutArrayComp(arr_size=size))\n C2 = top.add_subsystem(\"C2\", DistribInputDistribOutputComp(arr_size=size))\n C3 = top.add_subsystem(\"C3\", NonDistribGatherComp(size=size))\n top.connect('C1.outvec', 'C2.invec')\n top.connect('C2.outvec', 'C3.invec')\n p.setup()\n\n # Conclude setup but don't run model.\n p.final_setup()\n\n C1._inputs['invec'] = np.array(range(size, 0, -1), float)\n\n p.run_model()\n\n if MPI and self.comm.rank == 0:\n self.assertTrue(all(C3._outputs['outvec'] == np.array(range(size, 0, -1), float)*4))\n\n\nclass DeprecatedMPITests(unittest.TestCase):\n\n N_PROCS = 2\n\n def test_distrib_idx_in_full_out_deprecated(self):\n\n class DeprecatedDistribInputComp(om.ExplicitComponent):\n \"\"\"Deprecated version of DistribInputComp, uses attribute instead of option.\"\"\"\n\n def __init__(self, arr_size=11):\n super(DeprecatedDistribInputComp, self).__init__()\n self.arr_size = arr_size\n self.distributed = True\n\n def compute(self, inputs, outputs):\n if MPI:\n self.comm.Allgatherv(inputs['invec']*2.0,\n [outputs['outvec'], self.sizes,\n self.offsets, MPI.DOUBLE])\n else:\n outputs['outvec'] = inputs['invec'] * 2.0\n\n def setup(self):\n comm = self.comm\n rank = comm.rank\n\n self.sizes, self.offsets = evenly_distrib_idxs(comm.size, self.arr_size)\n start = self.offsets[rank]\n end = start + self.sizes[rank]\n\n self.add_input('invec', np.ones(self.sizes[rank], float),\n src_indices=np.arange(start, end, dtype=int))\n self.add_output('outvec', np.ones(self.arr_size, float),\n shape=np.int32(self.arr_size))\n\n size = 11\n\n p = om.Problem()\n top = p.model\n\n C1 = top.add_subsystem(\"C1\", InOutArrayComp(arr_size=size))\n\n # check deprecation on setter & getter\n msg = \"The 'distributed' property provides backwards compatibility \" \\\n \"with OpenMDAO <= 2.4.0 ; use the 'distributed' option instead.\"\n\n with assert_warning(DeprecationWarning, msg):\n C2 = top.add_subsystem(\"C2\", DeprecatedDistribInputComp(arr_size=size))\n\n with assert_warning(DeprecationWarning, msg):\n C2.distributed\n\n # continue to make sure everything still works with the deprecation\n top.connect('C1.outvec', 'C2.invec')\n\n # Conclude setup but don't run model.\n msg = \"The 'distributed' option is set to True for Component C2, \" \\\n \"but there is no distributed vector implementation (MPI/PETSc) \" \\\n \"available. 
The default non-distributed vectors will be used.\"\n\n if PETScVector is None:\n with assert_warning(UserWarning, msg):\n p.setup()\n else:\n p.setup()\n\n p.final_setup()\n\n C1._inputs['invec'] = np.array(range(size, 0, -1), float)\n\n p.run_model()\n\n self.assertTrue(all(C2._outputs['outvec'] == np.array(range(size, 0, -1), float)*4))\n\n\[email protected](PETScVector, \"PETSc is required.\")\[email protected](MPI, \"MPI is required.\")\nclass ProbRemoteTests(unittest.TestCase):\n\n N_PROCS = 4\n\n def test_prob_getval_dist_par(self):\n size = 3\n\n p = om.Problem()\n top = p.model\n par = top.add_subsystem('par', om.ParallelGroup())\n C1 = par.add_subsystem(\"C1\", DistribInputDistribOutputComp(arr_size=size))\n C2 = par.add_subsystem(\"C2\", DistribInputDistribOutputComp(arr_size=size))\n p.setup()\n\n # Conclude setup but don't run model.\n p.final_setup()\n\n if C1 in p.model.par._subsystems_myproc:\n C1._inputs['invec'] = np.array(range(C1._inputs._data.size, 0, -1), float)\n\n if C2 in p.model.par._subsystems_myproc:\n C2._inputs['invec'] = np.array(range(C2._inputs._data.size, 0, -1), float) * 3\n\n p.run_model()\n\n ans = p.get_val('par.C2.invec', get_remote=True)\n np.testing.assert_allclose(ans, np.array([6, 3,3], dtype=float))\n ans = p.get_val('par.C2.outvec', get_remote=True)\n np.testing.assert_allclose(ans, np.array([12, 6, 6], dtype=float))\n ans = p.get_val('par.C1.invec', get_remote=True)\n np.testing.assert_allclose(ans, np.array([2, 1, 1], dtype=float))\n ans = p.get_val('par.C1.outvec', get_remote=True)\n np.testing.assert_allclose(ans, np.array([4, 2, 2], dtype=float))\n\n def test_prob_getval_dist_par_disc(self):\n size = 3\n\n p = om.Problem()\n top = p.model\n par = top.add_subsystem('par', om.ParallelGroup())\n C1 = par.add_subsystem(\"C1\", DistribInputDistribOutputDiscreteComp(arr_size=size))\n C2 = par.add_subsystem(\"C2\", DistribInputDistribOutputDiscreteComp(arr_size=size))\n p.setup()\n\n # Conclude setup but don't run model.\n p.final_setup()\n\n if C1 in p.model.par._subsystems_myproc:\n C1._inputs['invec'] = np.array(range(C1._inputs._data.size, 0, -1), float)\n C1._discrete_inputs['disc_in'] = 'C1foo'\n\n if C2 in p.model.par._subsystems_myproc:\n C2._inputs['invec'] = np.array(range(C2._inputs._data.size, 0, -1), float) * 3\n C2._discrete_inputs['disc_in'] = 'C2foo'\n\n p.run_model()\n\n ans = p.get_val('par.C2.invec', get_remote=True)\n np.testing.assert_allclose(ans, np.array([6, 3,3], dtype=float))\n ans = p.get_val('par.C2.outvec', get_remote=True)\n np.testing.assert_allclose(ans, np.array([12, 6, 6], dtype=float))\n ans = p.get_val('par.C1.invec', get_remote=True)\n np.testing.assert_allclose(ans, np.array([2, 1, 1], dtype=float))\n ans = p.get_val('par.C1.outvec', get_remote=True)\n np.testing.assert_allclose(ans, np.array([4, 2, 2], dtype=float))\n\n if C1 in p.model.par._subsystems_myproc:\n ans = p.get_val('par.C1.disc_in', get_remote=False)\n self.assertEqual(ans, 'C1foo')\n ans = p.get_val('par.C1.disc_out', get_remote=False)\n self.assertEqual(ans, 'C1foobar')\n\n if C2 in p.model.par._subsystems_myproc:\n ans = p.get_val('par.C2.disc_in', get_remote=False)\n self.assertEqual(ans, 'C2foo')\n ans = p.get_val('par.C2.disc_out', get_remote=False)\n self.assertEqual(ans, 'C2foobar')\n\n ans = p.get_val('par.C1.disc_in', get_remote=True)\n self.assertEqual(ans, 'C1foo')\n ans = p.get_val('par.C2.disc_in', get_remote=True)\n self.assertEqual(ans, 'C2foo')\n ans = p.get_val('par.C1.disc_out', get_remote=True)\n self.assertEqual(ans, 
'C1foobar')\n ans = p.get_val('par.C2.disc_out', get_remote=True)\n self.assertEqual(ans, 'C2foobar')\n\n\n def test_prob_getval_dist_disc(self):\n size = 14\n\n p = om.Problem()\n\n top = p.model\n C1 = top.add_subsystem(\"C1\", DistribInputDistribOutputDiscreteComp(arr_size=size))\n p.setup()\n\n # Conclude setup but don't run model.\n p.final_setup()\n\n rank = p.comm.rank\n\n C1._inputs['invec'] = np.array(range(C1._inputs._data.size, 0, -1), float) * (rank + 1)\n C1._discrete_inputs['disc_in'] = 'boo'\n\n p.run_model()\n\n if rank == 0:\n ans = p.get_val('C1.invec', get_remote=False)\n np.testing.assert_allclose(ans, np.array([4,3,2,1], dtype=float))\n ans = p.get_val('C1.outvec', get_remote=False)\n np.testing.assert_allclose(ans, np.array([8,6,4,2], dtype=float))\n elif rank == 1:\n ans = p.get_val('C1.invec', get_remote=False)\n np.testing.assert_allclose(ans, np.array([8,6,4,2], dtype=float))\n ans = p.get_val('C1.outvec', get_remote=False)\n np.testing.assert_allclose(ans, np.array([16,12,8,4], dtype=float))\n elif rank == 2:\n ans = p.get_val('C1.invec', get_remote=False)\n np.testing.assert_allclose(ans, np.array([9,6,3], dtype=float))\n ans = p.get_val('C1.outvec', get_remote=False)\n np.testing.assert_allclose(ans, np.array([18,12,6], dtype=float))\n elif rank == 3:\n ans = p.get_val('C1.invec', get_remote=False)\n np.testing.assert_allclose(ans, np.array([12,8,4], dtype=float))\n ans = p.get_val('C1.outvec', get_remote=False)\n np.testing.assert_allclose(ans, np.array([24,16,8], dtype=float))\n\n ans = p.get_val('C1.invec', get_remote=True)\n np.testing.assert_allclose(ans, np.array([4,3,2,1,8,6,4,2,9,6,3,12,8,4], dtype=float))\n ans = p.get_val('C1.outvec', get_remote=True)\n np.testing.assert_allclose(ans, np.array([8,6,4,2,16,12,8,4,18,12,6,24,16,8], dtype=float))\n\n ans = p.get_val('C1.disc_in', get_remote=False)\n self.assertEqual(ans, 'boo')\n ans = p.get_val('C1.disc_in', get_remote=True)\n self.assertEqual(ans, 'boo')\n ans = p.get_val('C1.disc_out', get_remote=False)\n self.assertEqual(ans, 'boobar')\n ans = p.get_val('C1.disc_out', get_remote=True)\n self.assertEqual(ans, 'boobar')\n\n\[email protected](PETScVector, \"PETSc is required.\")\[email protected](MPI, \"MPI is required.\")\nclass MPIFeatureTests(unittest.TestCase):\n\n N_PROCS = 2\n\n def test_distribcomp_feature(self):\n import numpy as np\n\n import openmdao.api as om\n from openmdao.utils.mpi import MPI\n from openmdao.utils.array_utils import evenly_distrib_idxs\n\n if not MPI:\n raise unittest.SkipTest()\n\n rank = MPI.COMM_WORLD.rank\n size = 15\n\n class DistribComp(om.ExplicitComponent):\n def initialize(self):\n self.options['distributed'] = True\n\n self.options.declare('size', types=int, default=1,\n desc=\"Size of input and output vectors.\")\n\n def setup(self):\n comm = self.comm\n rank = comm.rank\n\n size = self.options['size']\n\n # results in 8 entries for proc 0 and 7 entries for proc 1 when using 2 processes.\n sizes, offsets = evenly_distrib_idxs(comm.size, size)\n start = offsets[rank]\n end = start + sizes[rank]\n\n self.add_input('invec', np.ones(sizes[rank], float),\n src_indices=np.arange(start, end, dtype=int))\n\n self.add_output('outvec', np.ones(sizes[rank], float))\n\n def compute(self, inputs, outputs):\n if self.comm.rank == 0:\n outputs['outvec'] = inputs['invec'] * 2.0\n else:\n outputs['outvec'] = inputs['invec'] * -3.0\n\n class Summer(om.ExplicitComponent):\n \"\"\"Sums a distributed input.\"\"\"\n\n def initialize(self):\n self.options.declare('size', types=int, 
default=1,\n desc=\"Size of input and output vectors.\")\n\n def setup(self):\n comm = self.comm\n rank = comm.rank\n\n size = self.options['size']\n\n # this results in 8 entries for proc 0 and 7 entries for proc 1\n # when using 2 processes.\n sizes, offsets = evenly_distrib_idxs(comm.size, size)\n start = offsets[rank]\n end = start + sizes[rank]\n\n # NOTE: you must specify src_indices here for the input. Otherwise,\n # you'll connect the input to [0:local_input_size] of the\n # full distributed output!\n self.add_input('invec', np.ones(sizes[rank], float),\n src_indices=np.arange(start, end, dtype=int))\n\n self.add_output('out', 0.0)\n\n def compute(self, inputs, outputs):\n data = np.zeros(1)\n data[0] = np.sum(inputs['invec'])\n\n total = np.zeros(1)\n self.comm.Allreduce(data, total, op=MPI.SUM)\n\n outputs['out'] = total[0]\n\n p = om.Problem()\n top = p.model\n top.add_subsystem(\"indep\", om.IndepVarComp('x', np.zeros(size)))\n top.add_subsystem(\"C2\", DistribComp(size=size))\n top.add_subsystem(\"C3\", Summer(size=size))\n\n top.connect('indep.x', 'C2.invec')\n top.connect('C2.outvec', 'C3.invec')\n\n p.setup()\n\n p['indep.x'] = np.ones(size)\n\n p.run_model()\n\n assert_rel_error(self, p['C3.out'], -5.)\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass TestGroupMPI(unittest.TestCase):\n N_PROCS = 2\n\n def test_promote_distrib(self):\n import numpy as np\n\n import openmdao.api as om\n\n class MyComp(om.ExplicitComponent):\n def setup(self):\n # decide what parts of the array we want based on our rank\n if self.comm.rank == 0:\n idxs = [0, 1, 2]\n else:\n # use [3, -1] here rather than [3, 4] just to show that we\n # can use negative indices.\n idxs = [3, -1]\n\n self.add_input('x', np.ones(len(idxs)), src_indices=idxs)\n self.add_output('y', 1.0)\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])*2.0\n\n p = om.Problem()\n\n p.model.add_subsystem('indep', om.IndepVarComp('x', np.arange(5, dtype=float)),\n promotes_outputs=['x'])\n\n p.model.add_subsystem('C1', MyComp(),\n promotes_inputs=['x'])\n\n p.setup()\n p.run_model()\n\n # each rank holds the assigned portion of the input array\n assert_rel_error(self, p['C1.x'],\n np.arange(3, dtype=float) if p.model.C1.comm.rank == 0 else\n np.arange(3, 5, dtype=float))\n\n # the output in each rank is based on the local inputs\n assert_rel_error(self, p['C1.y'], 6. if p.model.C1.comm.rank == 0 else 14.)\n\n\nif __name__ == '__main__':\n from openmdao.utils.mpi import mpirun_tests\n mpirun_tests()\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.ones", "numpy.arange", "numpy.hstack", "numpy.int32" ] ]
edpsw/imbalanced-learn
[ "a376fa6feb5c7d630a3cab5623b442f65eb97f66" ]
[ "imblearn/under_sampling/_prototype_generation/tests/test_cluster_centroids.py" ]
[ "\"\"\"Test the module cluster centroids.\"\"\"\nfrom collections import Counter\n\nimport pytest\nimport numpy as np\nfrom scipy import sparse\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets import make_classification\n\nfrom imblearn.under_sampling import ClusterCentroids\n\nRND_SEED = 0\nX = np.array(\n [\n [0.04352327, -0.20515826],\n [0.92923648, 0.76103773],\n [0.20792588, 1.49407907],\n [0.47104475, 0.44386323],\n [0.22950086, 0.33367433],\n [0.15490546, 0.3130677],\n [0.09125309, -0.85409574],\n [0.12372842, 0.6536186],\n [0.13347175, 0.12167502],\n [0.094035, -2.55298982],\n ]\n)\nY = np.array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1])\nR_TOL = 1e-4\n\n\[email protected](\n \"X, expected_voting\", [(X, \"soft\"), (sparse.csr_matrix(X), \"hard\")]\n)\ndef test_fit_resample_check_voting(X, expected_voting):\n cc = ClusterCentroids(random_state=RND_SEED)\n cc.fit_resample(X, Y)\n assert cc.voting_ == expected_voting\n\n\ndef test_fit_resample_auto():\n sampling_strategy = \"auto\"\n cc = ClusterCentroids(sampling_strategy=sampling_strategy, random_state=RND_SEED)\n X_resampled, y_resampled = cc.fit_resample(X, Y)\n assert X_resampled.shape == (6, 2)\n assert y_resampled.shape == (6,)\n\n\ndef test_fit_resample_half():\n sampling_strategy = {0: 3, 1: 6}\n cc = ClusterCentroids(sampling_strategy=sampling_strategy, random_state=RND_SEED)\n X_resampled, y_resampled = cc.fit_resample(X, Y)\n assert X_resampled.shape == (9, 2)\n assert y_resampled.shape == (9,)\n\n\ndef test_multiclass_fit_resample():\n y = Y.copy()\n y[5] = 2\n y[6] = 2\n cc = ClusterCentroids(random_state=RND_SEED)\n _, y_resampled = cc.fit_resample(X, y)\n count_y_res = Counter(y_resampled)\n assert count_y_res[0] == 2\n assert count_y_res[1] == 2\n assert count_y_res[2] == 2\n\n\ndef test_fit_resample_object():\n sampling_strategy = \"auto\"\n cluster = KMeans(random_state=RND_SEED)\n cc = ClusterCentroids(\n sampling_strategy=sampling_strategy,\n random_state=RND_SEED,\n estimator=cluster,\n )\n\n X_resampled, y_resampled = cc.fit_resample(X, Y)\n assert X_resampled.shape == (6, 2)\n assert y_resampled.shape == (6,)\n\n\ndef test_fit_hard_voting():\n sampling_strategy = \"auto\"\n voting = \"hard\"\n cluster = KMeans(random_state=RND_SEED)\n cc = ClusterCentroids(\n sampling_strategy=sampling_strategy,\n random_state=RND_SEED,\n estimator=cluster,\n voting=voting,\n )\n\n X_resampled, y_resampled = cc.fit_resample(X, Y)\n assert X_resampled.shape == (6, 2)\n assert y_resampled.shape == (6,)\n for x in X_resampled:\n assert np.any(np.all(x == X, axis=1))\n\n\[email protected](\n \"cluster_centroids_params, err_msg\",\n [\n ({\"estimator\": \"rnd\"}, \"has to be a KMeans clustering\"),\n ({\"voting\": \"unknown\"}, \"needs to be one of\"),\n ],\n)\ndef test_fit_resample_error(cluster_centroids_params, err_msg):\n cc = ClusterCentroids(**cluster_centroids_params)\n with pytest.raises(ValueError, match=err_msg):\n cc.fit_resample(X, Y)\n\n\ndef test_cluster_centroids_n_jobs():\n # check that we deprecate the `n_jobs` parameter.\n cc = ClusterCentroids(n_jobs=1)\n with pytest.warns(FutureWarning) as record:\n cc.fit_resample(X, Y)\n assert len(record) == 1\n assert \"'n_jobs' was deprecated\" in record[0].message.args[0]\n\n\ndef test_cluster_centroids_hard_target_class():\n # check that the samples selecting by the hard voting corresponds to the\n # targeted class\n # non-regression test for:\n # https://github.com/scikit-learn-contrib/imbalanced-learn/issues/738\n X, y = make_classification(\n n_samples=1000,\n 
n_features=2,\n n_informative=1,\n n_redundant=0,\n n_repeated=0,\n n_clusters_per_class=1,\n weights=[0.3, 0.7],\n class_sep=0.01,\n random_state=0,\n )\n\n cc = ClusterCentroids(voting=\"hard\", random_state=0)\n X_res, y_res = cc.fit_resample(X, y)\n\n minority_class_indices = np.flatnonzero(y == 0)\n X_minority_class = X[minority_class_indices]\n\n resampled_majority_class_indices = np.flatnonzero(y_res == 1)\n X_res_majority = X_res[resampled_majority_class_indices]\n\n sample_from_minority_in_majority = [\n np.all(np.isclose(selected_sample, minority_sample))\n for selected_sample in X_res_majority\n for minority_sample in X_minority_class\n ]\n assert sum(sample_from_minority_in_majority) == 0\n" ]
[ [ "numpy.array", "numpy.isclose", "sklearn.cluster.KMeans", "sklearn.datasets.make_classification", "numpy.all", "scipy.sparse.csr_matrix", "numpy.flatnonzero" ] ]
tobeperson/sigsep-mus-eval
[ "e8e1ff92f97098d4f2b298fd218f3e826e476146" ]
[ "tests/test_museval.py" ]
[ "import numpy as np\nimport pytest\nimport museval.metrics as metrics\nimport museval\n\n\[email protected](params=[1, 2])\ndef nb_sources(request):\n return request.param\n\n\[email protected](params=[1, 2])\ndef nb_channels(request):\n return request.param\n\n\[email protected](params=[100, 1000])\ndef nb_samples(request):\n return request.param\n\n\[email protected](params=[-10, 10])\ndef nb_samples_diff(request):\n return request.param\n\n\ndef test_pad_or_truncate(\n nb_sources, nb_channels, nb_samples, nb_samples_diff\n):\n references = np.random.random(\n (nb_sources, nb_samples, nb_channels)\n )\n estimates = np.random.random(\n (nb_sources, nb_samples + nb_samples_diff, nb_channels)\n )\n\n references, estimates = museval.pad_or_truncate(references, estimates)\n assert references.shape[1] == estimates.shape[1]\n" ]
[ [ "numpy.random.random" ] ]