repo_name | hexsha | file_path | code | apis
---|---|---|---|---
georgetown-cset/ai-relevant-papers | [
"bab36b738385208165d7c4f269ccd2afa91fc2c3"
] | [
"scibert/models/bert_text_classifier.py"
] | [
"from typing import Dict, Optional, List, Any\n\nimport torch\nimport torch.nn.functional as F\nfrom allennlp.data import Vocabulary\nfrom allennlp.models.model import Model\nfrom allennlp.modules import FeedForward, TextFieldEmbedder, Seq2SeqEncoder\nfrom allennlp.nn import InitializerApplicator, RegularizerApplicator\nfrom allennlp.nn import util\nfrom allennlp.training.metrics import CategoricalAccuracy, F1Measure\nfrom overrides import overrides\nfrom scibert.models.text_classifier import TextClassifier\n\nfrom pytorch_toolbelt.losses import BinaryFocalLoss, FocalLoss\n\[email protected](\"bert_text_classifier\")\nclass BertTextClassifier(TextClassifier):\n \"\"\"\n Implements a basic text classifier:\n 1) Embed tokens using `text_field_embedder`\n 2) Get the CLS token\n 3) Final feedforward layer\n\n Optimized with CrossEntropyLoss. Evaluated with CategoricalAccuracy & F1.\n \"\"\"\n def __init__(self, vocab: Vocabulary,\n text_field_embedder: TextFieldEmbedder,\n verbose_metrics: False,\n dropout: float = 0.2,\n initializer: InitializerApplicator = InitializerApplicator(),\n regularizer: Optional[RegularizerApplicator] = None,\n loss: Optional[dict] = None,\n ) -> None:\n super(TextClassifier, self).__init__(vocab, regularizer)\n\n self.text_field_embedder = text_field_embedder\n self.dropout = torch.nn.Dropout(dropout)\n self.num_classes = self.vocab.get_vocab_size(\"labels\")\n self.classifier_feedforward = torch.nn.Linear(self.text_field_embedder.get_output_dim() , self.num_classes)\n\n self.label_accuracy = CategoricalAccuracy()\n self.label_f1_metrics = {}\n\n self.verbose_metrics = verbose_metrics\n\n for i in range(self.num_classes):\n self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace=\"labels\")] = F1Measure(positive_label=i)\n\n if loss is None or loss.get('type') == 'CrossEntropyLoss':\n self.loss = torch.nn.CrossEntropyLoss()\n elif loss.get('type') == 'BinaryFocalLoss':\n self.loss = BinaryFocalLoss(alpha=loss.get('alpha'), gamma=loss.get('gamma'))\n elif loss.get('type') == 'FocalLoss':\n self.loss = FocalLoss(alpha=loss.get('alpha'), gamma=loss.get('gamma'))\n elif loss.get('type') == 'MultiLabelMarginLoss':\n self.loss = torch.nn.MultiLabelMarginLoss()\n elif loss.get('type') == 'MultiLabelSoftMarginLoss':\n self.loss = torch.nn.MultiLabelSoftMarginLoss(\n weight=torch.tensor(loss.get('weight')) if 'weight' in loss else None)\n else:\n raise ValueError(f'Unexpected loss \"{loss}\"')\n\n initializer(self)\n\n @overrides\n def forward(self,\n text: Dict[str, torch.LongTensor],\n label: torch.IntTensor = None,\n metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:\n \"\"\"\n Parameters\n ----------\n text : Dict[str, torch.LongTensor]\n From a ``TextField``\n label : torch.IntTensor, optional (default = None)\n From a ``LabelField``\n metadata : ``List[Dict[str, Any]]``, optional, (default = None)\n Metadata containing the original tokenization of the premise and\n hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively.\n Returns\n -------\n An output dictionary consisting of:\n label_logits : torch.FloatTensor\n A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log probabilities of the label.\n label_probs : torch.FloatTensor\n A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the label.\n loss : torch.FloatTensor, optional\n A scalar loss to be optimised.\n \"\"\"\n embedded_text = self.text_field_embedder(text)\n pooled = self.dropout(embedded_text[:, 0, :])\n logits = 
self.classifier_feedforward(pooled)\n class_probs = F.softmax(logits, dim=1)\n\n output_dict = {\"logits\": logits}\n if label is not None:\n loss = self.loss(logits, label)\n output_dict[\"loss\"] = loss\n\n # compute F1 per label\n for i in range(self.num_classes):\n metric = self.label_f1_metrics[self.vocab.get_token_from_index(index=i, namespace=\"labels\")]\n metric(class_probs, label)\n self.label_accuracy(logits, label)\n return output_dict\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.nn.MultiLabelMarginLoss",
"torch.nn.functional.softmax"
]
] |
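
The `BertTextClassifier` above is an AllenNLP `Model`, but its forward pass reduces to a small amount of plain PyTorch: pool the [CLS] token, apply dropout, a linear layer, softmax, and cross-entropy. Below is a minimal, hypothetical sketch of that pattern using only the `torch` calls listed in the `apis` column, with random tensors standing in for the AllenNLP `TextFieldEmbedder` output; all shapes and hyperparameters are illustrative.

```python
import torch
import torch.nn.functional as F

# Standalone sketch of the CLS-pooling classifier head above (illustrative shapes).
batch_size, seq_len, hidden_dim, num_classes = 4, 16, 768, 3
embedded_text = torch.randn(batch_size, seq_len, hidden_dim)  # stand-in for the embedder output
labels = torch.randint(0, num_classes, (batch_size,))

dropout = torch.nn.Dropout(p=0.2)
classifier = torch.nn.Linear(hidden_dim, num_classes)
loss_fn = torch.nn.CrossEntropyLoss()

pooled = dropout(embedded_text[:, 0, :])   # 1) take the [CLS] token embedding
logits = classifier(pooled)                # 2) final feedforward layer -> (batch, num_classes)
class_probs = F.softmax(logits, dim=1)     # normalised label probabilities
loss = loss_fn(logits, labels)             # optimised with CrossEntropyLoss
```
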
benjaminr/biopython | [
"ad4fe9b0026fd9d4aa417168f4b620f6a681a0ff"
] | [
"Bio/codonalign/codonseq.py"
] | [
"# Copyright 2013 by Zheng Ruan ([email protected]).\n# All rights reserved.\n# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\"\"\"Code for dealing with coding sequence.\n\nCodonSeq class is inherited from Seq class. This is the core class to\ndeal with sequences in CodonAlignment in biopython.\n\n\"\"\"\nfrom __future__ import division, print_function\nfrom itertools import permutations\nfrom math import log\n\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.Alphabet import generic_dna, _ungap\n\nfrom Bio.codonalign.codonalphabet import CodonAlphabet, default_codon_alphabet, default_codon_table\n\n\nclass CodonSeq(Seq):\n \"\"\"CodonSeq is designed to be within the SeqRecords of a CodonAlignment class.\n\n CodonSeq is useful as it allows the user to specify\n reading frame when translate CodonSeq\n\n CodonSeq also accepts codon style slice by calling\n get_codon() method.\n\n **Important:** Ungapped CodonSeq can be any length if you\n specify the rf_table. Gapped CodonSeq should be a\n multiple of three.\n\n >>> codonseq = CodonSeq(\"AAATTTGGGCCAAATTT\", rf_table=(0,3,6,8,11,14))\n >>> print(codonseq.translate())\n KFGAKF\n\n test get_full_rf_table method\n\n >>> p = CodonSeq('AAATTTCCCGG-TGGGTTTAA', rf_table=(0, 3, 6, 9, 11, 14, 17))\n >>> full_rf_table = p.get_full_rf_table()\n >>> print(full_rf_table)\n [0, 3, 6, 9, 12, 15, 18]\n >>> print(p.translate(rf_table=full_rf_table, ungap_seq=False))\n KFPPWV*\n >>> p = CodonSeq('AAATTTCCCGGGAA-TTTTAA', rf_table=(0, 3, 6, 9, 14, 17))\n >>> print(p.get_full_rf_table())\n [0, 3, 6, 9, 12.0, 15, 18]\n >>> p = CodonSeq('AAA------------TAA', rf_table=(0, 3))\n >>> print(p.get_full_rf_table())\n [0, 3.0, 6.0, 9.0, 12.0, 15]\n\n \"\"\"\n\n def __init__(self, data='', alphabet=default_codon_alphabet,\n gap_char=\"-\", rf_table=None):\n \"\"\"Initialize the class.\"\"\"\n # rf_table should be a tuple or list indicating the every\n # codon position along the sequence. For example:\n # sequence = 'AAATTTGGGCCAAATTT'\n # rf_table = (0, 3, 6, 8, 11, 14)\n # the translated protein sequences will be\n # AAA TTT GGG GCC AAA TTT\n # K F G A K F\n # Notice: rf_table applies to ungapped sequence. If there\n # are gaps in the sequence, they will be discarded. This\n # feature ensures the rf_table is independent of where the\n # codon sequence appears in the alignment\n\n Seq.__init__(self, data.upper(), alphabet=alphabet)\n self.gap_char = gap_char\n\n if not isinstance(alphabet, CodonAlphabet):\n raise TypeError(\"Input alphabet should be a CodonAlphabet object.\")\n # check the length of the alignment to be a triple\n if rf_table is None:\n seq_ungapped = self._data.replace(gap_char, \"\")\n if len(self) % 3 != 0:\n raise ValueError(\"Sequence length is not a multiple of \"\n \"three (i.e. a whole number of codons)\")\n self.rf_table = list(filter(lambda x: x % 3 == 0,\n range(len(seq_ungapped))))\n # check alphabet\n # Not use Alphabet._verify_alphabet function because it\n # only works for single alphabet\n for i in self.rf_table:\n if self._data[i:i + 3] not in alphabet.letters:\n raise ValueError(\"Sequence contain codon not in the alphabet \"\n \"({0})! 
\".format(self._data[i:i + 3]))\n else:\n # if gap_char in self._data:\n # assert len(self) % 3 == 0, \\\n # \"Gapped sequence length is not a triple number\"\n if not isinstance(rf_table, (tuple, list)):\n raise TypeError(\"rf_table should be a tuple or list object\")\n if not all(isinstance(i, int) for i in rf_table):\n raise TypeError(\"Elements in rf_table should be int \"\n \"that specify the codon positions of \"\n \"the sequence\")\n seq_ungapped = self._data.replace(gap_char, \"\")\n for i in rf_table:\n if seq_ungapped[i:i + 3] not in alphabet.letters:\n raise ValueError(\"Sequence contain undefined letters \"\n \"from alphabet \"\n \"({0})!\".format(seq_ungapped[i:i + 3]))\n self.rf_table = rf_table\n\n def __getitem__(self, index):\n # TODO: handle alphabet elegantly\n return Seq(self._data[index], alphabet=generic_dna)\n\n def get_codon(self, index):\n \"\"\"Get the `index`-th codon from the sequence.\"\"\"\n if len(set(i % 3 for i in self.rf_table)) != 1:\n raise RuntimeError(\"frameshift detected. \"\n \"CodonSeq object is not able to deal \"\n \"with codon sequence with frameshift. \"\n \"Please use normal slice option.\")\n if isinstance(index, int):\n if index != -1:\n return self._data[index * 3:(index + 1) * 3]\n else:\n return self._data[index * 3:]\n else:\n # This slice ensures that codon will always be the unit\n # in slicing (it won't change to other codon if you are\n # using reverse slicing such as [::-1]).\n # The idea of the code below is to first map the slice\n # to amino acid sequence and then transform it into\n # codon sequence.\n aa_index = range(len(self) // 3)\n\n def cslice(p):\n aa_slice = aa_index[p]\n codon_slice = ''\n for i in aa_slice:\n codon_slice += self._data[i * 3:i * 3 + 3]\n return codon_slice\n\n codon_slice = cslice(index)\n return CodonSeq(codon_slice, alphabet=self.alphabet)\n\n def get_codon_num(self):\n \"\"\"Return the number of codons in the CodonSeq.\"\"\"\n return len(self.rf_table)\n\n def translate(self, codon_table=default_codon_table,\n stop_symbol=\"*\", rf_table=None, ungap_seq=True):\n \"\"\"Translate the CodonSeq based on the reading frame in rf_table.\n\n It is possible for the user to specify\n a rf_table at this point. If you want to include\n gaps in the translated sequence, this is the only\n way. ungap_seq should be set to true for this\n purpose.\n \"\"\"\n amino_acids = []\n if ungap_seq:\n tr_seq = self._data.replace(self.gap_char, \"\")\n else:\n tr_seq = self._data\n if rf_table is None:\n rf_table = self.rf_table\n p = -1 # initiation\n for i in rf_table:\n if isinstance(i, float):\n amino_acids.append('-')\n continue\n # elif '---' == tr_seq[i:i+3]:\n # amino_acids.append('-')\n # continue\n elif '-' in tr_seq[i:i + 3]:\n # considering two types of frameshift\n if p == -1 or p - i == 3:\n p = i\n codon = tr_seq[i:i + 6].replace('-', '')[:3]\n elif p - i > 3:\n codon = tr_seq[i:i + 3]\n p = i\n else:\n # normal condition without gaps\n codon = tr_seq[i:i + 3]\n p = i\n if codon in codon_table.stop_codons:\n amino_acids.append(stop_symbol)\n continue\n try:\n amino_acids.append(codon_table.forward_table[codon])\n except KeyError:\n raise RuntimeError(\"Unknown codon detected ({0}). 
Did you \"\n \"forget to specify the ungap_seq \"\n \"argument?\".format(codon))\n return \"\".join(amino_acids)\n\n def toSeq(self, alphabet=generic_dna):\n return Seq(self._data, generic_dna)\n\n def get_full_rf_table(self):\n \"\"\"Return full rf_table of the CodonSeq records.\n\n A full rf_table is different from a normal rf_table in that\n it translate gaps in CodonSeq. It is helpful to construct\n alignment containing frameshift.\n \"\"\"\n ungap_seq = self._data.replace(\"-\", \"\")\n codon_lst = [ungap_seq[i:i + 3] for i in self.rf_table]\n relative_pos = [self.rf_table[0]]\n for i in range(1, len(self.rf_table[1:]) + 1):\n relative_pos.append(self.rf_table[i] - self.rf_table[i - 1])\n full_rf_table = []\n codon_num = 0\n for i in filter(lambda x: x % 3 == 0, range(len(self._data))):\n if self._data[i:i + 3] == self.gap_char * 3:\n full_rf_table.append(i + 0.0)\n elif relative_pos[codon_num] == 0:\n full_rf_table.append(i)\n codon_num += 1\n elif relative_pos[codon_num] in (-1, -2):\n # check the gap status of previous codon\n gap_stat = len(self._data[i - 3:i].replace(\"-\", \"\"))\n if gap_stat == 3:\n full_rf_table.append(i + relative_pos[codon_num])\n elif gap_stat == 2:\n full_rf_table.append(i + 1 + relative_pos[codon_num])\n elif gap_stat == 1:\n full_rf_table.append(i + 2 + relative_pos[codon_num])\n codon_num += 1\n elif relative_pos[codon_num] > 0:\n full_rf_table.append(i + 0.0)\n try:\n this_len = len(self._data[i:i + 3].replace(\"-\", \"\"))\n relative_pos[codon_num] -= this_len\n except Exception: # TODO: IndexError?\n # we probably reached the last codon\n pass\n return full_rf_table\n\n def full_translate(self, codon_table=default_codon_table, stop_symbol=\"*\"):\n \"\"\"Apply full translation with gaps considered.\"\"\"\n full_rf_table = self.get_full_rf_table()\n return self.translate(codon_table=codon_table, stop_symbol=stop_symbol,\n rf_table=full_rf_table, ungap_seq=False)\n\n def ungap(self, gap=None):\n if hasattr(self.alphabet, \"gap_char\"):\n if not gap:\n gap = self.alphabet.gap_char\n elif gap != self.alphabet.gap_char:\n raise ValueError(\"Gap %s does not match %s from alphabet\" %\n (repr(gap), repr(self.alphabet.alphabet.gap_char)))\n alpha = _ungap(self.alphabet)\n elif not gap:\n raise ValueError(\"Gap character not given and not defined in \"\n \"alphabet\")\n else:\n alpha = self.alphabet # modify!\n if len(gap) != 1 or not isinstance(gap, str):\n raise ValueError(\"Unexpected gap character, %s\" % repr(gap))\n return CodonSeq(str(self._data).replace(gap, \"\"), alpha,\n rf_table=self.rf_table)\n\n @classmethod\n def from_seq(cls, seq, alphabet=default_codon_alphabet, rf_table=None):\n if rf_table is None:\n return cls(seq._data, alphabet=alphabet)\n else:\n return cls(seq._data, alphabet=alphabet, rf_table=rf_table)\n\n\ndef _get_codon_list(codonseq):\n \"\"\"List of codons according to full_rf_table for counting (PRIVATE).\"\"\"\n # if not isinstance(codonseq, CodonSeq):\n # raise TypeError(\"_get_codon_list accept a CodonSeq object \"\n # \"({0} detected)\".format(type(codonseq)))\n full_rf_table = codonseq.get_full_rf_table()\n codon_lst = []\n for i, k in enumerate(full_rf_table):\n if isinstance(k, int):\n start = k\n try:\n end = int(full_rf_table[i + 1])\n except IndexError:\n end = start + 3\n this_codon = str(codonseq[start:end])\n if len(this_codon) == 3:\n codon_lst.append(this_codon)\n else:\n codon_lst.append(str(this_codon.ungap()))\n elif str(codonseq[int(k):int(k) + 3]) == \"---\":\n codon_lst.append(\"---\")\n else:\n # this may 
be problematic, as normally no codon should\n # fall into this condition\n codon_lst.append(codonseq[int(k):int(k) + 3])\n return codon_lst\n\n\ndef cal_dn_ds(codon_seq1, codon_seq2, method=\"NG86\",\n codon_table=default_codon_table, k=1, cfreq=None):\n \"\"\"Calculate dN and dS of the given two sequences.\n\n Available methods:\n - NG86 - `Nei and Gojobori (1986)`_ (PMID 3444411).\n - LWL85 - `Li et al. (1985)`_ (PMID 3916709).\n - ML - `Goldman and Yang (1994)`_ (PMID 7968486).\n - YN00 - `Yang and Nielsen (2000)`_ (PMID 10666704).\n\n .. _`Nei and Gojobori (1986)`: http://www.ncbi.nlm.nih.gov/pubmed/3444411\n .. _`Li et al. (1985)`: http://www.ncbi.nlm.nih.gov/pubmed/3916709\n .. _`Goldman and Yang (1994)`: http://mbe.oxfordjournals.org/content/11/5/725\n .. _`Yang and Nielsen (2000)`: https://doi.org/10.1093/oxfordjournals.molbev.a026236\n\n Arguments:\n - codon_seq1 - CodonSeq or or SeqRecord that contains a CodonSeq\n - codon_seq2 - CodonSeq or or SeqRecord that contains a CodonSeq\n - w - transition/transversion ratio\n - cfreq - Current codon frequency vector can only be specified\n when you are using ML method. Possible ways of\n getting cfreq are: F1x4, F3x4 and F61.\n\n \"\"\"\n if isinstance(codon_seq1, CodonSeq) and isinstance(codon_seq2, CodonSeq):\n pass\n elif isinstance(codon_seq1, SeqRecord) and isinstance(codon_seq2, SeqRecord):\n codon_seq1 = codon_seq1.seq\n codon_seq2 = codon_seq2.seq\n else:\n raise TypeError(\"cal_dn_ds accepts two CodonSeq objects or SeqRecord \"\n \"that contains CodonSeq as its seq!\")\n if len(codon_seq1.get_full_rf_table()) != \\\n len(codon_seq2.get_full_rf_table()):\n raise RuntimeError(\"full_rf_table length of seq1 ({0}) and seq2 ({1}) \"\n \"are not the same\".format(\n len(codon_seq1.get_full_rf_table()),\n len(codon_seq2.get_full_rf_table()))\n )\n if cfreq is None:\n cfreq = 'F3x4'\n elif cfreq is not None and method != 'ML':\n raise RuntimeError(\"cfreq can only be specified when you \"\n \"are using ML method\")\n if cfreq not in ('F1x4', 'F3x4', 'F61'):\n import warnings\n warnings.warn(\"Unknown cfreq ({0}). Only F1x4, F3x4 and F61 are \"\n \"acceptable. 
Use F3x4 in the following.\".format(cfreq))\n cfreq = 'F3x4'\n seq1_codon_lst = _get_codon_list(codon_seq1)\n seq2_codon_lst = _get_codon_list(codon_seq2)\n # remove gaps in seq_codon_lst\n seq1 = []\n seq2 = []\n for i, j in zip(seq1_codon_lst, seq2_codon_lst):\n if ('-' not in i) and ('-' not in j):\n seq1.append(i)\n seq2.append(j)\n dnds_func = {'ML': _ml, 'NG86': _ng86, 'LWL85': _lwl85, 'YN00': _yn00}\n if method == \"ML\":\n return dnds_func[method](seq1, seq2, cfreq, codon_table)\n else:\n return dnds_func[method](seq1, seq2, k, codon_table)\n\n\n#################################################################\n# private functions for NG86 method\n#################################################################\n\ndef _ng86(seq1, seq2, k, codon_table):\n \"\"\"NG86 method main function (PRIVATE).\"\"\"\n S_sites1, N_sites1 = _count_site_NG86(seq1,\n codon_table=codon_table, k=k)\n S_sites2, N_sites2 = _count_site_NG86(seq2,\n codon_table=codon_table, k=k)\n S_sites = (S_sites1 + S_sites2) / 2.0\n N_sites = (N_sites1 + N_sites2) / 2.0\n SN = [0, 0]\n for i, j in zip(seq1, seq2):\n SN = [m + n for m, n in zip(SN, _count_diff_NG86(i, j,\n codon_table=codon_table))]\n\n ps = SN[0] / S_sites\n pn = SN[1] / N_sites\n if ps < 3 / 4:\n dS = abs(-3.0 / 4 * log(1 - 4.0 / 3 * ps))\n else:\n dS = -1\n if pn < 3 / 4:\n dN = abs(-3.0 / 4 * log(1 - 4.0 / 3 * pn))\n else:\n dN = -1\n return dN, dS\n\n\ndef _count_site_NG86(codon_lst, k=1, codon_table=default_codon_table):\n \"\"\"Count synonymous and non-synonymous sites of a list of codons (PRIVATE).\n\n Arguments:\n - codon_lst - A three letter codon list from a CodonSeq object.\n This can be returned from _get_codon_list method.\n - k - transition/transversion rate ratio.\n\n \"\"\"\n S_site = 0 # synonymous sites\n N_site = 0 # non-synonymous sites\n purine = ('A', 'G')\n pyrimidine = ('T', 'C')\n base_tuple = ('A', 'T', 'C', 'G')\n for codon in codon_lst:\n neighbor_codon = {'transition': [], 'transversion': []}\n # classify neighbor codons\n codon = codon.replace('U', 'T')\n if codon == '---':\n continue\n for n, i in enumerate(codon):\n for j in base_tuple:\n if i == j:\n pass\n elif i in purine and j in purine:\n codon_chars = [c for c in codon]\n codon_chars[n] = j\n this_codon = ''.join(codon_chars)\n neighbor_codon['transition'].append(this_codon)\n elif i in pyrimidine and j in pyrimidine:\n codon_chars = [c for c in codon]\n codon_chars[n] = j\n this_codon = ''.join(codon_chars)\n neighbor_codon['transition'].append(this_codon)\n else:\n codon_chars = [c for c in codon]\n codon_chars[n] = j\n this_codon = ''.join(codon_chars)\n neighbor_codon['transversion'].append(this_codon)\n # count synonymous and non-synonymous sites\n aa = codon_table.forward_table[codon]\n this_codon_N_site = this_codon_S_site = 0\n for neighbor in neighbor_codon['transition']:\n if neighbor in codon_table.stop_codons:\n this_codon_N_site += 1\n elif codon_table.forward_table[neighbor] == aa:\n this_codon_S_site += 1\n else:\n this_codon_N_site += 1\n for neighbor in neighbor_codon['transversion']:\n if neighbor in codon_table.stop_codons:\n this_codon_N_site += k\n elif codon_table.forward_table[neighbor] == aa:\n this_codon_S_site += k\n else:\n this_codon_N_site += k\n norm_const = (this_codon_N_site + this_codon_S_site) / 3\n S_site += this_codon_S_site / norm_const\n N_site += this_codon_N_site / norm_const\n return (S_site, N_site)\n\n\ndef _count_diff_NG86(codon1, codon2, codon_table=default_codon_table):\n \"\"\"Count differences between two codons, 
three-letter string (PRIVATE).\n\n The function will take multiple pathways from codon1 to codon2\n into account.\n \"\"\"\n if not isinstance(codon1, str) or not isinstance(codon2, str):\n raise TypeError(\"_count_diff_NG86 accepts string object \"\n \"to represent codon ({0}, {1} \"\n \"detected)\".format(type(codon1), type(codon2)))\n if len(codon1) != 3 or len(codon2) != 3:\n raise RuntimeError(\"codon should be three letter string ({0}, {1} \"\n \"detected)\".format(len(codon1), len(codon2)))\n SN = [0, 0] # synonymous and nonsynonymous counts\n if codon1 == '---' or codon2 == '---':\n return SN\n base_tuple = ('A', 'C', 'G', 'T')\n if not all(i in base_tuple for i in codon1):\n raise RuntimeError(\"Unrecognized character detected in codon1 {0} \"\n \"(Codons consist of \"\n \"A, T, C or G)\".format(codon1))\n if not all(i in base_tuple for i in codon2):\n raise RuntimeError(\"Unrecognized character detected in codon2 {0} \"\n \"(Codons consist of \"\n \"A, T, C or G)\".format(codon2))\n if codon1 == codon2:\n return SN\n else:\n diff_pos = []\n for i, k in enumerate(zip(codon1, codon2)):\n if k[0] != k[1]:\n diff_pos.append(i)\n\n def compare_codon(codon1, codon2, codon_table=default_codon_table,\n weight=1):\n \"\"\"Compare two codon accounting for different pathways.\"\"\"\n sd = nd = 0\n if len(set(map(codon_table.forward_table.get,\n [codon1, codon2]))) == 1:\n sd += weight\n else:\n nd += weight\n return (sd, nd)\n\n if len(diff_pos) == 1:\n SN = [i + j for i, j in\n zip(SN, compare_codon(codon1, codon2, codon_table=codon_table))]\n elif len(diff_pos) == 2:\n codon2_aa = codon_table.forward_table[codon2]\n for i in diff_pos:\n temp_codon = codon1[:i] + codon2[i] + codon1[i + 1:]\n SN = [i + j for i, j in zip(SN, compare_codon(\n codon1, temp_codon,\n codon_table=codon_table,\n weight=0.5))\n ]\n SN = [i + j for i, j in zip(SN, compare_codon(\n temp_codon, codon2,\n codon_table=codon_table,\n weight=0.5))\n ]\n elif len(diff_pos) == 3:\n codon2_aa = codon_table.forward_table[codon2]\n paths = list(permutations([0, 1, 2], 3))\n tmp_codon = []\n for p in paths:\n tmp1 = codon1[:p[0]] + codon2[p[0]] + codon1[p[0] + 1:]\n tmp2 = tmp1[:p[1]] + codon2[p[1]] + tmp1[p[1] + 1:]\n tmp_codon.append((tmp1, tmp2))\n SN = [i + j for i, j in zip(SN, compare_codon(codon1, tmp1,\n codon_table,\n weight=0.5 / 3))\n ]\n SN = [i + j for i, j in zip(SN, compare_codon(tmp1, tmp2,\n codon_table,\n weight=0.5 / 3))\n ]\n SN = [i + j for i, j in zip(SN, compare_codon(tmp2, codon2,\n codon_table,\n weight=0.5 / 3))\n ]\n return SN\n\n\n#################################################################\n# private functions for LWL85 method\n#################################################################\n\ndef _lwl85(seq1, seq2, k, codon_table):\n \"\"\"LWL85 method main function (PRIVATE).\n\n Nomenclature is according to Li et al. 
(1985), PMID 3916709.\n \"\"\"\n codon_fold_dict = _get_codon_fold(codon_table)\n # count number of sites in different degenerate classes\n fold0 = [0, 0]\n fold2 = [0, 0]\n fold4 = [0, 0]\n for codon in seq1 + seq2:\n fold_num = codon_fold_dict[codon]\n for f in fold_num:\n if f == '0':\n fold0[0] += 1\n elif f == '2':\n fold2[0] += 1\n elif f == '4':\n fold4[0] += 1\n L = [sum(fold0) / 2.0, sum(fold2) / 2.0, sum(fold4) / 2.0]\n # count number of differences in different degenerate classes\n PQ = [0] * 6 # with P0, P2, P4, Q0, Q2, Q4 in each position\n for codon1, codon2 in zip(seq1, seq2):\n if (codon1 == \"---\" or codon2 == \"---\") or codon1 == codon2:\n continue\n else:\n PQ = [i + j for i, j in zip(PQ, _diff_codon(\n codon1,\n codon2,\n fold_dict=codon_fold_dict)\n )]\n PQ = [i / j for i, j in zip(PQ, L * 2)]\n P = PQ[:3]\n Q = PQ[3:]\n A = [(1. / 2) * log(1. / (1 - 2 * i - j)) - (1. / 4) * log(1. / (1 - 2 * j))\n for i, j in zip(P, Q)]\n B = [(1. / 2) * log(1. / (1 - 2 * i)) for i in Q]\n dS = 3 * (L[2] * A[1] + L[2] * (A[2] + B[2])) / (L[1] + 3 * L[2])\n dN = 3 * (L[2] * B[1] + L[0] * (A[0] + B[0])) / (2 * L[1] + 3 * L[0])\n return dN, dS\n\n\ndef _get_codon_fold(codon_table):\n \"\"\"Classify different position in a codon into different folds (PRIVATE).\"\"\"\n def find_fold_class(codon, forward_table):\n base = set(['A', 'T', 'C', 'G'])\n fold = ''\n codon_base_lst = [i for i in codon]\n for i, b in enumerate(codon_base_lst):\n other_base = base - set(b)\n aa = []\n for j in other_base:\n codon_base_lst[i] = j\n try:\n aa.append(forward_table[''.join(codon_base_lst)])\n except KeyError:\n aa.append('stop')\n if aa.count(forward_table[codon]) == 0:\n fold += '0'\n elif aa.count(forward_table[codon]) in (1, 2):\n fold += '2'\n elif aa.count(forward_table[codon]) == 3:\n fold += '4'\n else:\n raise RuntimeError(\"Unknown Error, cannot assign the \"\n \"position to a fold\")\n codon_base_lst[i] = b\n return fold\n fold_table = {}\n for codon in codon_table.forward_table:\n if 'U' not in codon:\n fold_table[codon] = find_fold_class(codon,\n codon_table.forward_table)\n fold_table[\"---\"] = '---'\n return fold_table\n\n\ndef _diff_codon(codon1, codon2, fold_dict):\n \"\"\"Count number of different substitution types between two codons (PRIVATE).\n\n returns tuple (P0, P2, P4, Q0, Q2, Q4)\n\n Nomenclature is according to Li et al. 
(1958), PMID 3916709.\n \"\"\"\n P0 = P2 = P4 = Q0 = Q2 = Q4 = 0\n fold_num = fold_dict[codon1]\n purine = ('A', 'G')\n pyrimidine = ('T', 'C')\n for n, (i, j) in enumerate(zip(codon1, codon2)):\n if i != j and (i in purine and j in purine):\n if fold_num[n] == '0':\n P0 += 1\n elif fold_num[n] == '2':\n P2 += 1\n elif fold_num[n] == '4':\n P4 += 1\n else:\n raise RuntimeError(\"Unexpected fold_num %d\" % fold_num[n])\n if i != j and (i in pyrimidine and j in pyrimidine):\n if fold_num[n] == '0':\n P0 += 1\n elif fold_num[n] == '2':\n P2 += 1\n elif fold_num[n] == '4':\n P4 += 1\n else:\n raise RuntimeError(\"Unexpected fold_num %d\" % fold_num[n])\n if i != j and ((i in purine and j in pyrimidine) or\n (i in pyrimidine and j in purine)):\n if fold_num[n] == '0':\n Q0 += 1\n elif fold_num[n] == '2':\n Q2 += 1\n elif fold_num[n] == '4':\n Q4 += 1\n else:\n raise RuntimeError(\"Unexpected fold_num %d\" % fold_num[n])\n return (P0, P2, P4, Q0, Q2, Q4)\n\n\n#################################################################\n# private functions for YN00 method\n#################################################################\n\ndef _yn00(seq1, seq2, k, codon_table):\n \"\"\"YN00 method main function (PRIVATE).\n\n Nomenclature is according to Yang and Nielsen (2000), PMID 10666704.\n \"\"\"\n from collections import defaultdict\n from scipy.linalg import expm\n fcodon = [{'A': 0, 'G': 0, 'C': 0, 'T': 0},\n {'A': 0, 'G': 0, 'C': 0, 'T': 0},\n {'A': 0, 'G': 0, 'C': 0, 'T': 0}]\n codon_fold_dict = _get_codon_fold(codon_table)\n fold0_cnt = defaultdict(int)\n fold4_cnt = defaultdict(int)\n for codon in seq1 + seq2:\n # count sites at different codon position\n if codon != '---':\n fcodon[0][codon[0]] += 1\n fcodon[1][codon[1]] += 1\n fcodon[2][codon[2]] += 1\n # count sites in different degenerate fold class\n fold_num = codon_fold_dict[codon]\n for i, f in enumerate(fold_num):\n if f == '0':\n fold0_cnt[codon[i]] += 1\n elif f == '4':\n fold4_cnt[codon[i]] += 1\n f0_total = sum(fold0_cnt.values())\n f4_total = sum(fold4_cnt.values())\n for i, j in zip(fold0_cnt, fold4_cnt):\n fold0_cnt[i] = fold0_cnt[i] / f0_total\n fold4_cnt[i] = fold4_cnt[i] / f4_total\n # TODO:\n # the initial kappa is different from what yn00 gives,\n # try to find the problem.\n TV = _get_TV(seq1, seq2, codon_table=codon_table)\n k04 = (_get_kappa_t(fold0_cnt, TV), _get_kappa_t(fold4_cnt, TV))\n kappa = (f0_total * k04[0] + f4_total * k04[1]) / (f0_total + f4_total)\n # kappa = 2.4285\n # count synonymous sites and non-synonymous sites\n for i in range(3):\n tot = sum(fcodon[i].values())\n fcodon[i] = dict((j, k / tot) for j, k in fcodon[i].items())\n pi = defaultdict(int)\n for i in list(codon_table.forward_table.keys()) + codon_table.stop_codons:\n if 'U' not in i:\n pi[i] = 0\n for i in seq1 + seq2:\n pi[i] += 1\n S_sites1, N_sites1, bfreqSN1 = _count_site_YN00(seq1, seq2, pi,\n k=kappa,\n codon_table=codon_table)\n S_sites2, N_sites2, bfreqSN2 = _count_site_YN00(seq2, seq1, pi,\n k=kappa,\n codon_table=codon_table)\n N_sites = (N_sites1 + N_sites2) / 2\n S_sites = (S_sites1 + S_sites2) / 2\n bfreqSN = [{'A': 0, 'T': 0, 'C': 0, 'G': 0},\n {'A': 0, 'T': 0, 'C': 0, 'G': 0}]\n for i in range(2):\n for b in ('A', 'T', 'C', 'G'):\n bfreqSN[i][b] = (bfreqSN1[i][b] + bfreqSN2[i][b]) / 2\n # use NG86 method to get initial t and w\n SN = [0, 0]\n for i, j in zip(seq1, seq2):\n SN = [m + n for m, n in zip(SN, _count_diff_NG86(\n i, j,\n codon_table=codon_table)\n )\n ]\n ps = SN[0] / S_sites\n pn = SN[1] / N_sites\n p = sum(SN) / 
(S_sites + N_sites)\n w = log(1 - 4.0 / 3 * pn) / log(1 - 4.0 / 3 * ps)\n t = -3 / 4 * log(1 - 4 / 3 * p)\n tolerance = 1e-5\n dSdN_pre = [0, 0]\n for temp in range(20):\n # count synonymous and nonsynonymous differences under kappa, w, t\n codon_lst = [i for i in\n list(codon_table.forward_table.keys()) +\n codon_table.stop_codons if 'U' not in i]\n Q = _get_Q(pi, kappa, w, codon_lst, codon_table)\n P = expm(Q * t)\n TV = [0, 0, 0, 0] # synonymous/nonsynonymous transition/transversion\n sites = [0, 0]\n codon_npath = {}\n for i, j in zip(seq1, seq2):\n if i != '---' and j != '---':\n codon_npath.setdefault((i, j), 0)\n codon_npath[(i, j)] += 1\n for i in codon_npath:\n tv = _count_diff_YN00(i[0], i[1], P, codon_lst, codon_table)\n TV = [m + n * codon_npath[i] for m, n in zip(TV, tv)]\n TV = (TV[0] / S_sites, TV[1] / S_sites), (TV[2] / N_sites, TV[3] / N_sites)\n # according to the DistanceF84() function of yn00.c in paml,\n # the t (e.q. 10) appears in PMID: 10666704 is dS and dN\n dSdN = []\n for f, tv in zip(bfreqSN, TV):\n dSdN.append(_get_kappa_t(f, tv, t=True))\n t = dSdN[0] * 3 * S_sites / (S_sites + N_sites) + \\\n dSdN[1] * 3 * N_sites / (S_sites + N_sites)\n w = dSdN[1] / dSdN[0]\n if all(map(lambda x: x < tolerance, [abs(i - j) for i, j in zip(dSdN, dSdN_pre)])):\n return dSdN[1], dSdN[0] # dN, dS\n dSdN_pre = dSdN\n\n\ndef _get_TV(codon_lst1, codon_lst2, codon_table=default_codon_table):\n \"\"\"Get TV (PRIVATE).\n\n Arguments:\n - T - proportions of transitional differences\n - V - proportions of transversional differences\n\n \"\"\"\n purine = ('A', 'G')\n pyrimidine = ('C', 'T')\n TV = [0, 0]\n sites = 0\n for codon1, codon2 in zip(codon_lst1, codon_lst2):\n if \"---\" not in (codon1, codon2):\n for i, j in zip(codon1, codon2):\n if i == j:\n pass\n elif i in purine and j in purine:\n TV[0] += 1\n elif i in pyrimidine and j in pyrimidine:\n TV[0] += 1\n else:\n TV[1] += 1\n sites += 1\n return (TV[0] / sites, TV[1] / sites)\n # return (TV[0], TV[1])\n\n\ndef _get_kappa_t(pi, TV, t=False):\n \"\"\"Calculate kappa (PRIVATE).\n\n The following formula and variable names are according to PMID: 10666704\n \"\"\"\n pi['Y'] = pi['T'] + pi['C']\n pi['R'] = pi['A'] + pi['G']\n A = (2 * (pi['T'] * pi['C'] + pi['A'] * pi['G']) +\n 2 * (pi['T'] * pi['C'] * pi['R'] / pi['Y'] + pi['A'] * pi['G'] * pi['Y'] / pi['R']) *\n (1 - TV[1] / (2 * pi['Y'] * pi['R'])) - TV[0]) / \\\n (2 * (pi['T'] * pi['C'] / pi['Y'] + pi['A'] * pi['G'] / pi['R']))\n B = 1 - TV[1] / (2 * pi['Y'] * pi['R'])\n a = -0.5 * log(A) # this seems to be an error in YANG's original paper\n b = -0.5 * log(B)\n kappaF84 = a / b - 1\n if t is False:\n kappaHKY85 = 1 + (pi['T'] * pi['C'] / pi['Y'] + pi['A'] * pi['G'] / pi['R']) *\\\n kappaF84 / (pi['T'] * pi['C'] + pi['A'] * pi['G'])\n return kappaHKY85\n else:\n t = (4 * pi['T'] * pi['C'] * (1 + kappaF84 / pi['Y']) +\n 4 * pi['A'] * pi['G'] * (1 + kappaF84 / pi['R']) + 4 * pi['Y'] * pi['R']) * b\n return t\n\n\ndef _count_site_YN00(codon_lst1, codon_lst2, pi, k,\n codon_table=default_codon_table):\n \"\"\"Site counting method from Ina / Yang and Nielsen (PRIVATE).\n\n Method from `Ina (1995)`_ as modified by `Yang and Nielsen (2000)`_.\n This will return the total number of synonymous and nonsynonymous sites\n and base frequencies in each category. The function is equivalent to\n the ``CountSites()`` function in ``yn00.c`` of PAML.\n\n .. _`Ina (1995)`: https://doi.org/10.1007/BF00167113\n .. 
_`Yang and Nielsen (2000)`: https://doi.org/10.1093/oxfordjournals.molbev.a026236\n\n \"\"\"\n if len(codon_lst1) != len(codon_lst2):\n raise RuntimeError(\"Length of two codon_lst should be the same \"\n \"(%d and %d detected)\" %\n (len(codon_lst1), len(codon_lst2)))\n else:\n length = len(codon_lst1)\n purine = ('A', 'G')\n pyrimidine = ('T', 'C')\n base_tuple = ('A', 'T', 'C', 'G')\n codon_dict = codon_table.forward_table\n stop = codon_table.stop_codons\n codon_npath = {}\n for i, j in zip(codon_lst1, codon_lst2):\n if i != '---' and j != '---':\n codon_npath.setdefault((i, j), 0)\n codon_npath[(i, j)] += 1\n S_sites = N_sites = 0\n freqSN = [{'A': 0, 'T': 0, 'C': 0, 'G': 0}, # synonymous\n {'A': 0, 'T': 0, 'C': 0, 'G': 0}] # nonsynonymous\n for codon_pair, npath in codon_npath.items():\n codon = codon_pair[0]\n S = N = 0\n for pos in range(3):\n for base in base_tuple:\n if codon[pos] == base:\n continue\n neighbor_codon = codon[:pos] + base + codon[pos + 1:]\n if neighbor_codon in stop:\n continue\n weight = pi[neighbor_codon]\n if codon[pos] in pyrimidine and base in pyrimidine:\n weight *= k\n elif codon[pos] in purine and base in purine:\n weight *= k\n if codon_dict[codon] == codon_dict[neighbor_codon]:\n S += weight\n freqSN[0][base] += weight * npath\n else:\n N += weight\n freqSN[1][base] += weight * npath\n S_sites += S * npath\n N_sites += N * npath\n norm_const = 3 * length / (S_sites + N_sites)\n S_sites *= norm_const\n N_sites *= norm_const\n for i in freqSN:\n norm_const = sum(i.values())\n for b in i:\n i[b] /= norm_const\n return S_sites, N_sites, freqSN\n\n\ndef _count_diff_YN00(codon1, codon2, P, codon_lst,\n codon_table=default_codon_table):\n \"\"\"Count differences between two codons (three-letter string; PRIVATE).\n\n The function will weighted multiple pathways from codon1 to codon2\n according to P matrix of codon substitution. 
The proportion\n of transition and transversion (TV) will also be calculated in\n the function.\n \"\"\"\n if not isinstance(codon1, str) or not isinstance(codon2, str):\n raise TypeError(\"_count_diff_YN00 accepts string object \"\n \"to represent codon ({0}, {1} \"\n \"detected)\".format(type(codon1), type(codon2)))\n if len(codon1) != 3 or len(codon2) != 3:\n raise RuntimeError(\"codon should be three letter string ({0}, {1} \"\n \"detected)\".format(len(codon1), len(codon2)))\n TV = [0, 0, 0, 0] # transition and transversion counts (synonymous and nonsynonymous)\n site = 0\n if codon1 == '---' or codon2 == '---':\n return TV\n base_tuple = ('A', 'C', 'G', 'T')\n if not all(i in base_tuple for i in codon1):\n raise RuntimeError(\"Unrecognized character detected in codon1 {0} \"\n \"(Codons consist of \"\n \"A, T, C or G)\".format(codon1))\n if not all(i in base_tuple for i in codon2):\n raise RuntimeError(\"Unrecognized character detected in codon2 {0} \"\n \"(Codons consist of \"\n \"A, T, C or G)\".format(codon2))\n if codon1 == codon2:\n return TV\n else:\n diff_pos = []\n for i, k in enumerate(zip(codon1, codon2)):\n if k[0] != k[1]:\n diff_pos.append(i)\n\n def count_TV(codon1, codon2, diff, codon_table, weight=1):\n purine = ('A', 'G')\n pyrimidine = ('T', 'C')\n dic = codon_table.forward_table\n stop = codon_table.stop_codons\n if codon1 in stop or codon2 in stop:\n # stop codon is always considered as nonsynonymous\n if codon1[diff] in purine and codon2[diff] in purine:\n return [0, 0, weight, 0]\n elif codon1[diff] in pyrimidine and codon2[diff] in pyrimidine:\n return [0, 0, weight, 0]\n else:\n return [0, 0, 0, weight]\n elif dic[codon1] == dic[codon2]:\n if codon1[diff] in purine and codon2[diff] in purine:\n return [weight, 0, 0, 0]\n elif codon1[diff] in pyrimidine and codon2[diff] in pyrimidine:\n return [weight, 0, 0, 0]\n else:\n return [0, weight, 0, 0]\n else:\n if codon1[diff] in purine and codon2[diff] in purine:\n return [0, 0, weight, 0]\n elif codon1[diff] in pyrimidine and codon2[diff] in pyrimidine:\n return [0, 0, weight, 0]\n else:\n return [0, 0, 0, weight]\n\n if len(diff_pos) == 1:\n prob = 1\n TV = [p + q for p, q in zip(TV, count_TV(codon1, codon2, diff_pos[0], codon_table))]\n elif len(diff_pos) == 2:\n codon2_aa = codon_table.forward_table[codon2]\n tmp_codon = [codon1[:i] + codon2[i] + codon1[i + 1:]\n for i in diff_pos]\n path_prob = []\n for i in tmp_codon:\n codon_idx = list(map(codon_lst.index, [codon1, i, codon2]))\n prob = (P[codon_idx[0], codon_idx[1]],\n P[codon_idx[1], codon_idx[2]])\n path_prob.append(prob[0] * prob[1])\n path_prob = [2 * i / sum(path_prob) for i in path_prob]\n for n, i in enumerate(diff_pos):\n temp_codon = codon1[:i] + codon2[i] + codon1[i + 1:]\n TV = [p + q for p, q in zip(TV, count_TV(codon1, temp_codon, i,\n codon_table,\n weight=path_prob[n] / 2))\n ]\n TV = [p + q for p, q in zip(TV, count_TV(codon1, temp_codon, i,\n codon_table,\n weight=path_prob[n] / 2))\n ]\n elif len(diff_pos) == 3:\n codon2_aa = codon_table.forward_table[codon2]\n paths = list(permutations([0, 1, 2], 3))\n path_prob = []\n tmp_codon = []\n for p in paths:\n tmp1 = codon1[:p[0]] + codon2[p[0]] + codon1[p[0] + 1:]\n tmp2 = tmp1[:p[1]] + codon2[p[1]] + tmp1[p[1] + 1:]\n tmp_codon.append((tmp1, tmp2))\n codon_idx = list(map(codon_lst.index, [codon1, tmp1, tmp2, codon2]))\n prob = (P[codon_idx[0], codon_idx[1]],\n P[codon_idx[1], codon_idx[2]],\n P[codon_idx[2], codon_idx[3]])\n path_prob.append(prob[0] * prob[1] * prob[2])\n path_prob = [3 * i / 
sum(path_prob) for i in path_prob]\n for i, j, k in zip(tmp_codon, path_prob, paths):\n TV = [p + q for p, q in zip(TV, count_TV(codon1, i[0], k[0],\n codon_table, weight=j / 3))\n ]\n TV = [p + q for p, q in zip(TV, count_TV(i[0], i[1], k[1],\n codon_table, weight=j / 3))\n ]\n TV = [p + q for p, q in zip(TV, count_TV(i[1], codon2, k[1],\n codon_table, weight=j / 3))\n ]\n if codon1 in codon_table.stop_codons or codon2 in codon_table.stop_codons:\n site = [0, 3]\n elif codon_table.forward_table[codon1] == codon_table.forward_table[codon2]:\n site = [3, 0]\n else:\n site = [0, 3]\n return TV\n\n\n#################################################################\n# private functions for Maximum Likelihood method\n#################################################################\n\ndef _ml(seq1, seq2, cmethod, codon_table):\n \"\"\"ML method main function (PRIVATE).\"\"\"\n from collections import Counter\n from scipy.optimize import minimize\n codon_cnt = Counter()\n pi = _get_pi(seq1, seq2, cmethod, codon_table=codon_table)\n for i, j in zip(seq1, seq2):\n # if i != j and ('---' not in (i, j)):\n if '---' not in (i, j):\n codon_cnt[(i, j)] += 1\n codon_lst = [i for i in\n list(codon_table.forward_table.keys()) + codon_table.stop_codons\n if 'U' not in i]\n\n # apply optimization\n def func(params, pi=pi, codon_cnt=codon_cnt, codon_lst=codon_lst,\n codon_table=codon_table):\n \"\"\"Temporary function, params = [t, k, w].\"\"\"\n return -_likelihood_func(\n params[0], params[1], params[2], pi,\n codon_cnt, codon_lst=codon_lst,\n codon_table=codon_table)\n\n # count sites\n opt_res = minimize(func, [1, 0.1, 2], method='L-BFGS-B',\n bounds=((1e-10, 20), (1e-10, 20), (1e-10, 10)),\n tol=1e-5)\n t, k, w = opt_res.x\n Q = _get_Q(pi, k, w, codon_lst, codon_table)\n Sd = Nd = 0\n for i, c1 in enumerate(codon_lst):\n for j, c2 in enumerate(codon_lst):\n if i != j:\n try:\n if codon_table.forward_table[c1] == \\\n codon_table.forward_table[c2]:\n # synonymous count\n Sd += pi[c1] * Q[i, j]\n else:\n # nonsynonymous count\n Nd += pi[c1] * Q[i, j]\n except KeyError:\n # This is probably due to stop codons\n pass\n Sd *= t\n Nd *= t\n # count differences (with w fixed to 1)\n opt_res = minimize(func, [1, 0.1, 2], method='L-BFGS-B',\n bounds=((1e-10, 20), (1e-10, 20), (1, 1)),\n tol=1e-5)\n t, k, w = opt_res.x\n Q = _get_Q(pi, k, w, codon_lst, codon_table)\n rhoS = rhoN = 0\n for i, c1 in enumerate(codon_lst):\n for j, c2 in enumerate(codon_lst):\n if i != j:\n try:\n if codon_table.forward_table[c1] == \\\n codon_table.forward_table[c2]:\n # synonymous count\n rhoS += pi[c1] * Q[i, j]\n else:\n # nonsynonymous count\n rhoN += pi[c1] * Q[i, j]\n except KeyError:\n # This is probably due to stop codons\n pass\n rhoS *= 3\n rhoN *= 3\n dN = Nd / rhoN\n dS = Sd / rhoS\n return dN, dS\n\n\ndef _get_pi(seq1, seq2, cmethod, codon_table=default_codon_table):\n \"\"\"Obtain codon frequency dict (pi) from two codon list (PRIVATE).\n\n This function is designed for ML method. 
Available counting methods\n (cfreq) are F1x4, F3x4 and F64.\n \"\"\"\n # TODO:\n # Stop codon should not be allowed according to Yang.\n # Try to modify this!\n pi = {}\n if cmethod == 'F1x4':\n fcodon = {'A': 0, 'G': 0, 'C': 0, 'T': 0}\n for i in seq1 + seq2:\n if i != '---':\n for c in i:\n fcodon[c] += 1\n tot = sum(fcodon.values())\n fcodon = dict((j, k / tot) for j, k in fcodon.items())\n for i in codon_table.forward_table.keys() + codon_table.stop_codons:\n if 'U' not in i:\n pi[i] = fcodon[i[0]] * fcodon[i[1]] * fcodon[i[2]]\n elif cmethod == 'F3x4':\n # three codon position\n fcodon = [{'A': 0, 'G': 0, 'C': 0, 'T': 0},\n {'A': 0, 'G': 0, 'C': 0, 'T': 0},\n {'A': 0, 'G': 0, 'C': 0, 'T': 0}]\n for i in seq1 + seq2:\n if i != '---':\n fcodon[0][i[0]] += 1\n fcodon[1][i[1]] += 1\n fcodon[2][i[2]] += 1\n for i in range(3):\n tot = sum(fcodon[i].values())\n fcodon[i] = dict((j, k / tot) for j, k in fcodon[i].items())\n for i in list(codon_table.forward_table.keys()) + \\\n codon_table.stop_codons:\n if 'U' not in i:\n pi[i] = fcodon[0][i[0]] * fcodon[1][i[1]] * fcodon[2][i[2]]\n elif cmethod == 'F61':\n for i in codon_table.forward_table.keys() + codon_table.stop_codons:\n if 'U' not in i:\n pi[i] = 0.1\n for i in seq1 + seq2:\n if i != '---':\n pi[i] += 1\n tot = sum(pi.values())\n pi = dict((j, k / tot) for j, k in pi.items())\n return pi\n\n\ndef _q(i, j, pi, k, w, codon_table=default_codon_table):\n \"\"\"Q matrix for codon substitution (PRIVATE).\n\n Arguments:\n - i, j : three letter codon string\n - pi : expected codon frequency\n - k : transition/transversion ratio\n - w : nonsynonymous/synonymous rate ratio\n - codon_table: Bio.Data.CodonTable object\n\n \"\"\"\n if i == j:\n # diagonal elements is the sum of all other elements\n return 0\n if i in codon_table.stop_codons or j in codon_table.stop_codons:\n return 0\n if (i not in pi) or (j not in pi):\n return 0\n purine = ('A', 'G')\n pyrimidine = ('T', 'C')\n diff = []\n for n, (c1, c2) in enumerate(zip(i, j)):\n if c1 != c2:\n diff.append((n, c1, c2))\n if len(diff) >= 2:\n return 0\n if codon_table.forward_table[i] == codon_table.forward_table[j]:\n # synonymous substitution\n if diff[0][1] in purine and diff[0][2] in purine:\n # transition\n return k * pi[j]\n elif diff[0][1] in pyrimidine and diff[0][2] in pyrimidine:\n # transition\n return k * pi[j]\n else:\n # transversion\n return pi[j]\n else:\n # nonsynonymous substitution\n if diff[0][1] in purine and diff[0][2] in purine:\n # transition\n return w * k * pi[j]\n elif diff[0][1] in pyrimidine and diff[0][2] in pyrimidine:\n # transition\n return w * k * pi[j]\n else:\n # transversion\n return w * pi[j]\n\n\ndef _get_Q(pi, k, w, codon_lst, codon_table):\n \"\"\"Q matrix for codon substitution (PRIVATE).\"\"\"\n import numpy as np\n codon_num = len(codon_lst)\n Q = np.zeros((codon_num, codon_num))\n for i in range(codon_num):\n for j in range(codon_num):\n if i != j:\n Q[i, j] = _q(codon_lst[i], codon_lst[j], pi, k, w,\n codon_table=codon_table)\n nucl_substitutions = 0\n for i in range(codon_num):\n Q[i, i] = -sum(Q[i, :])\n try:\n nucl_substitutions += pi[codon_lst[i]] * (-Q[i, i])\n except KeyError:\n pass\n Q = Q / nucl_substitutions\n return Q\n\n\ndef _likelihood_func(t, k, w, pi, codon_cnt, codon_lst, codon_table):\n \"\"\"Likelihood function for ML method (PRIVATE).\"\"\"\n from scipy.linalg import expm\n Q = _get_Q(pi, k, w, codon_lst, codon_table)\n P = expm(Q * t)\n likelihood = 0\n for i, c1 in enumerate(codon_lst):\n for j, c2 in enumerate(codon_lst):\n 
if (c1, c2) in codon_cnt:\n if P[i, j] * pi[c1] <= 0:\n likelihood += codon_cnt[(c1, c2)] * 0\n else:\n likelihood += codon_cnt[(c1, c2)] * log(pi[c1] * P[i, j])\n return likelihood\n\n\nif __name__ == \"__main__\":\n from Bio._utils import run_doctest\n run_doctest()\n"
] | [
[
"scipy.linalg.expm",
"scipy.optimize.minimize",
"numpy.zeros"
]
] |
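
The dN/dS machinery above mixes simple closed-form corrections with matrix-exponential likelihood code. The toy sketch below (invented numbers, not real sequence data) isolates the two numerical primitives from the `apis` column: the Jukes-Cantor style correction applied in `_ng86`, and the `expm(Q * t)` step the ML/YN00 paths use to turn a codon rate matrix into transition probabilities.

```python
from math import log

import numpy as np
from scipy.linalg import expm

# Hypothetical proportions of synonymous / nonsynonymous differences.
ps, pn = 0.10, 0.05
dS = abs(-3.0 / 4 * log(1 - 4.0 / 3 * ps))   # correction used in _ng86
dN = abs(-3.0 / 4 * log(1 - 4.0 / 3 * pn))
print(f"dN={dN:.4f} dS={dS:.4f} dN/dS={dN / dS:.4f}")

Q = np.array([[-0.3, 0.2, 0.1],              # toy 3x3 rate matrix; a real codon Q is 61x61
              [0.1, -0.2, 0.1],
              [0.2, 0.1, -0.3]])
P = expm(Q * 0.5)                            # transition probabilities for branch length t = 0.5
print(P.sum(axis=1))                         # rows of a probability matrix sum to ~1
```
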
ngduduong/captum | [
"6fe5f0f23ea975e73e0c0dee79bdc01b4223d283"
] | [
"captum/attr/_utils/common.py"
] | [
"#!/usr/bin/env python3\nfrom typing import Optional, Tuple, Union, overload\n\nimport torch\nfrom torch import Tensor\n\nfrom enum import Enum\nfrom inspect import signature\n\nfrom .approximation_methods import SUPPORTED_METHODS\n\n\nclass ExpansionTypes(Enum):\n repeat = 1\n repeat_interleave = 2\n\n\ndef safe_div(denom, quotient, default_value=None):\n r\"\"\"\n A simple utility function to perform `denom / quotient`\n if the statement is undefined => result will be `default_value`\n \"\"\"\n return denom / quotient if quotient != 0.0 else default_value\n\n\ndef _validate_target(num_samples, target):\n if isinstance(target, list) or (\n isinstance(target, torch.Tensor) and torch.numel(target) > 1\n ):\n assert num_samples == len(target), (\n \"The number of samples provied in the\"\n \"input {} does not match with the number of targets. {}\".format(\n num_samples, len(target)\n )\n )\n\n\ndef _validate_input(\n inputs,\n baselines,\n n_steps=50,\n method=\"riemann_trapezoid\",\n draw_baseline_from_distrib=False,\n):\n assert len(inputs) == len(baselines), (\n \"Input and baseline must have the same \"\n \"dimensions, baseline has {} features whereas input has {}.\".format(\n len(baselines), len(inputs)\n )\n )\n\n for input, baseline in zip(inputs, baselines):\n if draw_baseline_from_distrib:\n assert (\n isinstance(baseline, (int, float))\n or input.shape[1:] == baseline.shape[1:]\n ), (\n \"The samples in input and baseline batches must have\"\n \" the same shape or the baseline corresponding to the\"\n \" input tensor must be a scalar.\"\n \" Found baseline: {} and input: {} \".format(baseline, input)\n )\n else:\n assert (\n isinstance(baseline, (int, float))\n or input.shape == baseline.shape\n or baseline.shape[0] == 1\n ), (\n \"Baseline can be provided as a tensor for just one input and\"\n \" broadcasted to the batch or input and baseline must have the\"\n \" same shape or the baseline corresponding to each input tensor\"\n \" must be a scalar. Found baseline: {} and input: {}\".format(\n baseline, input\n )\n )\n\n assert (\n n_steps >= 0\n ), \"The number of steps must be a positive integer. \" \"Given: {}\".format(n_steps)\n\n assert method in SUPPORTED_METHODS, (\n \"Approximation method must be one for the following {}. \"\n \"Given {}\".format(SUPPORTED_METHODS, method)\n )\n\n\ndef _validate_noise_tunnel_type(nt_type, supported_noise_tunnel_types):\n assert nt_type in supported_noise_tunnel_types, (\n \"Noise types must be either `smoothgrad`, `smoothgrad_sq` or `vargrad`. 
\"\n \"Given {}\".format(nt_type)\n )\n\n\n@overload\ndef _format_tensor_into_tuples(inputs: None) -> None:\n ...\n\n\n@overload\ndef _format_tensor_into_tuples(\n inputs: Union[Tensor, Tuple[Tensor, ...]]\n) -> Tuple[Tensor, ...]:\n ...\n\n\ndef _format_tensor_into_tuples(\n inputs: Optional[Union[Tensor, Tuple[Tensor, ...]]]\n) -> Optional[Tuple[Tensor, ...]]:\n if inputs is None:\n return None\n if not isinstance(inputs, tuple):\n assert isinstance(\n inputs, torch.Tensor\n ), \"`inputs` must have type \" \"torch.Tensor but {} found: \".format(type(inputs))\n inputs = (inputs,)\n return inputs\n\n\ndef _format_input(inputs: Union[Tensor, Tuple[Tensor, ...]]) -> Tuple[Tensor, ...]:\n return _format_tensor_into_tuples(inputs)\n\n\ndef _format_additional_forward_args(additional_forward_args):\n if additional_forward_args is not None and not isinstance(\n additional_forward_args, tuple\n ):\n additional_forward_args = (additional_forward_args,)\n return additional_forward_args\n\n\ndef _format_baseline(\n baselines: Optional[\n Union[Tensor, int, float, Tuple[Union[Tensor, int, float], ...]]\n ],\n inputs: Tuple[Tensor, ...],\n) -> Tuple[Union[Tensor, int, float], ...]:\n if baselines is None:\n return _zeros(inputs)\n\n if not isinstance(baselines, tuple):\n baselines = (baselines,)\n\n for baseline in baselines:\n assert isinstance(\n baseline, (torch.Tensor, int, float)\n ), \"baseline input argument must be either a torch.Tensor or a number \\\n however {} detected\".format(\n type(baseline)\n )\n\n return baselines\n\n\ndef _format_input_baseline(\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n baselines: Optional[\n Union[Tensor, int, float, Tuple[Union[Tensor, int, float], ...]]\n ],\n) -> Tuple[Tuple[Tensor, ...], Tuple[Union[Tensor, int, float], ...]]:\n inputs = _format_input(inputs)\n baselines = _format_baseline(baselines, inputs)\n return inputs, baselines\n\n\n# This function can potentially be merged with the `format_baseline` function\n# however, since currently not all algorithms support baselines of type\n# callable this will be kept in a separate function.\ndef _format_callable_baseline(baselines, inputs):\n if callable(baselines):\n # Note: this assumes that if baselines is a function and if it takes\n # arguments, then the first argument is the `inputs`.\n # This can be expanded in the future with better type checks\n baseline_parameters = signature(baselines).parameters\n if len(baseline_parameters) == 0:\n baselines = baselines()\n else:\n baselines = baselines(inputs)\n return _format_baseline(baselines, inputs)\n\n\ndef _format_attributions(is_inputs_tuple, attributions):\n r\"\"\"\n In case input is a tensor and the attributions is returned in form of a\n tensor we take the first element of the attributions' tuple to match the\n same shape signatues of the inputs\n \"\"\"\n assert isinstance(attributions, tuple), \"Attributions must be in shape of a tuple\"\n assert is_inputs_tuple or len(attributions) == 1, (\n \"The input is a single tensor however the attributions aren't.\"\n \"The number of attributed tensors is: {}\".format(len(attributions))\n )\n return attributions if is_inputs_tuple else attributions[0]\n\n\ndef _format_and_verify_strides(strides, inputs):\n # Formats strides, which are necessary for occlusion\n # Assumes inputs are already formatted (in tuple)\n if strides is None:\n strides = tuple(1 for input in inputs)\n if len(inputs) == 1 and not (isinstance(strides, tuple) and len(strides) == 1):\n strides = (strides,)\n assert isinstance(strides, 
tuple) and len(strides) == len(\n inputs\n ), \"Strides must be provided for each input tensor.\"\n for i in range(len(inputs)):\n assert isinstance(strides[i], int) or (\n isinstance(strides[i], tuple)\n and len(strides[i]) == len(inputs[i].shape) - 1\n ), (\n \"Stride for input index {} is {}, which is invalid for input with \"\n \"shape {}. It must be either an int or a tuple with length equal to \"\n \"len(input_shape) - 1.\"\n ).format(\n i, strides[i], inputs[i].shape\n )\n\n return strides\n\n\ndef _format_and_verify_sliding_window_shapes(sliding_window_shapes, inputs):\n # Formats shapes of sliding windows, which is necessary for occlusion\n # Assumes inputs is already formatted (in tuple)\n if not isinstance(sliding_window_shapes[0], tuple):\n sliding_window_shapes = (sliding_window_shapes,)\n assert len(sliding_window_shapes) == len(\n inputs\n ), \"Must provide sliding window dimensions for each input tensor.\"\n for i in range(len(inputs)):\n assert (\n isinstance(sliding_window_shapes[i], tuple)\n and len(sliding_window_shapes[i]) == len(inputs[i].shape) - 1\n ), (\n \"Occlusion shape for input index {} is {} but should be a tuple with \"\n \"{} dimensions.\"\n ).format(\n i, sliding_window_shapes[i], len(inputs[i].shape) - 1\n )\n return sliding_window_shapes\n\n\ndef _compute_conv_delta_and_format_attrs(\n attr_algo,\n return_convergence_delta,\n attributions,\n start_point,\n end_point,\n additional_forward_args,\n target,\n is_inputs_tuple=False,\n):\n if return_convergence_delta:\n # computes convergence error\n delta = attr_algo.compute_convergence_delta(\n attributions,\n start_point,\n end_point,\n additional_forward_args=additional_forward_args,\n target=target,\n )\n return _format_attributions(is_inputs_tuple, attributions), delta\n else:\n return _format_attributions(is_inputs_tuple, attributions)\n\n\ndef _zeros(inputs):\n r\"\"\"\n Takes a tuple of tensors as input and returns a tuple that has the same\n length as `inputs` with each element as the integer 0.\n \"\"\"\n return tuple(0 for input in inputs)\n\n\ndef _tensorize_baseline(inputs, baselines):\n def _tensorize_single_baseline(baseline, input):\n if isinstance(baseline, (int, float)):\n return torch.full_like(input, baseline)\n if input.shape[0] > baseline.shape[0] and baseline.shape[0] == 1:\n return torch.cat([baseline] * input.shape[0])\n return baseline\n\n assert isinstance(inputs, tuple) and isinstance(baselines, tuple), (\n \"inputs and baselines must\"\n \"have tuple type but found baselines: {} and inputs: {}\".format(\n type(baselines), type(inputs)\n )\n )\n return tuple(\n _tensorize_single_baseline(baseline, input)\n for baseline, input in zip(baselines, inputs)\n )\n\n\ndef _reshape_and_sum(tensor_input, num_steps, num_examples, layer_size):\n # Used for attribution methods which perform integration\n # Sums across integration steps by reshaping tensor to\n # (num_steps, num_examples, (layer_size)) and summing over\n # dimension 0. 
Returns a tensor of size (num_examples, (layer_size))\n return torch.sum(\n tensor_input.reshape((num_steps, num_examples) + layer_size), dim=0\n )\n\n\ndef _verify_select_column(output, target):\n target = (target,) if isinstance(target, int) else target\n assert (\n len(target) <= len(output.shape) - 1\n ), \"Cannot choose target column with output shape %r.\" % (output.shape,)\n return output[(slice(None), *target)]\n\n\ndef _select_targets(output, target):\n if target is None:\n return output\n\n num_examples = output.shape[0]\n dims = len(output.shape)\n if isinstance(target, (int, tuple)):\n return _verify_select_column(output, target)\n elif isinstance(target, torch.Tensor):\n if torch.numel(target) == 1 and isinstance(target.item(), int):\n return _verify_select_column(output, target.item())\n elif len(target.shape) == 1 and torch.numel(target) == num_examples:\n assert dims == 2, \"Output must be 2D to select tensor of targets.\"\n return torch.gather(output, 1, target.reshape(len(output), 1))\n else:\n raise AssertionError(\n \"Tensor target dimension %r is not valid.\" % (target.shape,)\n )\n elif isinstance(target, list):\n assert len(target) == num_examples, \"Target list length does not match output!\"\n if type(target[0]) is int:\n assert dims == 2, \"Output must be 2D to select tensor of targets.\"\n return torch.gather(output, 1, torch.tensor(target).reshape(len(output), 1))\n elif type(target[0]) is tuple:\n return torch.stack(\n [output[(i,) + targ_elem] for i, targ_elem in enumerate(target)]\n )\n else:\n raise AssertionError(\"Target element type in list is not valid.\")\n else:\n raise AssertionError(\"Target type %r is not valid.\" % target)\n\n\ndef _run_forward(forward_func, inputs, target=None, additional_forward_args=None):\n # make everything a tuple so that it is easy to unpack without\n # using if-statements\n inputs = _format_input(inputs)\n additional_forward_args = _format_additional_forward_args(additional_forward_args)\n\n output = forward_func(\n *(*inputs, *additional_forward_args)\n if additional_forward_args is not None\n else inputs\n )\n return _select_targets(output, target)\n\n\ndef _expand_additional_forward_args(\n additional_forward_args, n_steps, expansion_type=ExpansionTypes.repeat\n):\n def _expand_tensor_forward_arg(\n additional_forward_arg, n_steps, expansion_type=ExpansionTypes.repeat\n ):\n if len(additional_forward_arg.size()) == 0:\n return additional_forward_arg\n if expansion_type == ExpansionTypes.repeat:\n return torch.cat([additional_forward_arg] * n_steps, dim=0)\n elif expansion_type == ExpansionTypes.repeat_interleave:\n return additional_forward_arg.repeat_interleave(n_steps, dim=0)\n else:\n raise NotImplementedError(\n \"Currently only `repeat` and `repeat_interleave`\"\n \" expansion_types are supported\"\n )\n\n if additional_forward_args is None:\n return None\n\n return tuple(\n _expand_tensor_forward_arg(additional_forward_arg, n_steps, expansion_type)\n if isinstance(additional_forward_arg, torch.Tensor)\n else additional_forward_arg\n for additional_forward_arg in additional_forward_args\n )\n\n\ndef _expand_target(target, n_steps, expansion_type=ExpansionTypes.repeat):\n if isinstance(target, list):\n if expansion_type == ExpansionTypes.repeat:\n return target * n_steps\n elif expansion_type == ExpansionTypes.repeat_interleave:\n expanded_target = []\n for i in target:\n expanded_target.extend([i] * n_steps)\n return expanded_target\n else:\n raise NotImplementedError(\n \"Currently only `repeat` and 
`repeat_interleave`\"\n \" expansion_types are supported\"\n )\n\n elif isinstance(target, torch.Tensor) and torch.numel(target) > 1:\n if expansion_type == ExpansionTypes.repeat:\n return torch.cat([target] * n_steps, dim=0)\n elif expansion_type == ExpansionTypes.repeat_interleave:\n return target.repeat_interleave(n_steps, dim=0)\n else:\n raise NotImplementedError(\n \"Currently only `repeat` and `repeat_interleave`\"\n \" expansion_types are supported\"\n )\n\n return target\n\n\ndef _call_custom_attribution_func(\n custom_attribution_func, multipliers, inputs, baselines\n):\n assert callable(custom_attribution_func), (\n \"`custom_attribution_func`\"\n \" must be a callable function but {} provided\".format(\n type(custom_attribution_func)\n )\n )\n custom_attr_func_params = signature(custom_attribution_func).parameters\n assert len(custom_attr_func_params) in range(1, 4), (\n \"`custom_attribution_func`\" \" must take at least one and at most 3 arguments\"\n )\n\n if len(custom_attr_func_params) == 1:\n return custom_attribution_func(multipliers)\n elif len(custom_attr_func_params) == 2:\n return custom_attribution_func(multipliers, inputs)\n elif len(custom_attr_func_params) == 3:\n return custom_attribution_func(multipliers, inputs, baselines)\n\n\ndef _extract_device(module, hook_inputs, hook_outputs):\n params = list(module.parameters())\n if (\n (hook_inputs is None or len(hook_inputs) == 0)\n and (hook_outputs is None or len(hook_outputs) == 0)\n and len(params) == 0\n ):\n raise RuntimeError(\n \"\"\"Unable to extract device information for the module\n {}. Both inputs and outputs to the forward hook and\n `module.parameters()` are empty.\n The reason that the inputs to the forward hook are empty\n could be due to the fact that the arguments to that\n module {} are all named and are passed as named\n variables to its forward function.\n \"\"\".format(\n module, module\n )\n )\n if hook_inputs is not None and len(hook_inputs) > 0:\n return hook_inputs[0].device\n if hook_outputs is not None and len(hook_outputs) > 0:\n return hook_outputs[0].device\n\n return params[0].device\n\n\nclass MaxList:\n \"\"\"Keep track of N maximal items\n\n Implementation of MaxList:\n for keeping track of the N top values of a large collection of items.\n Maintains a sorted list of the top N items that can be fetched with\n getlist().\n\n Example use:\n m = MaxList(2, key=lamda x: len(x))\n ml.add(\"Hello World\")\n ml.add(\"Mermaid Man!!!!\")\n ml.add(\"Why?\")\n ml.getlist() -> [\"Mermaid Man!!!!\", \"Hello World\"]\n\n If storing values that are not comparable, please provide a key function that\n that maps the values to some numeric value.\n \"\"\"\n\n def __init__(self, size, key=lambda x: x):\n self.size = size\n self.key = key\n self.list = []\n\n def add(self, item):\n \"\"\"Add an element to the MaxList\n\n Args:\n item: the item that you want to add to the MaxList\n \"\"\"\n value = self.key(item)\n if len(self.list) < self.size:\n if len(self.list) == 0:\n self.list.append((value, item))\n elif self.list[-1][0] >= value:\n self.list.append((value, item))\n else:\n self._insert(item, value)\n if self.list[-1][0] < value:\n self._insert(item, value)\n\n def get_list(self):\n \"\"\"Retrive the list of N maximal items in sorted order\n\n Returns:\n list: the sorted list of maximal items\n \"\"\"\n return [item[1] for item in self.list]\n\n def _insert(self, item, value):\n if len(self.list) == 0:\n self.list.append((value, item))\n\n for i in range(len(self.list)):\n if self.list[i][0] < 
value:\n self.list.insert(i, (value, item))\n break\n self.list = self.list[: self.size]\n"
] | [
[
"torch.cat",
"torch.tensor",
"torch.numel",
"torch.full_like"
]
] |
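The file in the row above (Captum-style attribution helpers) centres on two small tensor tricks: selecting one target column per example with torch.gather, and summing gradients over integration steps by reshaping to (num_steps, num_examples, ...). Below is a minimal, self-contained sketch of both on toy tensors; the shapes and values are invented for illustration and are not taken from the stored file.

import torch

output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])       # (batch=2, num_classes=2)
target = torch.tensor([1, 0])                         # one target column per example
selected = torch.gather(output, 1, target.reshape(len(output), 1))
print(selected.squeeze(1))                            # -> 0.9 and 0.8, one value per example

# Summing gradients over integration steps, as in _reshape_and_sum:
num_steps, num_examples, layer_size = 4, 2, (3,)
grads = torch.ones((num_steps * num_examples,) + layer_size)
summed = torch.sum(grads.reshape((num_steps, num_examples) + layer_size), dim=0)
print(summed.shape)                                   # torch.Size([2, 3])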
mauriceqch/pcc_attr_folding | [
"2fc37de7fb146a724ebada2e39df51de272fa01a"
] | [
"src/ops/nn_distance/nn_distance_test.py"
] | [
"#!/usr/bin/python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport unittest\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom nn_distance import nn_distance\nfrom nn_distance2 import nn_distance2\n\n\ndef simple_nn(xyz1, xyz2):\n def is_valid_shape(shape):\n return len(shape) == 3 and shape[-1] == 3\n assert(is_valid_shape(xyz1.shape))\n assert(is_valid_shape(xyz2.shape))\n assert(xyz1.shape[0] == xyz2.shape[0])\n diff = np.expand_dims(xyz1, -2) - np.expand_dims(xyz2, -3)\n square_dst = np.sum(diff**2, axis=-1)\n dst1 = np.min(square_dst, axis=-1)\n dst2 = np.min(square_dst, axis=-2)\n idx1 = np.argmin(square_dst, axis=-1)\n idx2 = np.argmin(square_dst, axis=-2)\n return dst1, idx1, dst2, idx2\n\n\ndef simple_nn_k(xyz1, xyz2, k):\n def is_valid_shape(shape):\n return len(shape) == 3 and shape[-1] == 3\n assert(is_valid_shape(xyz1.shape))\n assert(is_valid_shape(xyz2.shape))\n assert(xyz1.shape[0] == xyz2.shape[0])\n diff = np.expand_dims(xyz1, -2) - np.expand_dims(xyz2, -3)\n square_dst = np.sum(diff**2, axis=-1)\n idx1 = np.argpartition(square_dst, k, axis=-1)[:, :, k]\n idx2 = np.argpartition(square_dst, k, axis=-2)[:, k, :]\n dst1 = np.zeros(idx1.shape)\n dst2 = np.zeros(idx2.shape)\n for i in range(idx1.shape[0]):\n for j in range(idx1.shape[1]):\n dst1[i, j] = square_dst[i, j, idx1[i, j]]\n for i in range(idx2.shape[0]):\n for j in range(idx2.shape[1]):\n dst2[i, j] = square_dst[i, idx2[i, j], j]\n return dst1, idx1, dst2, idx2\n\n\ndef tf_nn(xyz1, xyz2, device, dist_function):\n graph = tf.Graph()\n with graph.as_default():\n with tf.device(device):\n xyz1 = tf.constant(xyz1)\n xyz2 = tf.constant(xyz2)\n nn = dist_function(xyz1, xyz2)\n\n with tf.Session(graph=graph) as sess:\n actual = sess.run(nn)\n return actual\n\ndevices = ['/cpu:0', '/gpu:0']\n\n\nclass TestNnDistance(unittest.TestCase):\n\n def _compare_values(self, actual, expected):\n\n self.assertEqual(len(actual), len(expected))\n # distances\n for i in [0, 2]:\n # TF 1.15, g++ 7.4, cuda 10.0, cudnn 7.4.2, GTX 1080 Ti\n # relative difference slightly exceeds 1e-7\n np.testing.assert_allclose(actual[i], expected[i], rtol=2e-7)\n # indices\n for i in [1, 3]:\n np.testing.assert_equal(actual[i], expected[i])\n\n def _compare(self, xyz1, xyz2, expected):\n for device in devices:\n actual = tf_nn(xyz1, xyz2, device, nn_distance)\n self._compare_values(actual, expected)\n\n def _compare2(self, xyz1, xyz2, expected):\n for device in devices:\n actual = tf_nn(xyz1, xyz2, device, nn_distance2)\n self._compare_values(actual, expected)\n\n def test_small(self):\n xyz1 = np.array([[[0, 0, 0], [1, 0, 0], [0, 1, 0]]], dtype=np.float32)\n xyz2 = np.array([[[-100, 0, 0], [2, 0, 0]]], dtype=np.float32)\n expected = \\\n np.array([[4, 1, 5]]), \\\n np.array([[1, 1, 1]]), \\\n np.array([[10000, 1]]), \\\n np.array([[0, 1]])\n self._compare(xyz1, xyz2, expected)\n\n def test_small_nn2(self):\n xyz1 = np.array([[[0, 0, 0], [1, 0, 0], [0, 1, 0]]], dtype=np.float32)\n xyz2 = np.array([[[-100, 0, 0], [2, 0, 0]]], dtype=np.float32)\n expected = \\\n np.array([[10000, 10201, 10001]]), \\\n np.array([[0, 0, 0]]), \\\n np.array([[10001, 4]]), \\\n np.array([[2, 0]])\n self._compare2(xyz1, xyz2, expected)\n\n def test_big(self):\n batch_size = 5\n n1 = 10\n n2 = 20\n xyz1 = np.random.randn(batch_size, n1, 3).astype(np.float32)\n xyz2 = np.random.randn(batch_size, n2, 3).astype(np.float32)\n expected = simple_nn(xyz1, xyz2)\n self._compare(xyz1, xyz2, 
expected)\n\n def test_big_nn2(self):\n batch_size = 5\n n1 = 10\n n2 = 20\n xyz1 = np.random.randn(batch_size, n1, 3).astype(np.float32)\n xyz2 = np.random.randn(batch_size, n2, 3).astype(np.float32)\n expected = simple_nn_k(xyz1, xyz2, 1)\n self._compare2(xyz1, xyz2, expected)\n\nif __name__ == '__main__':\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n unittest.main()\n"
] | [
[
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.argmin",
"tensorflow.compat.v1.Graph",
"numpy.zeros",
"numpy.sum",
"numpy.testing.assert_equal",
"numpy.random.randn",
"numpy.min",
"tensorflow.compat.v1.Session",
"numpy.argpartition",
"tensorflow.compat.v1.device",
"tensorflow.compat.v1.constant",
"numpy.expand_dims"
]
] |
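The nn_distance test above checks a custom TensorFlow op against a brute-force NumPy reference (simple_nn) built from broadcasting. The short sketch below reproduces just that reference idea on two tiny point sets; the coordinates are made up and no TensorFlow op is involved.

import numpy as np

xyz1 = np.array([[[0, 0, 0], [1, 0, 0]]], dtype=np.float32)    # (batch, n1, 3)
xyz2 = np.array([[[2, 0, 0], [0, 3, 0]]], dtype=np.float32)    # (batch, n2, 3)
diff = np.expand_dims(xyz1, -2) - np.expand_dims(xyz2, -3)     # (batch, n1, n2, 3)
sq_dst = np.sum(diff ** 2, axis=-1)                            # pairwise squared distances
print(sq_dst.min(axis=-1))     # [[4. 1.]]  nearest squared distance for each xyz1 point
print(sq_dst.argmin(axis=-1))  # [[0 0]]    index of the nearest xyz2 point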
Vcevalod/nnUNet4 | [
"86d4ff739e537e7054ff5bd8305982287b40a16c"
] | [
"nnunet/inference/segmentation_export.py"
] | [
"# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom copy import deepcopy\nimport numpy as np\nimport SimpleITK as sitk\nfrom nnunet.preprocessing.preprocessing import get_lowres_axis, get_do_separate_z, resample_data_or_seg\nfrom batchgenerators.utilities.file_and_folder_operations import *\n\n\ndef save_segmentation_nifti_from_softmax(segmentation_softmax, out_fname, dct, order=1, region_class_order=None,\n seg_postprogess_fn=None, seg_postprocess_args=None, resampled_npz_fname=None,\n non_postprocessed_fname=None, force_separate_z=None):\n \"\"\"\n This is a utility for writing segmentations to nifto and npz. It requires the data to have been preprocessed by\n GenericPreprocessor because it depends on the property dictionary output (dct) to know the geometry of the original\n data. segmentation_softmax does not have to have the same size in pixels as the original data, it will be\n resampled to match that. This is generally useful because the spacings our networks operate on are most of the time\n not the native spacings of the image data.\n If seg_postprogess_fn is not None then seg_postprogess_fnseg_postprogess_fn(segmentation, *seg_postprocess_args)\n will be called before nifto export\n There is a problem with python process communication that prevents us from communicating obejcts\n larger than 2 GB between processes (basically when the length of the pickle string that will be sent is\n communicated by the multiprocessing.Pipe object then the placeholder (\\%i I think) does not allow for long\n enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually\n patching system python code.) We circumvent that problem here by saving softmax_pred to a npy file that will\n then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either\n filename or np.ndarray for segmentation_softmax and will handle this automatically\n :param segmentation_softmax:\n :param out_fname:\n :param dct:\n :param order:\n :param region_class_order:\n :param seg_postprogess_fn:\n :param seg_postprocess_args:\n :param resampled_npz_fname:\n :param non_postprocessed_fname:\n :param force_separate_z: if None then we dynamically decide how to resample along z, if True/False then always\n /never resample along z separately. 
Do not touch unless you know what you are doing\n :return:\n \"\"\"\n if isinstance(segmentation_softmax, str):\n assert isfile(segmentation_softmax), \"If isinstance(segmentation_softmax, str) then \" \\\n \"isfile(segmentation_softmax) must be True\"\n del_file = deepcopy(segmentation_softmax)\n segmentation_softmax = np.load(segmentation_softmax)\n os.remove(del_file)\n\n # first resample, then put result into bbox of cropping, then save\n current_shape = segmentation_softmax.shape\n shape_original_after_cropping = dct.get('size_after_cropping')\n shape_original_before_cropping = dct.get('original_size_of_raw_data')\n # current_spacing = dct.get('spacing_after_resampling')\n # original_spacing = dct.get('original_spacing')\n\n if np.any([i != j for i, j in zip(np.array(current_shape[1:]), np.array(shape_original_after_cropping))]):\n if force_separate_z is None:\n if get_do_separate_z(dct.get('original_spacing')):\n do_separate_z = True\n lowres_axis = get_lowres_axis(dct.get('original_spacing'))\n elif get_do_separate_z(dct.get('spacing_after_resampling')):\n do_separate_z = True\n lowres_axis = get_lowres_axis(dct.get('spacing_after_resampling'))\n else:\n do_separate_z = False\n lowres_axis = None\n else:\n do_separate_z = force_separate_z\n if do_separate_z:\n lowres_axis = get_lowres_axis(dct.get('original_spacing'))\n else:\n lowres_axis = None\n\n print(\"separate z:\",do_separate_z, \"lowres axis\", lowres_axis)\n seg_old_spacing = resample_data_or_seg(segmentation_softmax, shape_original_after_cropping, is_seg=False,\n axis=lowres_axis, order=order, do_separate_z=do_separate_z, cval=0)\n #seg_old_spacing = resize_softmax_output(segmentation_softmax, shape_original_after_cropping, order=order)\n else:\n seg_old_spacing = segmentation_softmax\n\n if resampled_npz_fname is not None:\n np.savez_compressed(resampled_npz_fname, softmax=seg_old_spacing.astype(np.float16))\n save_pickle(dct, resampled_npz_fname[:-4] + \".pkl\")\n\n if region_class_order is None:\n seg_old_spacing = seg_old_spacing.argmax(0)\n else:\n seg_old_spacing_final = np.zeros(seg_old_spacing.shape[1:])\n for i, c in enumerate(region_class_order):\n seg_old_spacing_final[seg_old_spacing[i] > 0.5] = c\n seg_old_spacing = seg_old_spacing_final\n\n bbox = dct.get('crop_bbox')\n\n if bbox is not None:\n seg_old_size = np.zeros(shape_original_before_cropping)\n for c in range(3):\n bbox[c][1] = np.min((bbox[c][0] + seg_old_spacing.shape[c], shape_original_before_cropping[c]))\n seg_old_size[bbox[0][0]:bbox[0][1],\n bbox[1][0]:bbox[1][1],\n bbox[2][0]:bbox[2][1]] = seg_old_spacing\n else:\n seg_old_size = seg_old_spacing\n\n if seg_postprogess_fn is not None:\n seg_old_size_postprocessed = seg_postprogess_fn(np.copy(seg_old_size), *seg_postprocess_args)\n else:\n seg_old_size_postprocessed = seg_old_size\n\n seg_resized_itk = sitk.GetImageFromArray(seg_old_size_postprocessed.astype(np.uint8))\n seg_resized_itk.SetSpacing(dct['itk_spacing'])\n seg_resized_itk.SetOrigin(dct['itk_origin'])\n seg_resized_itk.SetDirection(dct['itk_direction'])\n sitk.WriteImage(seg_resized_itk, out_fname)\n\n if (non_postprocessed_fname is not None) and (seg_postprogess_fn is not None):\n seg_resized_itk = sitk.GetImageFromArray(seg_old_size.astype(np.uint8))\n seg_resized_itk.SetSpacing(dct['itk_spacing'])\n seg_resized_itk.SetOrigin(dct['itk_origin'])\n seg_resized_itk.SetDirection(dct['itk_direction'])\n sitk.WriteImage(seg_resized_itk, non_postprocessed_fname)\n\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.copy",
"numpy.load",
"numpy.min"
]
] |
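The nnU-Net export above ends by wrapping the resampled segmentation in a SimpleITK image and restoring the original spacing, origin, and direction before writing a NIfTI. A hedged, stand-alone sketch of that final step follows; the array contents, geometry values, and output filename are invented for illustration.

import numpy as np
import SimpleITK as sitk

seg = np.zeros((4, 16, 16), dtype=np.uint8)       # (z, y, x) segmentation labels
seg[1:3, 4:12, 4:12] = 1

itk_img = sitk.GetImageFromArray(seg)             # SimpleITK reads the array as z, y, x
itk_img.SetSpacing((1.0, 1.0, 3.0))               # (x, y, z) voxel spacing in mm, made up
itk_img.SetOrigin((0.0, 0.0, 0.0))
itk_img.SetDirection((1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
sitk.WriteImage(itk_img, "example_seg.nii.gz")    # hypothetical output path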
thanhhau097/mlops | [
"6073dd75882c37f84a3c082acec9db9199492319"
] | [
"deployment/interface/main.py"
] | [
"import cv2\nimport gradio as gr\nimport numpy as np\n\nfrom modeling.inference import MNISTInference\n\n\ndef rgb2gray(rgb):\n return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])\n\n\ndef predict(img):\n img = cv2.resize(img, (28, 28))\n img = np.array(rgb2gray(img), dtype=np.float32)\n return str(model.predict(img))\n\n\nmodel = MNISTInference(\"weights/mnist_model.pt\")\niface = gr.Interface(predict, \"image\", \"text\")\niface.launch()\n"
] | [
[
"numpy.dot"
]
] |
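The Gradio demo above converts the input image to grayscale with a weighted np.dot before feeding the MNIST model. The tiny sketch below shows only that conversion on a random image; no model weights and no Gradio interface are involved.

import numpy as np

rgb = np.random.randint(0, 256, size=(28, 28, 3)).astype(np.float32)
gray = np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])   # ITU-R BT.601 luma weights
print(gray.shape, gray.dtype)                           # (28, 28) float64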
nilskk/models | [
"dfb8bd66b54aa7f3c574089ed24b30b2e5ffa41c"
] | [
"research/object_detection/utils/learning_schedules.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Library of common learning rate schedules.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nfrom six.moves import range\nfrom six.moves import zip\nimport tensorflow.compat.v1 as tf\nimport math\n\n\ndef _learning_rate_return_value(eager_decay_rate):\n \"\"\"Helper function to return proper learning rate based on tf version.\"\"\"\n if tf.executing_eagerly():\n return eager_decay_rate\n else:\n return eager_decay_rate()\n\n\ndef exponential_decay_with_burnin(global_step,\n learning_rate_base,\n learning_rate_decay_steps,\n learning_rate_decay_factor,\n burnin_learning_rate=0.0,\n burnin_steps=0,\n min_learning_rate=0.0,\n staircase=True):\n \"\"\"Exponential decay schedule with burn-in period.\n\n In this schedule, learning rate is fixed at burnin_learning_rate\n for a fixed period, before transitioning to a regular exponential\n decay schedule.\n\n Args:\n global_step: int tensor representing global step.\n learning_rate_base: base learning rate.\n learning_rate_decay_steps: steps to take between decaying the learning rate.\n Note that this includes the number of burn-in steps.\n learning_rate_decay_factor: multiplicative factor by which to decay\n learning rate.\n burnin_learning_rate: initial learning rate during burn-in period. 
If\n 0.0 (which is the default), then the burn-in learning rate is simply\n set to learning_rate_base.\n burnin_steps: number of steps to use burnin learning rate.\n min_learning_rate: the minimum learning rate.\n staircase: whether use staircase decay.\n\n Returns:\n If executing eagerly:\n returns a no-arg callable that outputs the (scalar)\n float tensor learning rate given the current value of global_step.\n If in a graph:\n immediately returns a (scalar) float tensor representing learning rate.\n \"\"\"\n if burnin_learning_rate == 0:\n burnin_learning_rate = learning_rate_base\n\n def eager_decay_rate():\n \"\"\"Callable to compute the learning rate.\"\"\"\n post_burnin_learning_rate = tf.train.exponential_decay(\n learning_rate_base,\n global_step - burnin_steps,\n learning_rate_decay_steps,\n learning_rate_decay_factor,\n staircase=staircase)\n if callable(post_burnin_learning_rate):\n post_burnin_learning_rate = post_burnin_learning_rate()\n return tf.maximum(tf.where(\n tf.less(tf.cast(global_step, tf.int32), tf.constant(burnin_steps)),\n tf.constant(burnin_learning_rate),\n post_burnin_learning_rate), min_learning_rate, name='learning_rate')\n\n return _learning_rate_return_value(eager_decay_rate)\n\n\ndef exponential_decay_with_warmup(global_step,\n learning_rate_base,\n learning_rate_decay_steps,\n learning_rate_decay_factor,\n warmup_learning_rate=0.0,\n warmup_steps=0,\n min_learning_rate=0.0,\n staircase=True):\n \"\"\"Exponential decay schedule with warm up period.\n\n Args:\n global_step: int tensor representing global step.\n learning_rate_base: base learning rate.\n learning_rate_decay_steps: steps to take between decaying the learning rate.\n Note that this includes the number of burn-in steps.\n learning_rate_decay_factor: multiplicative factor by which to decay learning\n rate.\n warmup_learning_rate: initial learning rate during warmup period.\n warmup_steps: number of steps to use warmup learning rate.\n min_learning_rate: the minimum learning rate.\n staircase: whether use staircase decay.\n\n Returns:\n If executing eagerly:\n returns a no-arg callable that outputs the (scalar)\n float tensor learning rate given the current value of global_step.\n If in a graph:\n immediately returns a (scalar) float tensor representing learning rate.\n \"\"\"\n\n def eager_decay_rate():\n \"\"\"Callable to compute the learning rate.\"\"\"\n post_warmup_learning_rate = tf.train.exponential_decay(\n learning_rate_base,\n global_step - warmup_steps,\n learning_rate_decay_steps,\n learning_rate_decay_factor,\n staircase=staircase)\n if callable(post_warmup_learning_rate):\n post_warmup_learning_rate = post_warmup_learning_rate()\n\n if learning_rate_base < warmup_learning_rate:\n raise ValueError('learning_rate_base must be larger or equal to '\n 'warmup_learning_rate.')\n slope = (learning_rate_base - warmup_learning_rate) / warmup_steps\n warmup_rate = slope * tf.cast(global_step,\n tf.float32) + warmup_learning_rate\n learning_rate = tf.where(\n tf.less(tf.cast(global_step, tf.int32), tf.constant(warmup_steps)),\n warmup_rate,\n tf.maximum(post_warmup_learning_rate, min_learning_rate),\n name='learning_rate')\n\n return learning_rate\n\n return _learning_rate_return_value(eager_decay_rate)\n\n\ndef cosine_decay_with_warmup(global_step,\n learning_rate_base,\n total_steps,\n warmup_learning_rate=0.0,\n warmup_steps=0,\n hold_base_rate_steps=0):\n \"\"\"Cosine decay schedule with warm up period.\n\n Cosine annealing learning rate as described in:\n Loshchilov and Hutter, SGDR: 
Stochastic Gradient Descent with Warm Restarts.\n ICLR 2017. https://arxiv.org/abs/1608.03983\n In this schedule, the learning rate grows linearly from warmup_learning_rate\n to learning_rate_base for warmup_steps, then transitions to a cosine decay\n schedule.\n\n Args:\n global_step: int64 (scalar) tensor representing global step.\n learning_rate_base: base learning rate.\n total_steps: total number of training steps.\n warmup_learning_rate: initial learning rate for warm up.\n warmup_steps: number of warmup steps.\n hold_base_rate_steps: Optional number of steps to hold base learning rate\n before decaying.\n\n Returns:\n If executing eagerly:\n returns a no-arg callable that outputs the (scalar)\n float tensor learning rate given the current value of global_step.\n If in a graph:\n immediately returns a (scalar) float tensor representing learning rate.\n\n Raises:\n ValueError: if warmup_learning_rate is larger than learning_rate_base,\n or if warmup_steps is larger than total_steps.\n \"\"\"\n if total_steps < warmup_steps:\n raise ValueError('total_steps must be larger or equal to '\n 'warmup_steps.')\n def eager_decay_rate():\n \"\"\"Callable to compute the learning rate.\"\"\"\n learning_rate = 0.5 * learning_rate_base * (1 + tf.cos(\n np.pi *\n (tf.cast(global_step, tf.float32) - warmup_steps - hold_base_rate_steps\n ) / float(total_steps - warmup_steps - hold_base_rate_steps)))\n if hold_base_rate_steps > 0:\n learning_rate = tf.where(\n global_step > warmup_steps + hold_base_rate_steps,\n learning_rate, learning_rate_base)\n if warmup_steps > 0:\n if learning_rate_base < warmup_learning_rate:\n raise ValueError('learning_rate_base must be larger or equal to '\n 'warmup_learning_rate.')\n slope = (learning_rate_base - warmup_learning_rate) / warmup_steps\n warmup_rate = slope * tf.cast(global_step,\n tf.float32) + warmup_learning_rate\n learning_rate = tf.where(global_step < warmup_steps, warmup_rate,\n learning_rate)\n return tf.where(global_step > total_steps, 0.0, learning_rate,\n name='learning_rate')\n\n return _learning_rate_return_value(eager_decay_rate)\n\n\ndef cosine_decay_with_restarts(global_step,\n initial_learning_rate,\n first_decay_steps,\n t_mul=2.0,\n m_mul=1.0,\n alpha=0.0):\n def eager_decay_rate():\n completed_fraction = tf.cast(global_step / first_decay_steps, tf.float32)\n\n if t_mul == 1.0:\n i_restart = tf.floor(completed_fraction)\n completed_fraction -= i_restart\n else:\n i_restart = tf.floor(tf.log(1.0 - completed_fraction * (1.0 - t_mul)) /\n tf.log(t_mul))\n sum_r = (1.0 - t_mul ** i_restart) / (1.0 - t_mul)\n completed_fraction = (completed_fraction - sum_r) / t_mul ** i_restart\n\n m_fac = m_mul ** i_restart\n cosine_decayed = 0.5 * m_fac * (1.0 + tf.cos(np.pi * completed_fraction))\n decayed = (1 - alpha) * cosine_decayed + alpha\n learning_rate = tf.multiply(initial_learning_rate, decayed)\n\n return learning_rate\n\n return _learning_rate_return_value(eager_decay_rate)\n\n\ndef manual_stepping(global_step, boundaries, rates, warmup=False):\n \"\"\"Manually stepped learning rate schedule.\n\n This function provides fine grained control over learning rates. One must\n specify a sequence of learning rates as well as a set of integer steps\n at which the current learning rate must transition to the next. 
For example,\n if boundaries = [5, 10] and rates = [.1, .01, .001], then the learning\n rate returned by this function is .1 for global_step=0,...,4, .01 for\n global_step=5...9, and .001 for global_step=10 and onward.\n\n Args:\n global_step: int64 (scalar) tensor representing global step.\n boundaries: a list of global steps at which to switch learning\n rates. This list is assumed to consist of increasing positive integers.\n rates: a list of (float) learning rates corresponding to intervals between\n the boundaries. The length of this list must be exactly\n len(boundaries) + 1.\n warmup: Whether to linearly interpolate learning rate for steps in\n [0, boundaries[0]].\n\n Returns:\n If executing eagerly:\n returns a no-arg callable that outputs the (scalar)\n float tensor learning rate given the current value of global_step.\n If in a graph:\n immediately returns a (scalar) float tensor representing learning rate.\n Raises:\n ValueError: if one of the following checks fails:\n 1. boundaries is a strictly increasing list of positive integers\n 2. len(rates) == len(boundaries) + 1\n 3. boundaries[0] != 0\n \"\"\"\n if any([b < 0 for b in boundaries]) or any(\n [not isinstance(b, int) for b in boundaries]):\n raise ValueError('boundaries must be a list of positive integers')\n if any([bnext <= b for bnext, b in zip(boundaries[1:], boundaries[:-1])]):\n raise ValueError('Entries in boundaries must be strictly increasing.')\n if any([not isinstance(r, float) for r in rates]):\n raise ValueError('Learning rates must be floats')\n if len(rates) != len(boundaries) + 1:\n raise ValueError('Number of provided learning rates must exceed '\n 'number of boundary points by exactly 1.')\n\n if boundaries and boundaries[0] == 0:\n raise ValueError('First step cannot be zero.')\n\n if warmup and boundaries:\n slope = (rates[1] - rates[0]) * 1.0 / boundaries[0]\n warmup_steps = list(range(boundaries[0]))\n warmup_rates = [rates[0] + slope * step for step in warmup_steps]\n boundaries = warmup_steps + boundaries\n rates = warmup_rates + rates[1:]\n else:\n boundaries = [0] + boundaries\n num_boundaries = len(boundaries)\n\n def eager_decay_rate():\n \"\"\"Callable to compute the learning rate.\"\"\"\n rate_index = tf.reduce_max(tf.where(\n tf.greater_equal(global_step, boundaries),\n list(range(num_boundaries)),\n [0] * num_boundaries))\n return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries),\n name='learning_rate')\n\n return _learning_rate_return_value(eager_decay_rate)\n"
] | [
[
"tensorflow.compat.v1.executing_eagerly",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.floor",
"tensorflow.compat.v1.one_hot",
"tensorflow.compat.v1.where",
"tensorflow.compat.v1.greater_equal",
"tensorflow.compat.v1.log",
"tensorflow.compat.v1.cos",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.train.exponential_decay",
"tensorflow.compat.v1.multiply",
"tensorflow.compat.v1.constant"
]
] |
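The learning-rate library above implements, among other schedules, cosine decay with a linear warmup. The following plain-NumPy sketch evaluates that formula at a few steps so the shape of the schedule is visible; the parameter values are arbitrary, hold_base_rate_steps is taken as 0, and no TensorFlow graph is built.

import numpy as np

def cosine_with_warmup(step, base_lr=0.1, total_steps=100, warmup_lr=0.01, warmup_steps=10):
    # Linear ramp from warmup_lr to base_lr, then cosine decay to 0 at total_steps.
    if step < warmup_steps:
        slope = (base_lr - warmup_lr) / warmup_steps
        return warmup_lr + slope * step
    cos_arg = np.pi * (step - warmup_steps) / (total_steps - warmup_steps)
    return 0.5 * base_lr * (1 + np.cos(cos_arg))

for s in (0, 5, 10, 55, 100):
    print(s, round(cosine_with_warmup(s), 4))   # 0.01, 0.055, 0.1, 0.05, 0.0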
guangxinli/akshare | [
"e27666f94051749e3a2d8c4b669b43f03e16d7cb"
] | [
"akshare/stock/zh_stock_kcb_sina.py"
] | [
"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nAuthor: Albert King\ndate: 2019/10/30 11:28\ncontact: [email protected]\ndesc: 新浪财经-科创板-实时行情数据和历史行情数据(包含前复权和后复权因子)\n优化: 在科创板行情的获取上采用多线程模式(新浪会封IP, 不再优化)\n\"\"\"\nimport datetime\nimport re\n\nimport demjson\nimport pandas as pd\nimport requests\nfrom tqdm import tqdm\n\nfrom akshare.stock.cons import (zh_sina_kcb_stock_payload,\n zh_sina_kcb_stock_url,\n zh_sina_kcb_stock_count_url,\n zh_sina_kcb_stock_hist_url,\n zh_sina_kcb_stock_hfq_url,\n zh_sina_kcb_stock_qfq_url,\n zh_sina_kcb_stock_amount_url)\n\n\ndef get_zh_kcb_page_count():\n \"\"\"\n 所有股票的总页数\n http://vip.stock.finance.sina.com.cn/mkt/#hs_a\n :return: int 需要抓取的股票总页数\n \"\"\"\n res = requests.get(zh_sina_kcb_stock_count_url)\n page_count = int(re.findall(re.compile(r\"\\d+\"), res.text)[0]) / 80\n if isinstance(page_count, int):\n return page_count\n else:\n return int(page_count) + 1\n\n\ndef stock_zh_kcb_spot():\n \"\"\"\n 从新浪财经-A股获取所有A股的实时行情数据, 大量抓取容易封IP\n http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk\n :return: pandas.DataFrame\n \"\"\"\n big_df = pd.DataFrame()\n page_count = get_zh_kcb_page_count()\n zh_sina_stock_payload_copy = zh_sina_kcb_stock_payload.copy()\n for page in tqdm(range(1, page_count+1)):\n zh_sina_stock_payload_copy.update({\"page\": page})\n res = requests.get(\n zh_sina_kcb_stock_url,\n params=zh_sina_kcb_stock_payload)\n data_json = demjson.decode(res.text)\n big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True)\n return big_df\n\n\ndef stock_zh_kcb_daily(symbol=\"sh688399\", factor=\"\"):\n \"\"\"\n 从新浪财经-A股获取某个股票的历史行情数据, 大量抓取容易封IP\n :param symbol: str e.g., sh600000\n :param factor: str 默认为空, 不复权; qfq, 前复权因子; hfq, 后复权因子;\n :return: pandas.DataFrame\n 不复权数据\n 日期 开盘价 最高价 最低价 收盘价 成交 盘后量 盘后额\n 0 2019-07-22 91.300 97.200 66.300 74.920 58330685 40778 3055088\n 1 2019-07-23 70.020 78.880 70.000 74.130 23906020 43909 3254974\n 2 2019-07-24 74.130 76.550 72.500 75.880 21608530 23149 1756546\n 3 2019-07-25 75.000 79.980 74.600 78.000 24626920 66921 5219838\n 4 2019-07-26 76.780 76.780 70.300 71.680 16831530 49106 3519918\n .. ... ... ... ... ... ... ... 
...\n 67 2019-10-31 59.790 60.500 57.800 58.290 2886407 3846 224183\n 68 2019-11-01 57.900 59.960 57.600 59.250 2246059 0 0\n 69 2019-11-04 60.040 61.880 60.040 61.740 3945106 1782 110021\n 70 2019-11-05 61.100 62.780 60.850 62.160 4187105 400 24864\n 71 2019-11-06 62.320 62.620 60.900 61.130 2331354 1300 79469\n\n 后复权因子\n date hfq_factor\n 0 2019-07-22 1.0000000000000000\n 1 1900-01-01 1.0000000000000000\n\n 前复权因子\n date qfq_factor\n 0 2019-07-22 1.0000000000000000\n 1 1900-01-01 1.0000000000000000\n \"\"\"\n res = requests.get(zh_sina_kcb_stock_hist_url.format(symbol, datetime.datetime.now().strftime(\"%Y_%m_%d\"), symbol))\n data_json = demjson.decode(res.text[res.text.find(\"[\"):res.text.rfind(\"]\")+1])\n data_df = pd.DataFrame(data_json)\n data_df.index = pd.to_datetime(data_df[\"d\"])\n data_df.index.name = \"date\"\n del data_df[\"d\"]\n\n r = requests.get(zh_sina_kcb_stock_amount_url.format(symbol, symbol))\n amount_data_json = demjson.decode(r.text[r.text.find(\"[\"): r.text.rfind(\"]\")+1])\n amount_data_df = pd.DataFrame(amount_data_json)\n amount_data_df.index = pd.to_datetime(amount_data_df.date)\n del amount_data_df[\"date\"]\n temp_df = pd.merge(data_df, amount_data_df, left_index=True, right_index=True, how=\"left\")\n temp_df.fillna(method=\"ffill\", inplace=True)\n temp_df = temp_df.astype(float)\n temp_df[\"amount\"] = temp_df[\"amount\"] * 10000\n temp_df[\"turnover\"] = temp_df[\"v\"] / temp_df[\"amount\"]\n temp_df.columns = [\"开盘价\", \"最高价\", \"最低价\", \"收盘价\", \"成交\", \"盘后量\", \"盘后额\", \"流通股本\", \"换手率\"]\n temp_df.columns = [\"close\", \"high\", \"low\", \"open\", \"volume\", \"after_volume\", \"after_amount\", \"outstanding_share\", \"turnover\"]\n if not factor:\n return temp_df\n if factor == \"hfq\":\n res = requests.get(zh_sina_kcb_stock_hfq_url.format(symbol))\n hfq_factor_df = pd.DataFrame(\n eval(res.text.split(\"=\")[1].split(\"\\n\")[0])['data'])\n hfq_factor_df.columns = [\"date\", \"hfq_factor\"]\n hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)\n del hfq_factor_df[\"date\"]\n return hfq_factor_df\n if factor == \"qfq\":\n res = requests.get(zh_sina_kcb_stock_qfq_url.format(symbol))\n qfq_factor_df = pd.DataFrame(\n eval(res.text.split(\"=\")[1].split(\"\\n\")[0])['data'])\n qfq_factor_df.columns = [\"date\", \"qfq_factor\"]\n qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)\n del qfq_factor_df[\"date\"]\n return qfq_factor_df\n\n\nif __name__ == \"__main__\":\n stock_zh_kcb_daily_qfq_df = stock_zh_kcb_daily(symbol=\"sh688399\", factor=\"qfq\")\n print(stock_zh_kcb_daily_qfq_df)\n stock_zh_kcb_daily_hfq_df = stock_zh_kcb_daily(symbol=\"sh688399\", factor=\"hfq\")\n print(stock_zh_kcb_daily_hfq_df)\n stock_zh_kcb_daily_df = stock_zh_kcb_daily(symbol=\"sh688399\", factor=\"\")\n print(stock_zh_kcb_daily_df)\n # stock_zh_kcb_spot_df = stock_zh_kcb_spot()\n # print(stock_zh_kcb_spot_df)\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame",
"pandas.merge"
]
] |
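stock_zh_kcb_daily above merges the price frame with a separately fetched amount frame on their datetime indexes and forward-fills the gaps. The sketch below shows just that merge-and-ffill step on two fabricated frames; the numbers loosely echo the docstring sample but are not real quotes.

import pandas as pd

price = pd.DataFrame({"close": [74.9, 74.1, 75.9]},
                     index=pd.to_datetime(["2019-07-22", "2019-07-23", "2019-07-24"]))
amount = pd.DataFrame({"amount": [3055088.0]},
                      index=pd.to_datetime(["2019-07-22"]))
merged = pd.merge(price, amount, left_index=True, right_index=True, how="left")
merged["amount"] = merged["amount"].ffill()   # carry the last known amount forward
print(merged)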
dipta007/SEMal | [
"8c89e385ff946a2f9b9c5cefff3e0becc8e6a455"
] | [
"LEMP/5_lemp_roc_curve.py"
] | [
"import numpy as np\nfrom os import listdir\nfrom os.path import isfile, join\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, roc_auc_score, confusion_matrix, classification_report\n# from error_measurement import matthews_correlation, sensitivity, specificity, f1_score, precision_m\n# import tensorflow as tf\nimport sys\nfrom config import human, mice\n\n\nif len(sys.argv) <= 1:\n print('Please Specify human or mice')\n exit(1)\n\nconfig = human if sys.argv[1] == 'human' else mice\n\ndata = {}\nresult = {}\n\n\ndef encoded_to_mapping(encoded):\n global data\n data = {}\n with open(encoded) as fp:\n fp.readline()\n for line in fp:\n row = list(map(str.strip, line.split(',')))\n data[row[1]] = row[2].strip()\n\n\ndef data_lemp_model(X, Y):\n global data\n X = list(set(X))\n count = 0\n res = \"\"\n for ind in range(len(X)):\n x = X[ind]\n res += f'>{x}\\n'\n res += data[x]\n res += '\\n'\n\n count += 1\n if count % 100 == 0:\n with open(f'../data/lemp/{sys.argv[1]}/server/{count // 100}.txt', 'w') as fp:\n fp.write(res)\n res = \"\"\n\n\ndef original_result_read(encoded):\n global result\n result = {}\n with open(encoded) as fp:\n fp.readline()\n for line in fp:\n row = list(map(str.strip, line.split(',')))\n # print(row)\n protein_id = row[1].strip()\n protein = row[2].strip()\n seq = row[3].strip()\n\n ind = 0\n while ind < len(protein):\n ind = protein.find(\"K\", ind)\n if ind == -1:\n break\n\n if protein_id not in result:\n result[protein_id] = {}\n\n result[protein_id][ind] = 1 if seq[ind] == '1' else 0\n ind += 1\n\n\ny_test = []\ny_test_predict = []\nTP, TN, FP, FN = 0, 0, 0, 0\n\n\ndef lemp_result_read():\n data_folder = f'../data/lemp/{sys.argv[1]}/result'\n lemp_results = [f for f in listdir(data_folder) if isfile(join(data_folder, f))]\n for f in lemp_results:\n print(f)\n with open(f'{data_folder}/{f}', 'r') as fp:\n fp.readline()\n for line in fp:\n row = line.strip()\n row = [val.strip() for val in row.split()]\n protein_id = row[0]\n lysine_ind = int(row[1]) - 1\n pred = float(row[3])\n # print(row, protein_id, lysine_ind, pred)\n y_test.append(result[protein_id][lysine_ind])\n y_test_predict.append(pred)\n\n\n\ndef evaluate():\n print(len(y_test), len(y_test_predict))\n # with tf.Session() as sess:\n # sen = sess.run(sensitivity(y_test, y_test_predict))\n # spe = sess.run(specificity(y_test, y_test_predict))\n\n # res = \"\\n******************** Independent Test Score ********************\\n\"\n # res += \"Accuracy: {}\\n\".format(accuracy_score(y_test, y_test_predict))\n # res += \"MCC: {}\\n\".format(matthews_corrcoef(y_test, y_test_predict))\n # res += \"Precision: {}\\n\".format(precision_score(y_test, y_test_predict, pos_label=1))\n res = \"\"\n res += \"Roc AUC score: {}\\n\".format(roc_auc_score(y_test, y_test_predict))\n # # res += \"AUC score: {}\\n\".format(auc(y_test, y_test_predict))\n # res += \"F1 score: {}\\n\".format(f1_score(y_test, y_test_predict))\n # res += \"Sensitivity: {}\\n\".format(sen)\n # res += \"Specifity: {}\\n\\n\\n\".format(spe)\n print(res)\n\n print(f\"TP: {TP}, TN: {TN}, FP: {FP}, FN: {FN}\")\n i_want_roc(y_test, y_test_predict)\n\n\ndef i_want_roc(y, y_p):\n from sklearn.metrics import roc_curve\n fper, tper, thresholds = roc_curve(y, y_p)\n plot_roc_curve(fper, tper)\n\n\ndef plot_roc_curve(fper, tper):\n import matplotlib.pyplot as plt\n plt.plot(fper, tper, color='orange', label='ROC')\n plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')\n 
plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver Operating Characteristic (ROC) Curve')\n plt.legend()\n plt.show()\n\n\ndef data_prep_for_lemp_server():\n encoded_to_mapping(config['encoded_file'])\n npzfile = np.load(config['output'], allow_pickle=True)\n X_p = npzfile['arr_0']\n Y_p = npzfile['arr_1']\n X_n = npzfile['arr_2']\n Y_n = npzfile['arr_3']\n\n print(X_p.shape, X_n.shape)\n\n x_train_p, x_test_p, y_train_p, y_test_p = train_test_split(X_p, Y_p, test_size=0.1, shuffle=True, random_state=47)\n x_train_n, x_test_n, y_train_n, y_test_n = train_test_split(X_n, Y_n, test_size=0.1, shuffle=True, random_state=47)\n\n print(x_train_p.shape)\n print(x_train_n.shape)\n print(x_test_p.shape)\n print(x_test_n.shape)\n\n x_test = np.concatenate((x_test_p, x_test_n))\n y_test = np.concatenate((y_test_p, y_test_n))\n\n print(x_test.shape, y_test.shape)\n data_lemp_model(x_test, y_test)\n\n\ndef data_evaluate_for_lemp():\n original_result_read(config['encoded_file'])\n lemp_result_read()\n evaluate()\n\n\nif __name__ == '__main__':\n # data_prep_for_lemp_server()\n data_evaluate_for_lemp()\n\n\n\n"
] | [
[
"numpy.concatenate",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.load",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.show",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.roc_curve"
]
] |
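The evaluation script above reduces to scikit-learn's roc_auc_score plus roc_curve for plotting. A self-contained sketch with fabricated labels and scores follows; it does not read any LEMP server output.

from sklearn.metrics import roc_auc_score, roc_curve

y_true = [0, 0, 1, 1, 1, 0]                 # made-up binary labels
y_score = [0.1, 0.4, 0.35, 0.8, 0.7, 0.2]   # made-up predicted probabilities
print("ROC AUC:", roc_auc_score(y_true, y_score))
fpr, tpr, thresholds = roc_curve(y_true, y_score)
print(list(zip(fpr.round(2), tpr.round(2))))   # points that would be plotted as the ROC curve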
bminixhofer/gerpt2 | [
"9ed06f34cc5e7978631e0eae690e57646abc1024"
] | [
"prepare/generate_aligned_wte.py"
] | [
"from tqdm.auto import tqdm\nimport click\nimport numpy as np\nfrom transformers import GPT2LMHeadModel, GPT2TokenizerFast\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport torch\nimport math\nimport json\n\n\ndef load_vectors(path, max_n=200_000):\n with open(path) as f:\n ids = {}\n dim = int(f.readline().strip().split()[1])\n vectors = np.zeros((max_n, dim))\n\n i = 0\n for line in tqdm(f, total=max_n):\n if i == max_n:\n break\n\n parts = line.split()\n name = parts[0]\n\n if name in ids:\n continue\n\n try:\n values = np.array([float(x) for x in parts[1:]])\n vectors[i] = values\n ids[name] = i\n i += 1\n except ValueError:\n pass\n\n return vectors, ids\n\n\ndef get_tokenizer_embeddings(tokenizer, vectors, ids, freqs):\n embs = {value: ([], [], set()) for value in tokenizer.get_vocab().values()}\n\n for lower_key, value in tqdm(ids.items()):\n for key in [lower_key, lower_key.title()]:\n tokenized = tokenizer.encode(\n key, add_special_tokens=False\n ) + tokenizer.encode(\" \" + key, add_special_tokens=False)\n\n for token_id in tokenized:\n if key not in embs[token_id][2] and key in freqs:\n embs[token_id][0].append(vectors[value])\n embs[token_id][1].append(freqs[key])\n embs[token_id][2].add(key)\n\n embs_matrix = np.zeros((len(embs), vectors.shape[1]))\n\n for i in range(len(embs_matrix)):\n if len(embs[i][2]) == 0:\n continue\n\n freqs = np.array(embs[i][1], dtype=np.float32)\n freqs /= freqs.sum()\n\n embs_matrix[i] = (np.stack(embs[i][0]) * freqs[:, np.newaxis]).sum(axis=0)\n\n return embs_matrix\n\n\[email protected]()\[email protected](\"--german_tokenizer\", help=\"Name or path of trained German tokenizer.\")\[email protected](\"--english_tokenizer\", help=\"Name or path of trained English tokenizer.\")\[email protected](\n \"--gpt2_model\", help=\"Name or path of trained English GPT2 model to use WTE from.\"\n)\[email protected](\n \"--german_freqs\", help=\"Path to a .json file with frequencies of german words.\"\n)\[email protected](\n \"--english_freqs\", help=\"Path to a .json file with frequencies of english words.\"\n)\[email protected](\n \"--german_vecs\",\n help=\"German aligned word vectors from https://fasttext.cc/docs/en/aligned-vectors.html.\",\n)\[email protected](\n \"--english_vecs\",\n help=\"English aligned word vectors from https://fasttext.cc/docs/en/aligned-vectors.html.\",\n)\[email protected](\n \"--out_path\", help=\"Path to store the German WTE matrix at as .pt file.\",\n)\ndef main(\n german_tokenizer,\n english_tokenizer,\n gpt2_model,\n german_freqs,\n english_freqs,\n german_vecs,\n english_vecs,\n out_path,\n):\n german_freqs = json.load(open(german_freqs))\n english_freqs = json.load(open(english_freqs))\n\n de_vectors, de_ids = load_vectors(german_vecs)\n en_vectors, en_ids = load_vectors(english_vecs)\n\n german_tokenizer = GPT2TokenizerFast.from_pretrained(german_tokenizer)\n english_tokenizer = GPT2TokenizerFast.from_pretrained(english_tokenizer)\n\n model = GPT2LMHeadModel.from_pretrained(gpt2_model)\n\n en_tok_embs = get_tokenizer_embeddings(\n english_tokenizer, en_vectors, en_ids, english_freqs\n )\n de_tok_embs = get_tokenizer_embeddings(\n german_tokenizer, de_vectors, de_ids, german_freqs\n )\n\n def get_closest(token_id, similarities=None):\n if (de_tok_embs[token_id] == 0).all():\n return None, None\n\n if similarities is None:\n similarities = cosine_similarity(\n de_tok_embs[token_id][np.newaxis, :], en_tok_embs\n )[0]\n\n best = np.argsort(similarities)[::-1]\n\n best = 
english_tokenizer.convert_ids_to_tokens(best)\n de_token = german_tokenizer.convert_ids_to_tokens([token_id])[0]\n space_before = de_token.startswith(\"Ġ\")\n\n best = [x for x in best if x.startswith(\"Ġ\") == space_before]\n en_token = best[0]\n\n return en_token, de_token\n\n print(\"Some sample mappings:\")\n\n for token_id in np.random.choice(list(german_tokenizer.get_vocab().values()), 30):\n en_token, de_token = get_closest(token_id)\n\n print(f\"{de_token} -> {en_token}\")\n\n german_wte_weight = torch.zeros_like(model.transformer.wte.weight)\n mean, std = (\n model.transformer.wte.weight.mean().item(),\n model.transformer.wte.weight.std().item(),\n )\n\n en_vocab = english_tokenizer.get_vocab()\n n_matched = 0\n\n batch_size = 1024\n for i in tqdm(range(int(math.ceil(len(german_wte_weight) / batch_size)))):\n start, end = i * batch_size, min((i + 1) * batch_size, len(german_wte_weight))\n\n similarities = cosine_similarity(de_tok_embs[start:end], en_tok_embs)\n for token_id in range(start, end):\n en_token, _ = get_closest(\n token_id, similarities=similarities[token_id - start]\n )\n\n if en_token is None:\n german_wte_weight[token_id] = torch.normal(\n mean, std, size=(german_wte_weight.shape[1],)\n )\n else:\n en_token_id = en_vocab[en_token]\n german_wte_weight[token_id] = model.transformer.wte.weight[en_token_id]\n\n n_matched += 1\n\n print(f\"Matching token found for {n_matched} of {len(en_vocab)} tokens.\")\n torch.save(german_wte_weight, out_path)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array",
"numpy.zeros",
"torch.save",
"torch.normal",
"numpy.stack",
"numpy.argsort",
"sklearn.metrics.pairwise.cosine_similarity",
"torch.zeros_like"
]
] |
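generate_aligned_wte.py above maps each German token to its closest English token by cosine similarity between aligned word embeddings. The toy sketch below shows that matching step on invented 2-D vectors; the real aligned fastText vectors are much higher-dimensional and tied to actual tokenizer vocabularies.

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

de_embs = np.array([[1.0, 0.0], [0.0, 1.0]])                # stand-in "German" token embeddings
en_embs = np.array([[0.9, 0.1], [0.2, 0.8], [-1.0, 0.0]])   # stand-in "English" token embeddings
sims = cosine_similarity(de_embs, en_embs)                  # (2, 3) similarity matrix
best = np.argsort(sims, axis=1)[:, ::-1]                    # English indices, best match first
print(best[:, 0])                                           # closest English token per German token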
vtabbott/generative-models | [
"c97ae1b396a32ef8908e27468aef78b758202d1c"
] | [
"GAN/generative_adversarial_parallelization/gap_pytorch.py"
] | [
"import torch\r\nimport torch.nn\r\nimport torch.nn.functional as nn\r\nimport torch.autograd as autograd\r\nimport torch.optim as optim\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.gridspec as gridspec\r\nimport os\r\nimport random\r\nfrom torch.autograd import Variable\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\n\r\n\r\nmnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)\r\nmb_size = 32\r\nz_dim = 10\r\nX_dim = mnist.train.images.shape[1]\r\ny_dim = mnist.train.labels.shape[1]\r\nh_dim = 128\r\ncnt = 0\r\nlr = 1e-3\r\nK = 100\r\n\r\n\r\ndef log(x):\r\n return torch.log(x + 1e-8)\r\n\r\n\r\nG1_ = torch.nn.Sequential(\r\n torch.nn.Linear(z_dim, h_dim),\r\n torch.nn.ReLU(),\r\n torch.nn.Linear(h_dim, X_dim),\r\n torch.nn.Sigmoid()\r\n)\r\n\r\n\r\nD1_ = torch.nn.Sequential(\r\n torch.nn.Linear(X_dim, h_dim),\r\n torch.nn.ReLU(),\r\n torch.nn.Linear(h_dim, 1),\r\n torch.nn.Sigmoid()\r\n)\r\n\r\nG2_ = torch.nn.Sequential(\r\n torch.nn.Linear(z_dim, h_dim),\r\n torch.nn.ReLU(),\r\n torch.nn.Linear(h_dim, X_dim),\r\n torch.nn.Sigmoid()\r\n)\r\n\r\n\r\nD2_ = torch.nn.Sequential(\r\n torch.nn.Linear(X_dim, h_dim),\r\n torch.nn.ReLU(),\r\n torch.nn.Linear(h_dim, 1),\r\n torch.nn.Sigmoid()\r\n)\r\n\r\nnets = [G1_, D1_, G2_, D2_]\r\n\r\n\r\ndef reset_grad():\r\n for net in nets:\r\n net.zero_grad()\r\n\r\n\r\nG1_solver = optim.Adam(G1_.parameters(), lr=lr)\r\nD1_solver = optim.Adam(D1_.parameters(), lr=lr)\r\nG2_solver = optim.Adam(G2_.parameters(), lr=lr)\r\nD2_solver = optim.Adam(D2_.parameters(), lr=lr)\r\n\r\nD1 = {'model': D1_, 'solver': D1_solver}\r\nG1 = {'model': G1_, 'solver': G1_solver}\r\nD2 = {'model': D2_, 'solver': D2_solver}\r\nG2 = {'model': G2_, 'solver': G2_solver}\r\n\r\nGAN_pairs = [(D1, G1), (D2, G2)]\r\n\r\nfor it in range(1000000):\r\n # Sample data\r\n z = Variable(torch.randn(mb_size, z_dim))\r\n X, _ = mnist.train.next_batch(mb_size)\r\n X = Variable(torch.from_numpy(X))\r\n\r\n for D, G in GAN_pairs:\r\n # Discriminator\r\n G_sample = G['model'](z)\r\n D_real = D['model'](X)\r\n D_fake = D['model'](G_sample)\r\n\r\n D_loss = -torch.mean(log(D_real) + log(1 - D_fake))\r\n\r\n D_loss.backward()\r\n D['solver'].step()\r\n reset_grad()\r\n\r\n # Generator\r\n G_sample = G['model'](z)\r\n D_fake = D['model'](G_sample)\r\n\r\n G_loss = -torch.mean(log(D_fake))\r\n\r\n G_loss.backward()\r\n G['solver'].step()\r\n reset_grad()\r\n\r\n if it != 0 and it % K == 0:\r\n # Swap (D, G) pairs\r\n new_D1, new_D2 = GAN_pairs[1][0], GAN_pairs[0][0]\r\n GAN_pairs = [(new_D1, G1), (new_D2, G2)]\r\n\r\n # Print and plot every now and then\r\n if it % 1000 == 0:\r\n print('Iter-{}; D_loss: {:.4}; G_loss: {:.4}'\r\n .format(it, D_loss.data[0], G_loss.data[0]))\r\n\r\n # Pick G randomly\r\n G_rand = random.choice([G1_, G2_])\r\n samples = G_rand(z).data.numpy()[:16]\r\n\r\n fig = plt.figure(figsize=(4, 4))\r\n gs = gridspec.GridSpec(4, 4)\r\n gs.update(wspace=0.05, hspace=0.05)\r\n\r\n for i, sample in enumerate(samples):\r\n ax = plt.subplot(gs[i])\r\n plt.axis('off')\r\n ax.set_xticklabels([])\r\n ax.set_yticklabels([])\r\n ax.set_aspect('equal')\r\n plt.imshow(sample.reshape(28, 28), cmap='Greys_r')\r\n\r\n if not os.path.exists('out/'):\r\n os.makedirs('out/')\r\n\r\n plt.savefig('out/{}.png'.format(str(cnt).zfill(3)), bbox_inches='tight')\r\n cnt += 1\r\n plt.close(fig)\r\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Sigmoid",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"matplotlib.pyplot.close",
"torch.from_numpy",
"torch.nn.ReLU",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"torch.log",
"torch.randn",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.subplot"
]
] |
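The GAP script above trains two (D, G) pairs with the standard non-saturating GAN losses and periodically swaps the discriminators between generators. The sketch below isolates the loss computation for one pair, using random stand-in discriminator outputs instead of MNIST batches and real networks.

import torch

def log(x):
    return torch.log(x + 1e-8)   # same numerical guard as in the file above

D_real = torch.rand(4, 1)        # stand-in discriminator outputs on real data
D_fake = torch.rand(4, 1)        # stand-in discriminator outputs on G(z)

D_loss = -torch.mean(log(D_real) + log(1 - D_fake))   # discriminator objective
G_loss = -torch.mean(log(D_fake))                     # non-saturating generator objective
print(D_loss.item(), G_loss.item())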
EricWebsmith/pyod | [
"c26b11f6b8e4b404607ae16879e2b0b4c3e46811"
] | [
"pyod/models/cblof.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Clustering Based Local Outlier Factor (CBLOF)\n\"\"\"\n# Author: Yue Zhao <[email protected]>\n# Shangwen Huang <https://github.com/shangwen777>\n# License: BSD 2 clause\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport warnings\nimport numpy as np\nfrom scipy.spatial.distance import cdist\nfrom sklearn.cluster import KMeans\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.utils.validation import check_array\nfrom sklearn.utils.estimator_checks import check_estimator\n\nfrom .base import BaseDetector\nfrom ..utils.utility import check_parameter\nfrom ..utils.stat_models import pairwise_distances_no_broadcast\n\n__all__ = ['CBLOF']\n\n\nclass CBLOF(BaseDetector):\n r\"\"\"The CBLOF operator calculates the outlier score based on cluster-based\n local outlier factor.\n\n CBLOF takes as an input the data set and the cluster model that was\n generated by a clustering algorithm. It classifies the clusters into small\n clusters and large clusters using the parameters alpha and beta.\n The anomaly score is then calculated based on the size of the cluster the\n point belongs to as well as the distance to the nearest large cluster.\n\n Use weighting for outlier factor based on the sizes of the clusters as\n proposed in the original publication. Since this might lead to unexpected\n behavior (outliers close to small clusters are not found), it is disabled\n by default.Outliers scores are solely computed based on their distance to\n the closest large cluster center.\n\n By default, kMeans is used for clustering algorithm instead of\n Squeezer algorithm mentioned in the original paper for multiple reasons.\n\n See :cite:`he2003discovering` for details.\n\n Parameters\n ----------\n n_clusters : int, optional (default=8)\n The number of clusters to form as well as the number of\n centroids to generate.\n\n contamination : float in (0., 0.5), optional (default=0.1)\n The amount of contamination of the data set,\n i.e. the proportion of outliers in the data set. Used when fitting to\n define the threshold on the decision function.\n\n clustering_estimator : Estimator, optional (default=None)\n The base clustering algorithm for performing data clustering.\n A valid clustering algorithm should be passed in. The estimator should\n have standard sklearn APIs, fit() and predict(). The estimator should\n have attributes ``labels_`` and ``cluster_centers_``.\n If ``cluster_centers_`` is not in the attributes once the model is fit,\n it is calculated as the mean of the samples in a cluster.\n\n If not set, CBLOF uses KMeans for scalability. See\n https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html\n\n alpha : float in (0.5, 1), optional (default=0.9)\n Coefficient for deciding small and large clusters. The ratio\n of the number of samples in large clusters to the number of samples in\n small clusters.\n\n beta : int or float in (1,), optional (default=5).\n Coefficient for deciding small and large clusters. For a list\n sorted clusters by size `|C1|, \\|C2|, ..., |Cn|, beta = |Ck|/|Ck-1|`\n\n use_weights : bool, optional (default=False)\n If set to True, the size of clusters are used as weights in\n outlier score calculation.\n\n check_estimator : bool, optional (default=False)\n If set to True, check whether the base estimator is consistent with\n sklearn standard.\n\n .. 
warning::\n check_estimator may throw errors with scikit-learn 0.20 above.\n\n random_state : int, RandomState or None, optional (default=None)\n If int, random_state is the seed used by the random\n number generator; If RandomState instance, random_state is the random\n number generator; If None, the random number generator is the\n RandomState instance used by `np.random`.\n\n n_jobs : integer, optional (default=1)\n The number of jobs to run in parallel for both `fit` and `predict`.\n If -1, then the number of jobs is set to the number of cores.\n\n Attributes\n ----------\n clustering_estimator_ : Estimator, sklearn instance\n Base estimator for clustering.\n\n cluster_labels_ : list of shape (n_samples,)\n Cluster assignment for the training samples.\n\n n_clusters_ : int\n Actual number of clusters (possibly different from n_clusters).\n\n cluster_sizes_ : list of shape (n_clusters_,)\n The size of each cluster once fitted with the training data.\n\n decision_scores_ : numpy array of shape (n_samples,)\n The outlier scores of the training data.\n The higher, the more abnormal. Outliers tend to have higher scores.\n This value is available once the detector is fitted.\n\n cluster_centers_ : numpy array of shape (n_clusters_, n_features)\n The center of each cluster.\n\n small_cluster_labels_ : list of clusters numbers\n The cluster assignments belonging to small clusters.\n\n large_cluster_labels_ : list of clusters numbers\n The cluster assignments belonging to large clusters.\n\n threshold_ : float\n The threshold is based on ``contamination``. It is the\n ``n_samples * contamination`` most abnormal samples in\n ``decision_scores_``. The threshold is calculated for generating\n binary outlier labels.\n\n labels_ : int, either 0 or 1\n The binary labels of the training data. 0 stands for inliers\n and 1 for outliers/anomalies. It is generated by applying\n ``threshold_`` on ``decision_scores_``.\n \"\"\"\n\n def __init__(self, n_clusters=8, contamination=0.1,\n clustering_estimator=None, alpha=0.9, beta=5,\n use_weights=False, check_estimator=False, random_state=None,\n n_jobs=1):\n super(CBLOF, self).__init__(contamination=contamination)\n self.n_clusters = n_clusters\n self.clustering_estimator = clustering_estimator\n self.alpha = alpha\n self.beta = beta\n self.use_weights = use_weights\n self.check_estimator = check_estimator\n self.random_state = random_state\n self.n_jobs = n_jobs\n\n # noinspection PyIncorrectDocstring\n def fit(self, X, y=None):\n \"\"\"Fit detector. 
y is ignored in unsupervised methods.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The input samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted estimator.\n \"\"\"\n\n # validate inputs X and y (optional)\n X = check_array(X)\n self._set_n_classes(y)\n n_samples, n_features = X.shape\n\n # check parameters\n # number of clusters are default to 8\n self._validate_estimator(default=KMeans(\n n_clusters=self.n_clusters,\n random_state=self.random_state,\n n_jobs=self.n_jobs))\n\n self.clustering_estimator_.fit(X=X, y=y)\n # Get the labels of the clustering results\n # labels_ is consistent across sklearn clustering algorithms\n self.cluster_labels_ = self.clustering_estimator_.labels_\n self.cluster_sizes_ = np.bincount(self.cluster_labels_)\n\n # Get the actual number of clusters\n self.n_clusters_ = self.cluster_sizes_.shape[0]\n\n if self.n_clusters_ != self.n_clusters:\n warnings.warn(\"The chosen clustering for CBLOF forms {0} clusters\"\n \"which is inconsistent with n_clusters ({1}).\".\n format(self.n_clusters_, self.n_clusters))\n\n self._set_cluster_centers(X, n_features)\n self._set_small_large_clusters(n_samples)\n\n self.decision_scores_ = self._decision_function(X,\n self.cluster_labels_)\n\n self._process_decision_scores()\n return self\n\n def decision_function(self, X):\n \"\"\"Predict raw anomaly score of X using the fitted detector.\n\n The anomaly score of an input sample is computed based on different\n detector algorithms. For consistency, outliers are assigned with\n larger anomaly scores.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only\n if they are supported by the base estimator.\n\n Returns\n -------\n anomaly_scores : numpy array of shape (n_samples,)\n The anomaly score of the input samples.\n \"\"\"\n check_is_fitted(self, ['decision_scores_', 'threshold_', 'labels_'])\n X = check_array(X)\n labels = self.clustering_estimator_.predict(X)\n return self._decision_function(X, labels)\n\n def _validate_estimator(self, default=None):\n \"\"\"Check the value of alpha and beta and clustering algorithm.\n \"\"\"\n check_parameter(self.alpha, low=0, high=1, param_name='alpha',\n include_left=False, include_right=False)\n\n check_parameter(self.beta, low=1, param_name='beta',\n include_left=False)\n\n if self.clustering_estimator is not None:\n self.clustering_estimator_ = self.clustering_estimator\n else:\n self.clustering_estimator_ = default\n\n # make sure the base clustering algorithm is valid\n if self.clustering_estimator_ is None:\n raise ValueError(\"clustering algorithm cannot be None\")\n\n if self.check_estimator:\n check_estimator(self.clustering_estimator_)\n\n def _set_cluster_centers(self, X, n_features):\n # Noted not all clustering algorithms have cluster_centers_\n if hasattr(self.clustering_estimator_, 'cluster_centers_'):\n self.cluster_centers_ = self.clustering_estimator_.cluster_centers_\n else:\n # Set the cluster center as the mean of all the samples within\n # the cluster\n warnings.warn(\"The chosen clustering for CBLOF does not have\"\n \"the center of clusters. 
Calculate the center\"\n \"as the mean of the clusters.\")\n self.cluster_centers_ = np.zeros([self.n_clusters_, n_features])\n for i in range(self.n_clusters_):\n self.cluster_centers_[i, :] = np.mean(\n X[np.where(self.cluster_labels_ == i)], axis=0)\n\n def _set_small_large_clusters(self, n_samples):\n # Sort the index of clusters by the number of samples belonging to it\n size_clusters = np.bincount(self.cluster_labels_)\n\n # Sort the order from the largest to the smallest\n sorted_cluster_indices = np.argsort(size_clusters * -1)\n\n # Initialize the lists of index that fulfill the requirements by\n # either alpha or beta\n alpha_list = []\n beta_list = []\n\n for i in range(1, self.n_clusters_):\n temp_sum = np.sum(size_clusters[sorted_cluster_indices[:i]])\n if temp_sum >= n_samples * self.alpha:\n alpha_list.append(i)\n\n if size_clusters[sorted_cluster_indices[i - 1]] / size_clusters[\n sorted_cluster_indices[i]] >= self.beta:\n beta_list.append(i)\n\n # Find the separation index fulfills both alpha and beta\n intersection = np.intersect1d(alpha_list, beta_list)\n\n if len(intersection) > 0:\n self._clustering_threshold = intersection[0]\n elif len(alpha_list) > 0:\n self._clustering_threshold = alpha_list[0]\n elif len(beta_list) > 0:\n self._clustering_threshold = beta_list[0]\n else:\n raise ValueError(\"Could not form valid cluster separation. Please \"\n \"change n_clusters or change clustering method\")\n\n self.small_cluster_labels_ = sorted_cluster_indices[\n self._clustering_threshold:]\n self.large_cluster_labels_ = sorted_cluster_indices[\n 0:self._clustering_threshold]\n\n # No need to calculate small cluster center\n # self.small_cluster_centers_ = self.cluster_centers_[\n # self.small_cluster_labels_]\n\n self._large_cluster_centers = self.cluster_centers_[\n self.large_cluster_labels_]\n\n def _decision_function(self, X, labels):\n # Initialize the score array\n scores = np.zeros([X.shape[0], ])\n\n small_indices = np.where(\n np.isin(labels, self.small_cluster_labels_))[0]\n large_indices = np.where(\n np.isin(labels, self.large_cluster_labels_))[0]\n\n if small_indices.shape[0] != 0:\n # Calculate the outlier factor for the samples in small clusters\n dist_to_large_center = cdist(X[small_indices, :],\n self._large_cluster_centers)\n\n scores[small_indices] = np.min(dist_to_large_center, axis=1)\n\n if large_indices.shape[0] != 0:\n # Calculate the outlier factor for the samples in large clusters\n large_centers = self.cluster_centers_[labels[large_indices]]\n\n scores[large_indices] = pairwise_distances_no_broadcast(\n X[large_indices, :], large_centers)\n\n if self.use_weights:\n # Weights are calculated as the number of elements in the cluster\n scores = scores / self.cluster_sizes_[labels]\n\n return scores.ravel()\n"
] | [
[
"numpy.bincount",
"sklearn.utils.validation.check_is_fitted",
"numpy.zeros",
"numpy.sum",
"sklearn.utils.validation.check_array",
"sklearn.cluster.KMeans",
"numpy.min",
"sklearn.utils.estimator_checks.check_estimator",
"numpy.where",
"numpy.intersect1d",
"numpy.argsort",
"scipy.spatial.distance.cdist",
"numpy.isin"
]
] |
NDM-123/AISearch | [
"b047d19609a60017855ee724666819949ffa3da6"
] | [
"AIsearch.py"
] | [
"import numpy \r\nimport sys \r\nimport nmslib \r\nimport time \r\nimport math\r\nimport pandas as pd\r\nfrom sklearn.neighbors import NearestNeighbors\r\nfrom sklearn.model_selection import train_test_split\r\nprint(sys.version)\r\nprint(\"NMSLIB version:\", nmslib.__version__)\r\n\r\n\r\n# Just read the data\r\nall_data_matrix = pd.read_csv(\"TimeBasedFeatures-10s-Layer2.csv\")\r\ndf = all_data_matrix.drop('Source IP', axis=1)\r\n# df = all_data_matrix.drop('Source Port', axis=1, inplace=True)\r\n# label = df[['label']]\r\n# df = df.drop('label', axis=1)\r\ndf = df.drop(' Destination IP', axis=1)\r\n\r\nfrom sklearn import preprocessing\r\n\r\nle = preprocessing.LabelEncoder()\r\nle.fit(df.label)\r\ndf['label'] = le.transform(df.label)\r\n\r\nall_data_matrix = df.to_numpy()\r\n# all_data_matrix = numpy.loadtxt(open(\"TBF.csv\", \"rb\"), delimiter=\",\", skiprows=1)\r\n# numpy.loadtxt(open(\"test.csv\", \"rb\"), delimiter=\",\", skiprows=1)\r\n\r\n# Create a held-out query data set\r\n(data_matrix, query_matrix) = train_test_split(all_data_matrix, test_size = 0.1)\r\n\r\nprint(\"# of queries %d, # of data points %d\" % (query_matrix.shape[0], data_matrix.shape[0]) )\r\n\r\n# Set index parameters\r\n# These are the most important onese\r\nNN = 15\r\nefC = 100\r\n\r\nnum_threads = 4\r\nindex_time_params = {'NN': NN, 'indexThreadQty': num_threads, 'efConstruction': efC}\r\n\r\n# Number of neighbors\r\nK=100\r\n\r\n# Space name should correspond to the space name\r\n# used for brute-force search\r\nspace_name='kldivgenfast'\r\n\r\n\r\n# Intitialize the library, specify the space, the type of the vector and add data points\r\nindex = nmslib.init(method='sw-graph', space=space_name, data_type=nmslib.DataType.DENSE_VECTOR)\r\nindex.addDataPointBatch(data_matrix)\r\n\r\n\r\n# Create an index\r\nstart = time.time()\r\nindex.createIndex(index_time_params)\r\nend = time.time()\r\nprint('Index-time parameters', index_time_params)\r\nprint('Indexing time = %f' % (end-start))\r\n\r\n# Setting query-time parameters\r\nefS = 100\r\nquery_time_params = {'efSearch': efS}\r\nprint('Setting query-time parameters', query_time_params)\r\nindex.setQueryTimeParams(query_time_params)\r\n\r\n\r\n# Querying\r\nquery_qty = query_matrix.shape[0]\r\nstart = time.time()\r\nnbrs = index.knnQueryBatch(query_matrix, k = K, num_threads = num_threads)\r\nend = time.time()\r\nprint('kNN time total=%f (sec), per query=%f (sec), per query adjusted for thread number=%f (sec)' %\r\n (end-start, float(end-start)/query_qty, num_threads*float(end-start)/query_qty))\r\n\r\n# Computing gold-standard data\r\nprint('Computing gold-standard data')\r\n\r\nstart = time.time()\r\n\r\ngs = []\r\n\r\nquery_qty = query_matrix.shape[0]\r\ndata_qty = data_matrix.shape[0]\r\n\r\nfor i in range(query_qty):\r\n q = query_matrix[i, :]\r\n d = numpy.log(data_matrix * (1.0 / q))\r\n dist_vals = numpy.sum(data_matrix * d, axis=1)\r\n tmp = [(dist_vals[i], i) for i in range(data_qty)]\r\n tmp.sort()\r\n gs.append([tmp[i][1] for i in range(K)])\r\n\r\nend = time.time()\r\n\r\nprint('brute-force kNN time total=%f (sec), per query=%f (sec)' %\r\n (end - start, float(end - start) / query_qty))\r\n\r\n# Finally computing recall\r\nrecall=0.0\r\nfor i in range(0, query_qty):\r\n correct_set = set(gs[i])\r\n ret_set = set(nbrs[i][0])\r\n recall = recall + float(len(correct_set.intersection(ret_set))) / len(correct_set)\r\nrecall = recall / query_qty\r\nprint('kNN recall %f' % recall)\r\n\r\n\r\n# Save a meta index and the 
data\r\nindex.saveIndex('dense_index_kldiv.txt', save_data=True)\r\n\r\n\r\n# Re-initialize the library, specify the space, the type of the vector.\r\nnewIndex = nmslib.init(method='sw-graph', space=space_name, data_type=nmslib.DataType.DENSE_VECTOR)\r\n\r\n\r\n# Re-load the index and the data\r\nnewIndex.loadIndex('dense_index_kldiv.txt', load_data=True)\r\n\r\n\r\n# Setting query-time parameters and querying\r\nprint('Setting query-time parameters', query_time_params)\r\nnewIndex.setQueryTimeParams(query_time_params)\r\n\r\nquery_qty = query_matrix.shape[0]\r\nstart = time.time()\r\nnew_nbrs = newIndex.knnQueryBatch(query_matrix, k = K, num_threads = num_threads)\r\nend = time.time()\r\nprint('kNN time total=%f (sec), per query=%f (sec), per query adjusted for thread number=%f (sec)' %\r\n (end-start, float(end-start)/query_qty, num_threads*float(end-start)/query_qty))\r\n\r\n\r\n\r\n# Finally computing recall for the re-loaded index against the brute-force gold standard\r\nrecall=0.0\r\nfor i in range(0, query_qty):\r\n correct_set = set(gs[i])\r\n ret_set = set(new_nbrs[i][0])\r\n recall = recall + float(len(correct_set.intersection(ret_set))) / len(correct_set)\r\nrecall = recall / query_qty\r\nprint('kNN recall %f' % recall)\r\n"
] | [
[
"sklearn.preprocessing.LabelEncoder",
"numpy.log",
"numpy.sum",
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
gmetsov/slow-momentum-fast-reversion | [
"861c13ff2ea1d9e0b24523356c574c3ceaacf5bb"
] | [
"src/classical_strategies.py"
] | [
"import numpy as np\nimport pandas as pd\n\nfrom typing import List, Tuple\n\nVOL_LOOKBACK = 60 # for ex-ante volatility\nVOL_TARGET = 0.15 # 15% volatility target\n\ndef calc_returns(srs: pd.Series, day_offset: int = 1) -> pd.Series:\n \"\"\"for each element of a pandas time-series srs,\n calculates the returns over the past number of days\n specified by offset\n\n Args:\n srs (pd.Series): time-series of prices\n day_offset (int, optional): number of days to calculate returns over. Defaults to 1.\n\n Returns:\n pd.Series: series of returns\n \"\"\"\n returns = srs / srs.shift(day_offset) - 1.0\n return returns\n\n\ndef calc_daily_vol(daily_returns):\n return (\n daily_returns.ewm(span=VOL_LOOKBACK, min_periods=VOL_LOOKBACK)\n .std()\n .fillna(method=\"bfill\")\n )\n\n\ndef calc_vol_scaled_returns(daily_returns, daily_vol=pd.Series(None)):\n \"\"\"calculates volatility scaled returns for annualised VOL_TARGET of 15%\n with input of pandas series daily_returns\"\"\"\n if not len(daily_vol):\n daily_vol = calc_daily_vol(daily_returns)\n annualised_vol = daily_vol * np.sqrt(252) # annualised\n return daily_returns * VOL_TARGET / annualised_vol.shift(1)\n\n\ndef calc_trend_intermediate_strategy(\n srs: pd.Series, w: float, volatility_scaling=True\n) -> pd.Series:\n \"\"\"Calculate intermediate strategy\n\n Args:\n srs (pd.Series): series of prices\n w (float): weight, w=0 is Moskowitz TSMOM\n volatility_scaling (bool, optional): [description]. Defaults to True.\n\n Returns:\n pd.Series: series of captured returns\n \"\"\"\n daily_returns = calc_returns(srs)\n monthly_returns = calc_returns(srs, 21)\n annual_returns = calc_returns(srs, 252)\n\n next_day_returns = (\n calc_vol_scaled_returns(daily_returns).shift(-1)\n if volatility_scaling\n else daily_returns.shift(-1)\n )\n\n return (\n w * np.sign(monthly_returns) * next_day_returns\n + (1 - w) * np.sign(annual_returns) * next_day_returns\n )\n\n\nclass MACDStrategy:\n def __init__(self, trend_combinations: List[Tuple[float, float]] = None):\n \"\"\"Used to calculated the combined MACD signal for a multiple short/signal combinations,\n as described in https://arxiv.org/pdf/1904.04912.pdf\n\n Args:\n trend_combinations (List[Tuple[float, float]], optional): short/long trend combinations. Defaults to None.\n \"\"\"\n if trend_combinations is None:\n self.trend_combinations = [(8, 24), (16, 48), (32, 96)]\n else:\n self.trend_combinations = trend_combinations\n\n @staticmethod\n def calc_signal(srs: pd.Series, short_timescale: int, long_timescale: int) -> float:\n \"\"\"Calculate MACD signal for a signal short/long timescale combination\n\n Args:\n srs ([type]): series of prices\n short_timescale ([type]): short timescale\n long_timescale ([type]): long timescale\n\n Returns:\n float: MACD signal\n \"\"\"\n\n def _calc_halflife(timescale):\n return np.log(0.5) / np.log(1 - 1 / timescale)\n\n macd = (\n srs.ewm(halflife=_calc_halflife(short_timescale)).mean()\n - srs.ewm(halflife=_calc_halflife(long_timescale)).mean()\n )\n q = macd / srs.rolling(63).std().fillna(method=\"bfill\")\n return q / q.rolling(252).std().fillna(method=\"bfill\")\n\n @staticmethod\n def scale_signal(y):\n return y * np.exp(-(y ** 2) / 4) / 0.89\n\n def calc_combined_signal(self, srs: pd.Series) -> float:\n \"\"\"Combined MACD signal\n\n Args:\n srs (pd.Series): series of prices\n\n Returns:\n float: MACD combined signal\n \"\"\"\n return np.sum(\n [self.calc_signal(srs, S, L) for S, L in self.trend_combinations]\n ) / len(self.trend_combinations)\n"
] | [
[
"numpy.log",
"numpy.exp",
"numpy.sign",
"numpy.sqrt",
"pandas.Series"
]
] |
Jiawei-Li20/cogdl | [
"a5540e018661d8a62f40282f2bfdde87e766c76f"
] | [
"cogdl/tasks/node_classification.py"
] | [
"import argparse\nimport copy\nfrom typing import Optional\nimport scipy.sparse as sp\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\n\nfrom cogdl.datasets import build_dataset\nfrom cogdl.models import build_model\nfrom cogdl.models.supervised_model import SupervisedHomogeneousNodeClassificationModel\nfrom cogdl.trainers.supervised_trainer import (\n SupervisedHomogeneousNodeClassificationTrainer,\n)\nfrom cogdl.trainers.sampled_trainer import SAINTTrainer\n\nfrom . import BaseTask, register_task\n\n\ndef normalize_adj_row(adj):\n \"\"\"Row-normalize sparse matrix\"\"\"\n rowsum = np.array(adj.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.0\n r_mat_inv = sp.diags(r_inv)\n mx = r_mat_inv.dot(adj)\n return mx\n\n\ndef to_torch_sparse(sparse_mx):\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)\n\n\ndef row_l1_normalize(X):\n norm = 1e-6 + X.sum(dim=1, keepdim=True)\n return X / norm\n\n\ndef preprocess_data_sgcpn(data, normalize_feature=True, missing_rate=0):\n data.train_mask = data.train_mask.type(torch.bool)\n data.val_mask = data.val_mask.type(torch.bool)\n # expand test_mask to all rest nodes\n data.test_mask = ~(data.train_mask + data.val_mask)\n # get adjacency matrix\n n = len(data.x)\n adj = sp.csr_matrix((np.ones(data.edge_index.shape[1]), data.edge_index), shape=(n, n))\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj) + sp.eye(adj.shape[0])\n adj = normalize_adj_row(adj)\n data.adj = to_torch_sparse(adj).to_dense()\n if normalize_feature:\n data.x = row_l1_normalize(data.x)\n erasing_pool = torch.arange(n)[~data.train_mask]\n size = int(len(erasing_pool) * (missing_rate / 100))\n idx_erased = np.random.choice(erasing_pool, size=size, replace=False)\n if missing_rate > 0:\n data.x[idx_erased] = 0\n return data\n\n\n@register_task(\"node_classification\")\nclass NodeClassification(BaseTask):\n \"\"\"Node classification task.\"\"\"\n\n @staticmethod\n def add_args(parser: argparse.ArgumentParser):\n \"\"\"Add task-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument(\"--missing-rate\", type=int, default=-1)\n # fmt: on\n\n def __init__(\n self,\n args,\n dataset=None,\n model: Optional[SupervisedHomogeneousNodeClassificationModel] = None,\n ):\n super(NodeClassification, self).__init__(args)\n\n self.args = args\n self.model_name = args.model\n self.device = args.device_id[0] if not args.cpu else \"cpu\"\n dataset = build_dataset(args) if dataset is None else dataset\n if args.missing_rate >= 0:\n if args.model == \"sgcpn\":\n assert args.dataset in [\"cora\", \"citeseer\", \"pubmed\"]\n dataset.data = preprocess_data_sgcpn(dataset.data, normalize_feature=True, missing_rate=0)\n adj_slice = torch.tensor(dataset.data.adj.size())\n adj_slice[0] = 0\n dataset.slices[\"adj\"] = adj_slice\n\n self.dataset = dataset\n self.data = dataset[0]\n\n # add dropedge args\n self.dropedge = float(args.dropedge)\n # store the original edge index\n self.original_edge_idx = torch.tensor(self.data.edge_index)\n self.original_edge_num = self.original_edge_idx.shape[1]\n\n args.num_features = dataset.num_features\n args.num_classes = dataset.num_classes\n args.num_nodes = 
dataset.data.x.shape[0]\n\n self.model: SupervisedHomogeneousNodeClassificationModel = build_model(args) if model is None else model\n self.model.set_device(self.device)\n\n self.trainer: Optional[SupervisedHomogeneousNodeClassificationTrainer] = (\n self.model.get_trainer(NodeClassification, self.args)(self.args)\n if self.model.get_trainer(NodeClassification, self.args)\n else None\n )\n\n if not self.trainer:\n self.optimizer = (\n torch.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n if not hasattr(self.model, \"get_optimizer\")\n else self.model.get_optimizer(args)\n )\n self.data.apply(lambda x: x.to(self.device))\n self.model: SupervisedHomogeneousNodeClassificationModel = self.model.to(self.device)\n self.patience = args.patience\n self.max_epoch = args.max_epoch\n\n def train(self):\n if self.trainer:\n if isinstance(self.trainer, SAINTTrainer):\n self.model = self.trainer.fit(self.model, self.dataset)\n self.data.apply(lambda x: x.to(self.device))\n else:\n result = self.trainer.fit(self.model, self.dataset)\n if issubclass(type(result), torch.nn.Module):\n self.model = result\n else:\n return result\n else:\n epoch_iter = tqdm(range(self.max_epoch))\n patience = 0\n best_score = 0\n best_loss = np.inf\n max_score = 0\n min_loss = np.inf\n best_model = copy.deepcopy(self.model)\n for epoch in epoch_iter:\n self._train_step()\n train_acc, _ = self._test_step(split=\"train\")\n val_acc, val_loss = self._test_step(split=\"val\")\n epoch_iter.set_description(f\"Epoch: {epoch:03d}, Train: {train_acc:.4f}, Val: {val_acc:.4f}\")\n if val_loss <= min_loss or val_acc >= max_score:\n if val_loss <= best_loss: # and val_acc >= best_score:\n best_loss = val_loss\n best_score = val_acc\n best_model = copy.deepcopy(self.model)\n min_loss = np.min((min_loss, val_loss))\n max_score = np.max((max_score, val_acc))\n patience = 0\n else:\n patience += 1\n if patience == self.patience:\n epoch_iter.close()\n break\n print(f\"Valid accurracy = {best_score}\")\n self.model = best_model\n test_acc, _ = self._test_step(split=\"test\")\n val_acc, _ = self._test_step(split=\"val\")\n print(f\"Test accuracy = {test_acc}\")\n return dict(Acc=test_acc, ValAcc=val_acc)\n\n def _train_step(self):\n self.model.train()\n self.optimizer.zero_grad()\n\n # drop the edge\n remaining_edge_num = int((1 - self.dropedge) * self.original_edge_num)\n perm = np.random.permutation(self.original_edge_num)\n remaining_edge = perm[:remaining_edge_num]\n self.data.edge_index = self.original_edge_idx[:, remaining_edge]\n\n self.model.node_classification_loss(self.data).backward()\n self.optimizer.step()\n\n def _test_step(self, split=\"val\", logits=None):\n self.model.eval()\n logits = logits if logits else self.model.predict(self.data)\n logits = F.log_softmax(logits, dim=-1)\n if split == \"train\":\n mask = self.data.train_mask\n elif split == \"val\":\n mask = self.data.val_mask\n else:\n mask = self.data.test_mask\n loss = F.nll_loss(logits[mask], self.data.y[mask]).item()\n\n pred = logits[mask].max(1)[1]\n acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()\n return acc, loss\n"
] | [
[
"torch.Size",
"numpy.isinf",
"numpy.max",
"numpy.random.choice",
"scipy.sparse.diags",
"torch.arange",
"numpy.random.permutation",
"numpy.ones",
"numpy.min",
"torch.sparse.FloatTensor",
"torch.from_numpy",
"scipy.sparse.eye",
"torch.nn.functional.log_softmax",
"torch.tensor",
"numpy.power",
"torch.nn.functional.nll_loss",
"numpy.vstack"
]
] |
richardmeng/gym | [
"a6c06fc1e3e8842c3e3fffd9e701dd1beeba5541"
] | [
"gym/envs/classic_control/cartpole.py"
] | [
"\"\"\"\nClassic cart-pole system implemented by Rich Sutton et al.\nCopied from https://webdocs.cs.ualberta.ca/~sutton/book/code/pole.c\n\"\"\"\n\nimport logging\nimport math\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\nclass CartPoleEnv(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : 50\n }\n\n def __init__(self):\n self.gravity = 9.8\n self.masscart = 1.0\n self.masspole = 0.1\n self.total_mass = (self.masspole + self.masscart)\n self.length = 0.5 # actually half the pole's length\n self.polemass_length = (self.masspole * self.length)\n self.force_mag = 10.0\n self.tau = 0.02 # seconds between state updates\n\n # Angle at which to fail the episode\n self.theta_threshold_radians = 12 * 2 * math.pi / 360\n self.x_threshold = 2.4\n\n # Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds\n high = np.array([\n self.x_threshold * 2,\n np.finfo(np.float32).max,\n self.theta_threshold_radians * 2,\n np.finfo(np.float32).max])\n\n self.action_space = spaces.Discrete(2)\n self.observation_space = spaces.Box(-high, high)\n\n self._seed()\n self.reset()\n self.viewer = None\n\n self.steps_beyond_done = None\n\n # Just need to initialize the relevant attributes\n self._configure()\n\n def _configure(self, display=None):\n self.display = display\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _step(self, action):\n assert self.action_space.contains(action), \"%r (%s) invalid\"%(action, type(action))\n state = self.state\n x, x_dot, theta, theta_dot = state\n force = self.force_mag if action==1 else -self.force_mag\n costheta = math.cos(theta)\n sintheta = math.sin(theta)\n temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass\n thetaacc = (self.gravity * sintheta - costheta* temp) / (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))\n xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass\n x = x + self.tau * x_dot\n x_dot = x_dot + self.tau * xacc\n theta = theta + self.tau * theta_dot\n theta_dot = theta_dot + self.tau * thetaacc\n self.state = (x,x_dot,theta,theta_dot)\n done = x < -self.x_threshold \\\n or x > self.x_threshold \\\n or theta < -self.theta_threshold_radians \\\n or theta > self.theta_threshold_radians\n done = bool(done)\n\n if not done:\n reward = 1.0\n elif self.steps_beyond_done is None:\n # Pole just fell!\n self.steps_beyond_done = 0\n reward = 1.0\n else:\n if self.steps_beyond_done == 0:\n logger.warn(\"You are calling 'step()' even though this environment has already returned done = True. 
You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.\")\n self.steps_beyond_done += 1\n reward = 0.0\n\n return np.array(self.state), reward, done, {}\n\n def _reset(self):\n self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))\n self.steps_beyond_done = None\n return np.array(self.state)\n\n def _render(self, mode='human', close=False):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return\n\n screen_width = 600\n screen_height = 400\n\n world_width = self.x_threshold*2\n scale = screen_width/world_width\n carty = 100 # TOP OF CART\n polewidth = 10.0\n polelen = scale * 1.0\n cartwidth = 50.0\n cartheight = 30.0\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(screen_width, screen_height, display=self.display)\n l,r,t,b = -cartwidth/2, cartwidth/2, cartheight/2, -cartheight/2\n axleoffset =cartheight/4.0\n cart = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])\n self.carttrans = rendering.Transform()\n cart.add_attr(self.carttrans)\n self.viewer.add_geom(cart)\n l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2\n pole = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])\n pole.set_color(.8,.6,.4)\n self.poletrans = rendering.Transform(translation=(0, axleoffset))\n pole.add_attr(self.poletrans)\n pole.add_attr(self.carttrans)\n self.viewer.add_geom(pole)\n self.axle = rendering.make_circle(polewidth/2)\n self.axle.add_attr(self.poletrans)\n self.axle.add_attr(self.carttrans)\n self.axle.set_color(.5,.5,.8)\n self.viewer.add_geom(self.axle)\n self.track = rendering.Line((0,carty), (screen_width,carty))\n self.track.set_color(0,0,0)\n self.viewer.add_geom(self.track)\n\n x = self.state\n cartx = x[0]*scale+screen_width/2.0 # MIDDLE OF CART\n self.carttrans.set_translation(cartx, carty)\n self.poletrans.set_rotation(-x[2])\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n"
] | [
[
"numpy.finfo",
"numpy.array"
]
] |
aaronjones111/PassGAN | [
"2315bdabca7df99917f94a2ceb9de43e55a32618"
] | [
"models3.py"
] | [
"import tensorflow as tf\nimport tflib as lib\nimport tflib.ops.linear\nimport tflib.ops.conv1d\n\n\"\"\"\nUse this file for experimenting\n\ncompanion to samples3.py\n\"\"\"\n\ndef ResBlock(name, inputs, dim):\n # print(\"- Creating ResBlock -\")\n output = inputs\n output = tf.nn.relu(output)\n output = lib.ops.conv1d.Conv1D(name+'.1', dim, dim, 5, output)\n # print(\"After conv:\", output)\n output = tf.nn.relu(output)\n output = lib.ops.conv1d.Conv1D(name+'.2', dim, dim, 5, output)\n return inputs + (0.3*output)\n\ndef Generator(n_samples, seq_len, layer_dim, output_dim, prev_outputs=None):\n print(\"- Creating Generator -\")\n output = make_noise(shape=[n_samples, 128])\n print(\"Initialized:\", output)\n output = lib.ops.linear.Linear('Generator.Input', 128, seq_len * layer_dim, output)\n print(\"Lineared:\", output)\n output = tf.reshape(output, [-1, seq_len, layer_dim,])\n print(\"Reshaped:\", output)\n output = ResBlock('Generator.1', output, layer_dim)\n output = ResBlock('Generator.2', output, layer_dim)\n output = ResBlock('Generator.3', output, layer_dim)\n output = ResBlock('Generator.4', output, layer_dim)\n output = ResBlock('Generator.5', output, layer_dim)\n output = lib.ops.conv1d.Conv1D('Generator.Output', layer_dim, output_dim, 1, output)\n output = softmax(output, output_dim)\n return output\n\ndef Discriminator(inputs, seq_len, layer_dim, input_dim):\n output = inputs\n output = lib.ops.conv1d.Conv1D('Discriminator.Input', input_dim, layer_dim, 1, output)\n output = ResBlock('Discriminator.1', output, layer_dim)\n output = ResBlock('Discriminator.2', output, layer_dim)\n output = ResBlock('Discriminator.3', output, layer_dim)\n output = ResBlock('Discriminator.4', output, layer_dim)\n output = ResBlock('Discriminator.5', output, layer_dim)\n output = tf.reshape(output, [-1, seq_len * layer_dim])\n output = lib.ops.linear.Linear('Discriminator.Output', seq_len * layer_dim, 1, output)\n return output\n\ndef softmax(logits, num_classes):\n return tf.reshape(\n tf.nn.softmax(\n tf.reshape(logits, [-1, num_classes])\n ),\n tf.shape(logits)\n )\n\ndef make_noise(shape):\n return tf.random_normal(shape)\n"
] | [
[
"tensorflow.random_normal",
"tensorflow.shape",
"tensorflow.nn.relu",
"tensorflow.reshape"
]
] |
gafergus/CV_scientist | [
"7feb42c72e91d677c317e97c916be3eef0d8e270"
] | [
"cv_framework/cv_scientist.py"
] | [
"import gin\r\nimport math\r\nimport os\r\nimport pandas as pd\r\nfrom tqdm import tqdm\r\nfrom cv_framework.data_access.data_prep import FilePrep\r\nfrom cv_framework.model_definitions.model_utils import data_shape\r\nfrom cv_framework.training.model_comp import comp_model\r\nfrom cv_framework.data_access.generators import directory_flow\r\nfrom cv_framework.training.train import save_model, fit_generator\r\nfrom cv_framework.diagnostics import basic_diagnostics\r\nfrom cv_framework.model_definitions import standard_arch_dict\r\n\r\[email protected]\r\nclass CompVisExperiment:\r\n\r\n def __init__(self,\r\n base_directory=None,\r\n image_directory=None,\r\n experiment_name=None,\r\n labels_csv=None,\r\n file_name_column='file_name',\r\n labels_column='class',\r\n use_symlinks=False):\r\n\r\n exp_dir = os.path.join(base_directory, str(experiment_name))\r\n if not os.path.exists(exp_dir):\r\n try:\r\n os.makedirs(exp_dir)\r\n except Exception as e:\r\n print(f'Could not make experimental directory: {e}')\r\n\r\n print(\"Building Train/Test/Validation data directories.\")\r\n if not image_directory:\r\n image_directory = os.path.join(exp_dir, 'images')\r\n\r\n labels_csv_path = os.path.join(base_directory, labels_csv)\r\n file_prep = FilePrep(\r\n exp_directory=exp_dir,\r\n image_directory=image_directory,\r\n labels_csv_path=labels_csv_path,\r\n file_name_column=file_name_column,\r\n labels_column=labels_column,\r\n use_symlinks=use_symlinks)\r\n file_prep.create_modeling_dataset()\r\n\r\n self.unique_class_labels = file_prep.label_names\r\n self.train_dir = os.path.join(exp_dir + '/train')\r\n self.test_dir = os.path.join(exp_dir + '/test')\r\n\r\n\r\n def standard_models(self):\r\n print(list(standard_arch_dict.standard_dict.keys()))\r\n\r\n def _build_model_dict(self, model_dict):\r\n model_dictionary = {}\r\n for arch_name, model_list in model_dict.items():\r\n for model in model_list:\r\n model_dictionary[model] = standard_arch_dict.standard_dict[arch_name]\r\n return model_dictionary\r\n\r\n @gin.configurable\r\n def build_models(self, model_dict, summary=True):\r\n model_dictionary = self._build_model_dict(model_dict)\r\n compiled_models = {}\r\n for model_name, model_arch in model_dictionary.items():\r\n with gin.config_scope(model_name):\r\n _, in_shape, out_shape = data_shape()\r\n cnn_model = comp_model(\r\n model_name=model_name,\r\n model_arch=model_arch,\r\n input_shape=in_shape,\r\n classes=out_shape)\r\n compiled_models[model_name] = cnn_model\r\n if summary:\r\n compiled_models[model_name].summary()\r\n return compiled_models\r\n\r\n @gin.configurable\r\n def train_models(self, train_list, compiled_models, model_type='bin_classifier', save_figs=False,\r\n print_class_rep=True):\r\n score_dict = {}\r\n for model_name in tqdm(train_list):\r\n history_dict = {}\r\n with gin.config_scope(model_name):\r\n image_size, _, _ = data_shape()\r\n #print(f'\\nModel name: {model_name} \\nImage Size: {image_size}')\r\n train_gen = directory_flow(dir=self.train_dir, shuffle=True, image_size=image_size)\r\n test_gen = directory_flow(dir=self.test_dir, shuffle=False, image_size=image_size)\r\n save_name = str(model_name) + ('.h5')\r\n history = fit_generator(\r\n model_name=model_name,\r\n model=compiled_models[model_name],\r\n gen=train_gen,\r\n validation_data=test_gen)\r\n save_model(model=compiled_models[model_name], model_name=save_name)\r\n history_dict[model_name] = history.history\r\n test_gen.reset()\r\n preds = compiled_models[model_name].predict_generator(\r\n test_gen,\r\n 
verbose=1,\r\n steps=math.ceil(len(test_gen.classes)/test_gen.batch_size)\r\n )\r\n score_dict[model_name] = self.score_models(\r\n preds, model_name,\r\n history=history_dict[model_name],\r\n save_figs=save_figs,\r\n model_type=model_type,\r\n print_class_rep=print_class_rep,\r\n test_gen=test_gen)\r\n\r\n model_table = pd.DataFrame(score_dict).transpose().reset_index().rename(mapper={'index':'Model_Name'}, axis=1)\r\n return compiled_models, model_table\r\n\r\n @gin.configurable\r\n def score_models(self, preds, model, history=None, save_figs=False, model_type=None, print_class_rep=None,\r\n test_gen=None):\r\n if model_type == 'bin_classifier':\r\n return self._score_binary_classifiers(preds, model, history, save_figs, print_class_rep, test_gen)\r\n elif model_type == 'multiclass':\r\n return self._score_multi_classifiers(preds, model, history, save_figs, print_class_rep, test_gen)\r\n\r\n @gin.configurable\r\n def _score_binary_classifiers(self, preds, model, history, save_figs, print_class_rep, test_gen):\r\n model = str(model)\r\n sens, spec, roc_auc, class_rep, TP, TN, FP, FN, PPV, NPV, FPR, FNR = basic_diagnostics.binary_metrics(\r\n test_gen.classes, preds, history=history, save_figs=save_figs, class_names=self.unique_class_labels,\r\n model_name=model)\r\n model_scores = {'Sensitivity':sens, 'Specificity':spec, 'ROC_AUC_SCORE':roc_auc, 'True_Positives':TP,\r\n 'True_Negatives':TN, 'False_Positives':FP, 'False_Negatives':FN,\r\n 'Positive_Predictive_Value':PPV, 'Negative_Predictive_Value':NPV, 'False_Positive_Rate':FPR,\r\n 'False_Negative_Rate':FNR}\r\n\r\n if print_class_rep:\r\n print(class_rep)\r\n\r\n return model_scores\r\n\r\n @gin.configurable\r\n def _score_multi_classifiers(self, preds, model, history, save_figs, print_class_rep, test_gen):\r\n model = str(model)\r\n class_rep, TP, TN, FP, FN, PPV, NPV, FPR, FNR = basic_diagnostics.multi_metrics(\r\n test_gen.classes, preds, history=history, save_figs=save_figs, class_names=self.unique_class_labels,\r\n model_name=model)\r\n model_scores = {'True_Positives':TP, 'True_Negatives':TN, 'False_Positives':FP, 'False_Negatives':FN,\r\n 'Positive_Predictive_Value':PPV, 'Negative_Predictive_Value':NPV, 'False_Positive_Rate':FPR,\r\n 'False_Negative_Rate':FNR}\r\n\r\n if print_class_rep:\r\n print(class_rep)\r\n\r\n return model_scores\r\n\r\n\r\n\r\n"
] | [
[
"pandas.DataFrame"
]
] |
CedricDViou/nenupy-tf | [
"cc048da782b4e3584e5406558c89eaae04425c09"
] | [
"nenupytf/process/analysis.py"
] | [
"#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\n ********\n analysis\n ********\n\n Test de docstring\n\"\"\"\n\n\n__author__ = ['Alan Loh']\n__copyright__ = 'Copyright 2019, nenupytf'\n__credits__ = ['Alan Loh']\n__maintainer__ = 'Alan Loh'\n__email__ = '[email protected]'\n__status__ = 'Production'\n__all__ = [\n 'switch_shape',\n 'find_switches',\n 'gain_jumps',\n ]\n\n\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom nenupytf.stokes import SpecData\n\n\n# ============================================================= #\n# ------------------------ switch_shape ----------------------- #\n# ============================================================= #\ndef switch_shape(x, y):\n \"\"\" Each time an analog pointing happens, the switch commuters\n may have a variable degraded response that we need to\n correct for.\n This function aims at fitting such a profile in order to\n correct for this effect.\n\n Parameters\n ----------\n x : `np.ndarray`\n Time converted in float (MJD or JD)\n y : `np.ndarray`\n Profile we want to fit in dB.\n\n Returns\n -------\n shape : `np.ndarray`\n The fitted profile in dB, same dimensions as `x` and `y`\n \"\"\"\n def switch_fit(x, a, b, c, d):\n \"\"\" Exponential growth to a plateau\n \"\"\"\n return a*(1 - np.exp(-b*x + c)) + d\n errors = np.repeat(\n np.std(y)/10,\n y.size)\n errors[0] *= 1e-1\n p_opt, p_cov = curve_fit(\n switch_fit,\n x - x[0],\n switches[t_mask],\n sigma=errors\n )\n a, b, c, d = p_opt\n return switch_fit(x - x[0], a, b, c, d)\n\n\n# ============================================================= #\n# ----------------------- find_switches ----------------------- #\n# ============================================================= #\ndef find_switches(x, y):\n return\n\n\n# ============================================================= #\n# ------------------------ gain_jumps ------------------------- #\n# ============================================================= #\ndef gain_jumps(spec):\n \"\"\"\n \"\"\"\n if not isinstance(spec, SpecData):\n raise TypeError(\n 'This method works with a SpecData object'\n )\n medf = np.median(spec.amp, axis=1)\n\n\n\n"
] | [
[
"numpy.median",
"numpy.std",
"scipy.optimize.curve_fit",
"numpy.exp"
]
] |
b04901014/keras | [
"016d85c9e6d8a36fe7107e32752f6a9cd8d77c86"
] | [
"keras/backend/tensorflow_backend.py"
] | [
"import tensorflow as tf\n\nfrom tensorflow.python.training import moving_averages\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import control_flow_ops\ntry:\n from tensorflow.python.ops import ctc_ops as ctc\nexcept ImportError:\n import tensorflow.contrib.ctc as ctc\n\nimport numpy as np\nimport os\nimport copy\nimport warnings\nfrom .common import _FLOATX, _EPSILON, image_dim_ordering, reset_uids\npy_all = all\n\n# INTERNAL UTILS\n\n# This is the default internal TF session used by Keras.\n# It can be set manually via `set_session(sess)`.\n_SESSION = None\n# This dictionary holds a mapping {graph: learning_phase}.\n# A learning phase is a bool tensor used to run Keras models in\n# either train mode (learning_phase == 1) or test mode (learning_phase == 0).\n_GRAPH_LEARNING_PHASES = {}\n# This boolean flag can be set to True to leave variable initialization\n# up to the user.\n# Change its value via `manual_variable_initialization(value)`.\n_MANUAL_VAR_INIT = False\n\n\ndef clear_session():\n '''Destroys the current TF graph and creates a new one.\n\n Useful to avoid clutter from old models / layers.\n '''\n global _SESSION\n global _GRAPH_LEARNING_PHASES\n tf.reset_default_graph()\n reset_uids()\n _SESSION = None\n phase = tf.placeholder(dtype='bool', name='keras_learning_phase')\n _GRAPH_LEARNING_PHASES[tf.get_default_graph()] = phase\n\n\ndef manual_variable_initialization(value):\n '''Returns a boolean:\n whether variables should be initialized\n as they are instantiated (default), or if\n the user should handle the initialization\n (e.g. via tf.initialize_all_variables()).\n '''\n global _MANUAL_VAR_INIT\n _MANUAL_VAR_INIT = value\n\n\ndef learning_phase():\n '''Returns the learning phase flag.\n\n The learning phase flag is a bool tensor (0 = test, 1 = train)\n to be passed as input to any Keras function\n that uses a different behavior at train time and test time.\n '''\n graph = tf.get_default_graph()\n if graph not in _GRAPH_LEARNING_PHASES:\n phase = tf.placeholder(dtype='bool',\n name='keras_learning_phase')\n _GRAPH_LEARNING_PHASES[graph] = phase\n return _GRAPH_LEARNING_PHASES[graph]\n\n\ndef set_learning_phase(value):\n '''Sets the learning phase to a fixed value,\n either 0 or 1 (integers).\n '''\n global _GRAPH_LEARNING_PHASES\n if value not in {0, 1}:\n raise ValueError('Expected learning phase to be '\n '0 or 1.')\n _GRAPH_LEARNING_PHASES[tf.get_default_graph()] = value\n\n\ndef get_session():\n '''Returns the TF session to be used by the backend.\n\n If a default TensorFlow session is available, we will return it.\n\n Else, we will return the global Keras session.\n\n If no global Keras session exists at this point:\n we will create a new global session.\n\n Note that you can manually set the global session\n via `K.set_session(sess)`.\n '''\n global _SESSION\n if tf.get_default_session() is not None:\n session = tf.get_default_session()\n else:\n if _SESSION is None:\n if not os.environ.get('OMP_NUM_THREADS'):\n config = tf.ConfigProto(allow_soft_placement=True)\n else:\n nb_thread = int(os.environ.get('OMP_NUM_THREADS'))\n config = tf.ConfigProto(intra_op_parallelism_threads=nb_thread,\n allow_soft_placement=True)\n _SESSION = tf.Session(config=config)\n session = _SESSION\n if not _MANUAL_VAR_INIT:\n _initialize_variables()\n return session\n\n\ndef set_session(session):\n '''Sets the global TF session.\n '''\n global _SESSION\n _SESSION = session\n\n\n# VARIABLE MANIPULATION\n\ndef _convert_string_dtype(dtype):\n if dtype == 
'float16':\n return tf.float16\n if dtype == 'float32':\n return tf.float32\n elif dtype == 'float64':\n return tf.float64\n elif dtype == 'int16':\n return tf.int16\n elif dtype == 'int32':\n return tf.int32\n elif dtype == 'int64':\n return tf.int64\n elif dtype == 'uint8':\n return tf.int8\n elif dtype == 'uint16':\n return tf.uint16\n else:\n raise ValueError('Unsupported dtype:', dtype)\n\n\ndef _to_tensor(x, dtype):\n x = tf.convert_to_tensor(x)\n if x.dtype != dtype:\n x = tf.cast(x, dtype)\n return x\n\n\ndef is_sparse(tensor):\n return isinstance(tensor, tf.SparseTensor)\n\n\ndef to_dense(tensor):\n if is_sparse(tensor):\n return tf.sparse_tensor_to_dense(tensor)\n else:\n return tensor\n\n\ndef variable(value, dtype=_FLOATX, name=None):\n '''Instantiates a tensor.\n\n # Arguments\n value: numpy array, initial value of the tensor.\n dtype: tensor type.\n name: optional name string for the tensor.\n\n # Returns\n Tensor variable instance.\n '''\n if hasattr(value, 'tocoo'):\n sparse_coo = value.tocoo()\n indices = np.concatenate((np.expand_dims(sparse_coo.row, 1),\n np.expand_dims(sparse_coo.col, 1)), 1)\n # SparseTensor doesn't need initialization\n v = tf.SparseTensor(indices=indices, values=sparse_coo.data, shape=sparse_coo.shape)\n v._dims = len(sparse_coo.shape)\n return v\n v = tf.Variable(value, dtype=_convert_string_dtype(dtype), name=name)\n return v\n\n\ndef _initialize_variables():\n variables = tf.all_variables()\n uninitialized_variables = []\n for v in variables:\n if not hasattr(v, '_keras_initialized') or not v._keras_initialized:\n uninitialized_variables.append(v)\n v._keras_initialized = True\n if uninitialized_variables:\n sess = get_session()\n sess.run(tf.initialize_variables(uninitialized_variables))\n\n\ndef placeholder(shape=None, ndim=None, dtype=_FLOATX, sparse=False, name=None):\n '''Instantiates a placeholder.\n\n # Arguments\n shape: shape of the placeholder\n (integer tuple, may include None entries).\n ndim: number of axes of the tensor.\n At least one of {`shape`, `ndim`} must be specified.\n If both are specified, `shape` is used.\n dtype: placeholder type.\n name: optional name string for the placeholder.\n\n # Returns\n Placeholder tensor instance.\n '''\n if not shape:\n if ndim:\n shape = tuple([None for _ in range(ndim)])\n if sparse:\n x = tf.sparse_placeholder(dtype, name=name)\n x._dims = len(shape)\n else:\n x = tf.placeholder(dtype, shape=shape, name=name)\n x._keras_shape = shape\n x._uses_learning_phase = False\n return x\n\n\ndef shape(x):\n '''Returns the symbolic shape of a tensor.\n '''\n return tf.shape(x)\n\n\ndef int_shape(x):\n '''Returns the shape of a tensor as a tuple of\n integers or None entries.\n Note that this function only works with TensorFlow.\n '''\n shape = x.get_shape()\n return tuple([i.__int__() for i in shape])\n\n\ndef ndim(x):\n '''Returns the number of axes in a tensor, as an integer.\n '''\n if is_sparse(x):\n return x._dims\n\n dims = x.get_shape()._dims\n if dims is not None:\n return len(dims)\n return None\n\n\ndef dtype(x):\n '''Returns the dtype of a tensor, as a string.\n '''\n return x.dtype.name\n\n\ndef eval(x):\n '''Evaluates the value of a tensor.\n Returns a Numpy array.\n '''\n return to_dense(x).eval(session=get_session())\n\n\ndef zeros(shape, dtype=_FLOATX, name=None):\n '''Instantiates an all-zeros tensor variable.\n '''\n shape = tuple(map(int, shape))\n tf_dtype = _convert_string_dtype(dtype)\n return variable(tf.constant_initializer(0., dtype=tf_dtype)(shape),\n dtype, name)\n\n\ndef 
ones(shape, dtype=_FLOATX, name=None):\n '''Instantiates an all-ones tensor variable.\n '''\n shape = tuple(map(int, shape))\n tf_dtype = _convert_string_dtype(dtype)\n return variable(tf.constant_initializer(1., dtype=tf_dtype)(shape),\n dtype, name)\n\n\ndef eye(size, dtype=_FLOATX, name=None):\n '''Instantiate an identity matrix.\n '''\n return variable(np.eye(size), dtype, name)\n\n\ndef zeros_like(x, name=None):\n '''Instantiates an all-zeros tensor\n of the same shape as another tensor.\n '''\n return tf.zeros_like(x, name=name)\n\n\ndef ones_like(x, name=None):\n '''Instantiates an all-ones tensor\n of the same shape as another tensor.\n '''\n return tf.ones_like(x, name=name)\n\n\ndef random_uniform_variable(shape, low, high, dtype=_FLOATX,\n name=None, seed=None):\n shape = tuple(map(int, shape))\n tf_dtype = _convert_string_dtype(dtype)\n if seed is None:\n # ensure that randomness is conditioned by the Numpy RNG\n seed = np.random.randint(10e8)\n value = tf.random_uniform_initializer(\n low, high, dtype=tf_dtype, seed=seed)(shape)\n return variable(value, dtype=dtype, name=name)\n\n\ndef random_normal_variable(shape, mean, scale, dtype=_FLOATX,\n name=None, seed=None):\n shape = tuple(map(int, shape))\n tf_dtype = _convert_string_dtype(dtype)\n if seed is None:\n # ensure that randomness is conditioned by the Numpy RNG\n seed = np.random.randint(10e8)\n value = tf.random_normal_initializer(\n mean, scale, dtype=tf_dtype, seed=seed)(shape)\n return variable(value, dtype=dtype, name=name)\n\n\ndef count_params(x):\n '''Returns the number of scalars in a tensor.\n '''\n shape = x.get_shape()\n return np.prod([shape[i]._value for i in range(len(shape))])\n\n\ndef cast(x, dtype):\n '''Casts a tensor to a different dtype.\n '''\n return tf.cast(x, dtype)\n\n\n# UPDATES OPS\n\n\ndef update(x, new_x):\n return tf.assign(x, new_x)\n\n\ndef update_add(x, increment):\n return tf.assign_add(x, increment)\n\n\ndef update_sub(x, decrement):\n return tf.assign_sub(x, decrement)\n\n\ndef moving_average_update(variable, value, momentum):\n return moving_averages.assign_moving_average(\n variable, value, momentum)\n\n\n# LINEAR ALGEBRA\n\ndef dot(x, y):\n '''Multiplies 2 tensors.\n When attempting to multiply a ND tensor\n with a ND tensor, reproduces the Theano behavior\n (e.g. 
(2, 3).(4, 3, 5) = (2, 4, 5))\n '''\n if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):\n x_shape = (-1,) + int_shape(x)[1:]\n y_shape = int_shape(y)\n y_permute_dim = list(range(ndim(y)))\n y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim\n xt = tf.reshape(x, [-1, x_shape[-1]])\n yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])\n return tf.reshape(tf.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])\n if is_sparse(x):\n out = tf.sparse_tensor_dense_matmul(x, y)\n else:\n out = tf.matmul(x, y)\n return out\n\n\ndef batch_dot(x, y, axes=None):\n '''Batchwise dot product.\n\n batch_dot results in a tensor with less dimensions than the input.\n If the number of dimensions is reduced to 1, we use `expand_dims` to\n make sure that ndim is at least 2.\n\n # Arguments\n x, y: tensors with ndim >= 2\n axes: list (or single) int with target dimensions\n\n # Returns\n A tensor with shape equal to the concatenation of x's shape\n (less the dimension that was summed over) and y's shape\n (less the batch dimension and the dimension that was summed over).\n If the final rank is 1, we reshape it to (batch_size, 1).\n\n # Examples\n Assume x = [[1, 2], [3, 4]] and y = [[5, 6], [7, 8]]\n batch_dot(x, y, axes=1) = [[17, 53]] which is the main diagonal\n of x.dot(y.T), although we never have to calculate the off-diagonal\n elements.\n\n Shape inference:\n Let x's shape be (100, 20) and y's shape be (100, 30, 20).\n If dot_axes is (1, 2), to find the output shape of resultant tensor,\n loop through each dimension in x's shape and y's shape:\n x.shape[0] : 100 : append to output shape\n x.shape[1] : 20 : do not append to output shape,\n dimension 1 of x has been summed over. (dot_axes[0] = 1)\n y.shape[0] : 100 : do not append to output shape,\n always ignore first dimension of y\n y.shape[1] : 30 : append to output shape\n y.shape[2] : 20 : do not append to output shape,\n dimension 2 of y has been summed over. 
(dot_axes[1] = 2)\n\n output_shape = (100, 30)\n '''\n if type(axes) == int:\n axes = (axes, axes)\n if axes is not None:\n adj_x = None if axes[0] == ndim(x) - 1 else True\n adj_y = True if axes[1] == ndim(y) - 1 else None\n else:\n adj_x = None\n adj_y = None\n out = tf.batch_matmul(x, y, adj_x=adj_x, adj_y=adj_y)\n if ndim(out) == 1:\n out = expand_dims(out, 1)\n return out\n\n\ndef transpose(x):\n '''Transposes a matrix.\n '''\n return tf.transpose(x)\n\n\ndef gather(reference, indices):\n '''Retrieves the vectors of indices `indices`\n in the 2D tensor `reference`.\n\n # Arguments\n reference: a 2D tensor.\n indices: an int tensor of indices.\n\n # Returns\n A 3D tensor of same type as `reference`.\n '''\n return tf.gather(reference, indices)\n\n\n# ELEMENT-WISE OPERATIONS\n\ndef _normalize_axis(axis, ndim):\n if type(axis) is tuple:\n axis = list(axis)\n if type(axis) is list:\n for i, a in enumerate(axis):\n if a is not None and a < 0:\n axis[i] = a % ndim\n else:\n if axis is not None and axis < 0:\n axis = axis % ndim\n return axis\n\n\ndef max(x, axis=None, keepdims=False):\n '''Maximum value in a tensor.\n '''\n axis = _normalize_axis(axis, ndim(x))\n return tf.reduce_max(x, reduction_indices=axis, keep_dims=keepdims)\n\n\ndef min(x, axis=None, keepdims=False):\n '''Minimum value in a tensor.\n '''\n axis = _normalize_axis(axis, ndim(x))\n return tf.reduce_min(x, reduction_indices=axis, keep_dims=keepdims)\n\n\ndef sum(x, axis=None, keepdims=False):\n '''Sum of the values in a tensor, alongside the specified axis.\n '''\n axis = _normalize_axis(axis, ndim(x))\n return tf.reduce_sum(x, reduction_indices=axis, keep_dims=keepdims)\n\n\ndef prod(x, axis=None, keepdims=False):\n '''Multiplies the values in a tensor, alongside the specified axis.\n '''\n axis = _normalize_axis(axis, ndim(x))\n return tf.reduce_prod(x, reduction_indices=axis, keep_dims=keepdims)\n\n\ndef var(x, axis=None, keepdims=False):\n '''Variance of a tensor, alongside the specified axis.\n '''\n axis = _normalize_axis(axis, ndim(x))\n if x.dtype.base_dtype == tf.bool:\n x = tf.cast(x, _FLOATX)\n m = tf.reduce_mean(x, reduction_indices=axis, keep_dims=True)\n devs_squared = tf.square(x - m)\n return tf.reduce_mean(devs_squared,\n reduction_indices=axis,\n keep_dims=keepdims)\n\n\ndef std(x, axis=None, keepdims=False):\n '''Standard deviation of a tensor, alongside the specified axis.\n '''\n return tf.sqrt(var(x, axis=axis, keepdims=keepdims))\n\n\ndef mean(x, axis=None, keepdims=False):\n '''Mean of a tensor, alongside the specified axis.\n '''\n axis = _normalize_axis(axis, ndim(x))\n if x.dtype.base_dtype == tf.bool:\n x = tf.cast(x, _FLOATX)\n return tf.reduce_mean(x, reduction_indices=axis, keep_dims=keepdims)\n\n\ndef any(x, axis=None, keepdims=False):\n '''Bitwise reduction (logical OR).\n\n Returns an uint8 tensor (0s and 1s).\n '''\n axis = _normalize_axis(axis, ndim(x))\n x = tf.cast(x, tf.bool)\n x = tf.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)\n return tf.cast(x, tf.uint8)\n\n\ndef all(x, axis=None, keepdims=False):\n '''Bitwise reduction (logical AND).\n\n Returns an uint8 tensor\n '''\n axis = _normalize_axis(axis, ndim(x))\n x = tf.cast(x, tf.bool)\n x = tf.reduce_all(x, reduction_indices=axis, keep_dims=keepdims)\n return tf.cast(x, tf.uint8)\n\n\ndef argmax(x, axis=-1):\n '''Returns the index of the maximum value\n along a tensor axis.\n '''\n if axis < 0:\n axis = axis % len(x.get_shape())\n return tf.argmax(x, axis)\n\n\ndef argmin(x, axis=-1):\n '''Returns the index of the 
minimum value\n along a tensor axis.\n '''\n if axis < 0:\n axis = axis % len(x.get_shape())\n return tf.argmin(x, axis)\n\n\ndef square(x):\n '''Element-wise square.\n '''\n return tf.square(x)\n\n\ndef abs(x):\n '''Element-wise absolute value.\n '''\n return tf.abs(x)\n\n\ndef sqrt(x):\n '''Element-wise square root.\n '''\n zero = _to_tensor(0., x.dtype.base_dtype)\n inf = _to_tensor(np.inf, x.dtype.base_dtype)\n x = tf.clip_by_value(x, zero, inf)\n return tf.sqrt(x)\n\n\ndef exp(x):\n '''Element-wise exponential.\n '''\n return tf.exp(x)\n\n\ndef log(x):\n '''Element-wise log.\n '''\n return tf.log(x)\n\n\ndef round(x):\n '''Element-wise rounding to the closest integer.\n '''\n return tf.round(x)\n\n\ndef sign(x):\n '''Element-wise sign.\n '''\n return tf.sign(x)\n\n\ndef pow(x, a):\n '''Element-wise exponentiation.\n '''\n return tf.pow(x, a)\n\n\ndef clip(x, min_value, max_value):\n '''Element-wise value clipping.\n '''\n if max_value < min_value:\n max_value = min_value\n min_value = _to_tensor(min_value, x.dtype.base_dtype)\n max_value = _to_tensor(max_value, x.dtype.base_dtype)\n return tf.clip_by_value(x, min_value, max_value)\n\n\ndef equal(x, y):\n '''Element-wise equality between two tensors.\n Returns a bool tensor.\n '''\n return tf.equal(x, y)\n\n\ndef not_equal(x, y):\n '''Element-wise inequality between two tensors.\n Returns a bool tensor.\n '''\n return tf.not_equal(x, y)\n\n\ndef greater(x, y):\n '''Element-wise truth value of (x > y).\n Returns a bool tensor.\n '''\n return tf.greater(x, y)\n\n\ndef greater_equal(x, y):\n '''Element-wise truth value of (x >= y).\n Returns a bool tensor.\n '''\n return tf.greater_equal(x, y)\n\n\ndef lesser(x, y):\n '''Element-wise truth value of (x < y).\n Returns a bool tensor.\n '''\n return tf.less(x, y)\n\n\ndef lesser_equal(x, y):\n '''Element-wise truth value of (x <= y).\n Returns a bool tensor.\n '''\n return tf.less_equal(x, y)\n\n\ndef maximum(x, y):\n '''Element-wise maximum of two tensors.\n '''\n return tf.maximum(x, y)\n\n\ndef minimum(x, y):\n '''Element-wise minimum of two tensors.\n '''\n return tf.minimum(x, y)\n\n\ndef sin(x):\n '''Computes sin of x element-wise.\n '''\n return tf.sin(x)\n\n\ndef cos(x):\n '''Computes cos of x element-wise.\n '''\n return tf.cos(x)\n\n\ndef normalize_batch_in_training(x, gamma, beta,\n reduction_axes, epsilon=0.0001):\n '''Compute mean and std for batch then apply batch_normalization on batch.\n '''\n mean, var = tf.nn.moments(x, reduction_axes,\n shift=None, name=None, keep_dims=False)\n if sorted(reduction_axes) == range(ndim(x))[:-1]:\n normed = tf.nn.batch_normalization(x, mean, var,\n beta, gamma,\n epsilon)\n else:\n # need broadcasting\n target_shape = []\n for axis in range(ndim(x)):\n if axis in reduction_axes:\n target_shape.append(1)\n else:\n target_shape.append(tf.shape(x)[axis])\n target_shape = tf.pack(target_shape)\n\n broadcast_mean = tf.reshape(mean, target_shape)\n broadcast_var = tf.reshape(var, target_shape)\n broadcast_gamma = tf.reshape(gamma, target_shape)\n broadcast_beta = tf.reshape(beta, target_shape)\n normed = tf.nn.batch_normalization(x, broadcast_mean, broadcast_var,\n broadcast_beta, broadcast_gamma,\n epsilon)\n return normed, mean, var\n\n\ndef batch_normalization(x, mean, var, beta, gamma, epsilon=0.0001):\n '''Apply batch normalization on x given mean, var, beta and gamma:\n\n output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta\n '''\n return tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon)\n\n\n# SHAPE OPERATIONS\n\ndef 
concatenate(tensors, axis=-1):\n '''Concantes a list of tensors alongside the specified axis.\n '''\n if axis < 0:\n dims = ndim(tensors[0])\n if dims:\n axis = axis % dims\n else:\n axis = 0\n\n if py_all([is_sparse(x) for x in tensors]):\n return tf.sparse_concat(axis, tensors)\n else:\n return tf.concat(axis, [to_dense(x) for x in tensors])\n\n\ndef reshape(x, shape):\n '''Reshapes a tensor to the specified shape.\n '''\n return tf.reshape(x, shape)\n\n\ndef permute_dimensions(x, pattern):\n '''Permutes axes in a tensor.\n\n # Arguments\n pattern: should be a tuple of\n dimension indices, e.g. (0, 2, 1).\n '''\n return tf.transpose(x, perm=pattern)\n\n\ndef resize_images(X, height_factor, width_factor, dim_ordering):\n '''Resizes the images contained in a 4D tensor of shape\n - [batch, channels, height, width] (for 'th' dim_ordering)\n - [batch, height, width, channels] (for 'tf' dim_ordering)\n by a factor of (height_factor, width_factor). Both factors should be\n positive integers.\n '''\n if dim_ordering == 'th':\n original_shape = int_shape(X)\n new_shape = tf.shape(X)[2:]\n new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))\n X = permute_dimensions(X, [0, 2, 3, 1])\n X = tf.image.resize_nearest_neighbor(X, new_shape)\n X = permute_dimensions(X, [0, 3, 1, 2])\n X.set_shape((None, None, original_shape[2] * height_factor if original_shape[2] is not None else None,\n original_shape[3] * width_factor if original_shape[3] is not None else None))\n return X\n elif dim_ordering == 'tf':\n original_shape = int_shape(X)\n new_shape = tf.shape(X)[1:3]\n new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))\n X = tf.image.resize_nearest_neighbor(X, new_shape)\n X.set_shape((None, original_shape[1] * height_factor if original_shape[1] is not None else None,\n original_shape[2] * width_factor if original_shape[2] is not None else None, None))\n return X\n else:\n raise Exception('Invalid dim_ordering: ' + dim_ordering)\n\n\ndef resize_volumes(X, depth_factor, height_factor, width_factor, dim_ordering):\n '''Resize the volume contained in a 5D tensor of shape\n - [batch, channels, depth, height, width] (for 'th' dim_ordering)\n - [batch, depth, height, width, channels] (for 'tf' dim_ordering)\n by a factor of (depth_factor, height_factor, width_factor).\n All three factors should be positive integers.\n '''\n if dim_ordering == 'th':\n output = repeat_elements(X, depth_factor, axis=2)\n output = repeat_elements(output, height_factor, axis=3)\n output = repeat_elements(output, width_factor, axis=4)\n return output\n elif dim_ordering == 'tf':\n output = repeat_elements(X, depth_factor, axis=1)\n output = repeat_elements(output, height_factor, axis=2)\n output = repeat_elements(output, width_factor, axis=3)\n return output\n else:\n raise Exception('Invalid dim_ordering: ' + dim_ordering)\n\n\ndef repeat_elements(x, rep, axis):\n '''Repeats the elements of a tensor along an axis, like np.repeat\n\n If x has shape (s1, s2, s3) and axis=1, the output\n will have shape (s1, s2 * rep, s3)\n '''\n x_shape = x.get_shape().as_list()\n # slices along the repeat axis\n splits = tf.split(axis, x_shape[axis], x)\n # repeat each slice the given number of reps\n x_rep = [s for s in splits for i in range(rep)]\n return tf.concat(axis, x_rep)\n\n\ndef repeat(x, n):\n '''Repeats a 2D tensor:\n\n if x has shape (samples, dim) and n=2,\n the output will have shape (samples, 2, dim)\n '''\n assert ndim(x) == 2\n x = tf.expand_dims(x, 1)\n pattern = 
tf.pack([1, n, 1])\n return tf.tile(x, pattern)\n\n\ndef tile(x, n):\n if not hasattr(n, 'shape') and not hasattr(n, '__len__') and not hasattr(n, '_shape'):\n n = [n]\n return tf.tile(x, n)\n\n\ndef flatten(x):\n return tf.reshape(x, [-1])\n\n\ndef batch_flatten(x):\n '''Turn a n-D tensor into a 2D tensor where\n the first dimension is conserved.\n '''\n x = tf.reshape(x, tf.pack([-1, prod(shape(x)[1:])]))\n return x\n\n\ndef expand_dims(x, dim=-1):\n '''Adds a 1-sized dimension at index \"dim\".\n '''\n return tf.expand_dims(x, dim)\n\n\ndef squeeze(x, axis):\n '''Removes a 1-dimension from the tensor at index \"axis\".\n '''\n return tf.squeeze(x, [axis])\n\n\ndef temporal_padding(x, padding=1):\n '''Pads the middle dimension of a 3D tensor\n with \"padding\" zeros left and right.\n '''\n pattern = [[0, 0], [padding, padding], [0, 0]]\n return tf.pad(x, pattern)\n\n\ndef asymmetric_temporal_padding(x, left_pad=1, right_pad=1):\n '''Pad the middle dimension of a 3D tensor\n with \"left_pad\" zeros left and \"right_pad\" right.\n '''\n pattern = [[0, 0], [left_pad, right_pad], [0, 0]]\n return tf.pad(x, pattern)\n\n\ndef spatial_2d_padding(x, padding=(1, 1), dim_ordering='default'):\n '''Pads the 2nd and 3rd dimensions of a 4D tensor\n with \"padding[0]\" and \"padding[1]\" (resp.) zeros left and right.\n '''\n if dim_ordering == 'default':\n dim_ordering = image_dim_ordering()\n if dim_ordering not in {'th', 'tf'}:\n raise ValueError('Unknown dim_ordering ' + str(dim_ordering))\n\n if dim_ordering == 'th':\n pattern = [[0, 0], [0, 0],\n [padding[0], padding[0]], [padding[1], padding[1]]]\n else:\n pattern = [[0, 0],\n [padding[0], padding[0]], [padding[1], padding[1]],\n [0, 0]]\n return tf.pad(x, pattern)\n\n\ndef asymmetric_spatial_2d_padding(x, top_pad=1, bottom_pad=1,\n left_pad=1, right_pad=1,\n dim_ordering='default'):\n '''Pad the rows and columns of a 4D tensor\n with \"top_pad\", \"bottom_pad\", \"left_pad\", \"right_pad\" (resp.) zeros\n rows on top, bottom; cols on left, right.\n '''\n if dim_ordering == 'default':\n dim_ordering = image_dim_ordering()\n if dim_ordering not in {'th', 'tf'}:\n raise ValueError('Unknown dim_ordering ' + str(dim_ordering))\n\n if dim_ordering == 'th':\n pattern = [[0, 0],\n [0, 0],\n [top_pad, bottom_pad],\n [left_pad, right_pad]]\n else:\n pattern = [[0, 0],\n [top_pad, bottom_pad],\n [left_pad, right_pad],\n [0, 0]]\n return tf.pad(x, pattern)\n\n\ndef spatial_3d_padding(x, padding=(1, 1, 1), dim_ordering='default'):\n '''Pads 5D tensor with zeros for the depth, height, width dimension with\n \"padding[0]\", \"padding[1]\" and \"padding[2]\" (resp.) zeros left and right\n\n For 'tf' dim_ordering, the 2nd, 3rd and 4th dimension will be padded.\n For 'th' dim_ordering, the 3rd, 4th and 5th dimension will be padded.\n '''\n if dim_ordering == 'default':\n dim_ordering = image_dim_ordering()\n if dim_ordering not in {'th', 'tf'}:\n raise ValueError('Unknown dim_ordering ' + str(dim_ordering))\n\n if dim_ordering == 'th':\n pattern = [\n [0, 0],\n [0, 0],\n [padding[0], padding[0]],\n [padding[1], padding[1]],\n [padding[2], padding[2]]\n ]\n else:\n pattern = [\n [0, 0],\n [padding[0], padding[0]],\n [padding[1], padding[1]],\n [padding[2], padding[2]],\n [0, 0]\n ]\n return tf.pad(x, pattern)\n\n\ndef pack(x):\n return tf.pack(x)\n\n\ndef one_hot(indices, nb_classes):\n '''Input: nD integer tensor of shape (batch_size, dim1, dim2, ... dim(n-1))\n Output: (n + 1)D one hot representation of the input\n with shape (batch_size, dim1, dim2, ... 
dim(n-1), nb_classes)\n '''\n return tf.one_hot(indices, depth=nb_classes, axis=-1)\n\n\ndef reverse(x, axes):\n '''Reverse a tensor along the the specified axes\n '''\n if type(axes) == int:\n axes = [axes]\n dims = [True if i in axes else False for i in range(len(x.get_shape()._dims))]\n return tf.reverse(x, dims)\n\n\n# VALUE MANIPULATION\n\n\ndef get_value(x):\n '''Returns the value of a tensor variable,\n as a Numpy array.\n '''\n return x.eval(session=get_session())\n\n\ndef batch_get_value(xs):\n '''Returns the value of more than one tensor variable,\n as a list of Numpy arrays.\n '''\n if xs:\n return get_session().run(xs)\n else:\n return []\n\n\ndef set_value(x, value):\n '''Sets the value of a tensor variable,\n from a Numpy array.\n '''\n value = np.asarray(value)\n tf_dtype = _convert_string_dtype(x.dtype.name.split('_')[0])\n if hasattr(x, '_assign_placeholder'):\n assign_placeholder = x._assign_placeholder\n assign_op = x._assign_op\n else:\n assign_placeholder = tf.placeholder(tf_dtype, shape=value.shape)\n assign_op = x.assign(assign_placeholder)\n x._assign_placeholder = assign_placeholder\n x._assign_op = assign_op\n get_session().run(assign_op, feed_dict={assign_placeholder: value})\n\n\ndef batch_set_value(tuples):\n '''Sets the values of many tensor variables at once.\n\n # Arguments\n tuples: a list of tuples `(tensor, value)`.\n `value` should be a Numpy array.\n '''\n if tuples:\n assign_ops = []\n feed_dict = {}\n for x, value in tuples:\n value = np.asarray(value)\n tf_dtype = _convert_string_dtype(x.dtype.name.split('_')[0])\n if hasattr(x, '_assign_placeholder'):\n assign_placeholder = x._assign_placeholder\n assign_op = x._assign_op\n else:\n assign_placeholder = tf.placeholder(tf_dtype, shape=value.shape)\n assign_op = x.assign(assign_placeholder)\n x._assign_placeholder = assign_placeholder\n x._assign_op = assign_op\n assign_ops.append(assign_op)\n feed_dict[assign_placeholder] = value\n get_session().run(assign_ops, feed_dict=feed_dict)\n\n\ndef get_variable_shape(x):\n return int_shape(x)\n\n\ndef print_tensor(x, message=''):\n '''Print the message and the tensor when evaluated and return the same\n tensor.\n '''\n return tf.Print(x, [x], message)\n\n\n# GRAPH MANIPULATION\n\nclass Function(object):\n\n def __init__(self, inputs, outputs, updates=[]):\n assert type(inputs) in {list, tuple}, 'Input to a TensorFlow backend function should be a list or tuple.'\n assert type(outputs) in {list, tuple}, 'Output to a TensorFlow backend function should be a list or tuple.'\n assert type(updates) in {list, tuple}, 'Updates in a TensorFlow backend function should be a list or tuple.'\n self.inputs = list(inputs)\n self.outputs = list(outputs)\n with tf.control_dependencies(self.outputs):\n updates_ops = []\n for update in updates:\n if type(update) is tuple:\n p, new_p = update\n updates_ops.append(tf.assign(p, new_p))\n else:\n # assumed already an op\n updates_ops.append(update)\n self.updates_op = tf.group(*updates_ops)\n\n def __call__(self, inputs):\n assert type(inputs) in {list, tuple}\n feed_dict = {}\n for tensor, value in zip(self.inputs, inputs):\n if is_sparse(tensor):\n sparse_coo = value.tocoo()\n indices = np.concatenate((np.expand_dims(sparse_coo.row, 1),\n np.expand_dims(sparse_coo.col, 1)), 1)\n value = (indices, sparse_coo.data, sparse_coo.shape)\n feed_dict[tensor] = value\n session = get_session()\n updated = session.run(self.outputs + [self.updates_op], feed_dict=feed_dict)\n return updated[:len(self.outputs)]\n\n\ndef function(inputs, 
outputs, updates=[], **kwargs):\n '''Instantiates a Keras function.\n\n # Arguments\n inputs: list of placeholder/variable tensors.\n outputs: list of output tensors.\n updates: list of update tuples (old_tensor, new_tensor).\n '''\n if len(kwargs) > 0:\n msg = [\n 'Expected no kwargs, you passed %s' % len(kwargs),\n 'kwargs passed to function are ignored with Tensorflow backend'\n ]\n warnings.warn('\\n'.join(msg))\n return Function(inputs, outputs, updates=updates)\n\n\ndef gradients(loss, variables):\n '''Returns the gradients of `variables` (list of tensor variables)\n with regard to `loss`.\n '''\n return tf.gradients(loss, variables, colocate_gradients_with_ops=True)\n\n\ndef stop_gradient(variables):\n '''Returns `variables` but with zero gradient with respect to every other\n variables.\n '''\n return tf.stop_gradient(variables)\n\n\n# CONTROL FLOW\n\ndef rnn(step_function, inputs, initial_states,\n go_backwards=False, mask=None, constants=None,\n unroll=False, input_length=None):\n '''Iterates over the time dimension of a tensor.\n\n # Arguments\n inputs: tensor of temporal data of shape (samples, time, ...)\n (at least 3D).\n step_function:\n Parameters:\n input: tensor with shape (samples, ...) (no time dimension),\n representing input for the batch of samples at a certain\n time step.\n states: list of tensors.\n Returns:\n output: tensor with shape (samples, output_dim) (no time dimension),\n new_states: list of tensors, same length and shapes\n as 'states'. The first state in the list must be the\n output tensor at the previous timestep.\n initial_states: tensor with shape (samples, output_dim) (no time dimension),\n containing the initial values for the states used in\n the step function.\n go_backwards: boolean. If True, do the iteration over\n the time dimension in reverse order.\n mask: binary tensor with shape (samples, time, 1),\n with a zero for every element that is masked.\n constants: a list of constant values passed at each step.\n unroll: with TensorFlow the RNN is always unrolled, but with Theano you\n can use this boolean flag to unroll the RNN.\n input_length: not relevant in the TensorFlow implementation.\n Must be specified if using unrolling with Theano.\n\n # Returns\n A tuple (last_output, outputs, new_states).\n\n last_output: the latest output of the rnn, of shape (samples, ...)\n outputs: tensor with shape (samples, time, ...) 
where each\n entry outputs[s, t] is the output of the step function\n at time t for sample s.\n new_states: list of tensors, latest states returned by\n the step function, of shape (samples, ...).\n '''\n ndim = len(inputs.get_shape())\n assert ndim >= 3, 'Input should be at least 3D.'\n axes = [1, 0] + list(range(2, ndim))\n inputs = tf.transpose(inputs, (axes))\n\n if mask is not None:\n if mask.dtype != tf.bool:\n mask = tf.cast(mask, tf.bool)\n if len(mask.get_shape()) == ndim - 1:\n mask = expand_dims(mask)\n mask = tf.transpose(mask, axes)\n\n if constants is None:\n constants = []\n\n if unroll:\n if not inputs.get_shape()[0]:\n raise Exception('Unrolling requires a fixed number of timesteps.')\n\n states = initial_states\n successive_states = []\n successive_outputs = []\n\n input_list = tf.unpack(inputs)\n if go_backwards:\n input_list.reverse()\n\n if mask is not None:\n mask_list = tf.unpack(mask)\n if go_backwards:\n mask_list.reverse()\n\n for input, mask_t in zip(input_list, mask_list):\n output, new_states = step_function(input, states + constants)\n\n # tf.select needs its condition tensor to be the same shape as its two\n # result tensors, but in our case the condition (mask) tensor is\n # (nsamples, 1), and A and B are (nsamples, ndimensions). So we need to\n # broadcast the mask to match the shape of A and B. That's what the\n # tile call does, is just repeat the mask along its second dimension\n # ndimensions times.\n tiled_mask_t = tf.tile(mask_t, tf.pack([1, tf.shape(output)[1]]))\n\n if len(successive_outputs) == 0:\n prev_output = zeros_like(output)\n else:\n prev_output = successive_outputs[-1]\n\n output = tf.select(tiled_mask_t, output, prev_output)\n\n return_states = []\n for state, new_state in zip(states, new_states):\n # (see earlier comment for tile explanation)\n tiled_mask_t = tf.tile(mask_t, tf.pack([1, tf.shape(new_state)[1]]))\n return_states.append(tf.select(tiled_mask_t, new_state, state))\n\n states = return_states\n successive_outputs.append(output)\n successive_states.append(states)\n last_output = successive_outputs[-1]\n new_states = successive_states[-1]\n outputs = tf.pack(successive_outputs)\n else:\n for input in input_list:\n output, states = step_function(input, states + constants)\n successive_outputs.append(output)\n successive_states.append(states)\n last_output = successive_outputs[-1]\n new_states = successive_states[-1]\n outputs = tf.pack(successive_outputs)\n\n else:\n if go_backwards:\n inputs = tf.reverse(inputs, [True] + [False] * (ndim - 1))\n\n states = tuple(initial_states)\n\n time_steps = tf.shape(inputs)[0]\n output_ta = tensor_array_ops.TensorArray(\n dtype=inputs.dtype,\n size=time_steps,\n tensor_array_name='output_ta')\n input_ta = tensor_array_ops.TensorArray(\n dtype=inputs.dtype,\n size=time_steps,\n tensor_array_name='input_ta')\n input_ta = input_ta.unpack(inputs)\n time = tf.constant(0, dtype='int32', name='time')\n\n if mask is not None:\n if len(states) == 0:\n raise ValueError('No initial states provided! 
'\n 'When using masking in an RNN, you should '\n 'provide initial states '\n '(and your step function should return '\n 'as its first state at time `t` '\n 'the output at time `t-1`).')\n if go_backwards:\n mask = tf.reverse(mask, [True] + [False] * (ndim - 2))\n\n mask_ta = tensor_array_ops.TensorArray(\n dtype=tf.bool,\n size=time_steps,\n tensor_array_name='mask_ta')\n mask_ta = mask_ta.unpack(mask)\n\n def _step(time, output_ta_t, *states):\n current_input = input_ta.read(time)\n mask_t = mask_ta.read(time)\n output, new_states = step_function(current_input,\n tuple(states) +\n tuple(constants))\n tiled_mask_t = tf.tile(mask_t, tf.pack([1, tf.shape(output)[1]]))\n output = tf.select(tiled_mask_t, output, states[0])\n new_states = [tf.select(tiled_mask_t, new_states[i], states[i]) for i in range(len(states))]\n output_ta_t = output_ta_t.write(time, output)\n return (time + 1, output_ta_t) + tuple(new_states)\n else:\n def _step(time, output_ta_t, *states):\n current_input = input_ta.read(time)\n output, new_states = step_function(current_input,\n tuple(states) +\n tuple(constants))\n output_ta_t = output_ta_t.write(time, output)\n return (time + 1, output_ta_t) + tuple(new_states)\n\n final_outputs = control_flow_ops.while_loop(\n cond=lambda time, *_: time < time_steps,\n body=_step,\n loop_vars=(time, output_ta) + states,\n parallel_iterations=32,\n swap_memory=True)\n last_time = final_outputs[0]\n output_ta = final_outputs[1]\n new_states = final_outputs[2:]\n\n outputs = output_ta.pack()\n last_output = output_ta.read(last_time - 1)\n\n axes = [1, 0] + list(range(2, len(outputs.get_shape())))\n outputs = tf.transpose(outputs, axes)\n return last_output, outputs, new_states\n\n\ndef _cond(condition, then_lambda, else_lambda):\n '''Backwards compatible interface to tf.cond prior to public introduction.\n '''\n try:\n cond_fn = tf.cond\n except AttributeError:\n from tensorflow.python.ops import control_flow_ops\n cond_fn = control_flow_ops.cond\n return cond_fn(condition, then_lambda, else_lambda)\n\n\ndef switch(condition, then_expression, else_expression):\n '''Switches between two operations\n depending on a scalar value (int or bool).\n Note that both `then_expression` and `else_expression`\n should be symbolic tensors of the *same shape*.\n\n # Arguments\n condition: scalar tensor.\n then_expression: TensorFlow operation.\n else_expression: TensorFlow operation.\n '''\n x_shape = copy.copy(then_expression.get_shape())\n if condition.dtype != tf.bool:\n condition = tf.cast(condition, 'bool')\n x = _cond(condition,\n lambda: then_expression,\n lambda: else_expression)\n x.set_shape(x_shape)\n return x\n\n\ndef in_train_phase(x, alt):\n '''Selects `x` in train phase, and `alt` otherwise.\n Note that `alt` should have the *same shape* as `x`.\n '''\n if learning_phase() is 1:\n return x\n elif learning_phase() is 0:\n return alt\n # else: assume learning phase is a placeholder tensor.\n x = switch(learning_phase(), x, alt)\n x._uses_learning_phase = True\n return x\n\n\ndef in_test_phase(x, alt):\n '''Selects `x` in test phase, and `alt` otherwise.\n Note that `alt` should have the *same shape* as `x`.\n '''\n if learning_phase() is 1:\n return alt\n elif learning_phase() is 0:\n return x\n # else: assume learning phase is a placeholder tensor.\n x = switch(learning_phase(), alt, x)\n x._uses_learning_phase = True\n return x\n\n\n# NN OPERATIONS\n\ndef relu(x, alpha=0., max_value=None):\n '''Rectified linear unit\n\n # Arguments\n alpha: slope of negative section.\n max_value: 
saturation threshold.\n '''\n if alpha != 0.:\n negative_part = tf.nn.relu(-x)\n x = tf.nn.relu(x)\n if max_value is not None:\n max_value = _to_tensor(max_value, x.dtype.base_dtype)\n zero = _to_tensor(0., x.dtype.base_dtype)\n x = tf.clip_by_value(x, zero, max_value)\n if alpha != 0.:\n alpha = _to_tensor(alpha, x.dtype.base_dtype)\n x -= alpha * negative_part\n return x\n\n\ndef elu(x, alpha=1.):\n '''Exponential linear unit.\n\n # Arguments\n x: Tensor to compute the activation function for.\n alpha: scalar\n '''\n res = tf.nn.elu(x)\n if alpha == 1:\n return res\n else:\n return tf.select(x > 0, res, alpha * res)\n\n\ndef softmax(x):\n '''Softmax of a tensor.\n '''\n return tf.nn.softmax(x)\n\n\ndef softplus(x):\n '''Softplus of a tensor.\n '''\n return tf.nn.softplus(x)\n\n\ndef softsign(x):\n '''Softsign of a tensor.\n '''\n return tf.nn.softsign(x)\n\n\ndef categorical_crossentropy(output, target, from_logits=False):\n '''Categorical crossentropy between an output tensor\n and a target tensor, where the target is a tensor of the same\n shape as the output.\n '''\n # Note: tf.nn.softmax_cross_entropy_with_logits\n # expects logits, Keras expects probabilities.\n if not from_logits:\n # scale preds so that the class probas of each sample sum to 1\n output /= tf.reduce_sum(output,\n reduction_indices=len(output.get_shape()) - 1,\n keep_dims=True)\n # manual computation of crossentropy\n epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)\n output = tf.clip_by_value(output, epsilon, 1. - epsilon)\n return - tf.reduce_sum(target * tf.log(output),\n reduction_indices=len(output.get_shape()) - 1)\n else:\n return tf.nn.softmax_cross_entropy_with_logits(output, target)\n\n\ndef sparse_categorical_crossentropy(output, target, from_logits=False):\n '''Categorical crossentropy between an output tensor\n and a target tensor, where the target is an integer tensor.\n '''\n # Note: tf.nn.softmax_cross_entropy_with_logits\n # expects logits, Keras expects probabilities.\n if not from_logits:\n epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)\n output = tf.clip_by_value(output, epsilon, 1 - epsilon)\n output = tf.log(output)\n\n output_shape = output.get_shape()\n res = tf.nn.sparse_softmax_cross_entropy_with_logits(\n tf.reshape(output, [-1, int(output_shape[-1])]),\n cast(flatten(target), 'int64'))\n if len(output_shape) == 3:\n # if our output includes timesteps we need to reshape\n return tf.reshape(res, [-1, int(output_shape[-2])])\n else:\n return res\n\n\ndef binary_crossentropy(output, target, from_logits=False):\n '''Binary crossentropy between an output tensor and a target tensor.\n '''\n # Note: tf.nn.softmax_cross_entropy_with_logits\n # expects logits, Keras expects probabilities.\n if not from_logits:\n # transform back to logits\n epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)\n output = tf.clip_by_value(output, epsilon, 1 - epsilon)\n output = tf.log(output / (1 - output))\n return tf.nn.sigmoid_cross_entropy_with_logits(output, target)\n\n\ndef sigmoid(x):\n '''Element-wise sigmoid.\n '''\n return tf.nn.sigmoid(x)\n\n\ndef hard_sigmoid(x):\n '''Segment-wise linear approximation of sigmoid.\n Faster than sigmoid.\n '''\n x = (0.2 * x) + 0.5\n zero = _to_tensor(0., x.dtype.base_dtype)\n one = _to_tensor(1., x.dtype.base_dtype)\n x = tf.clip_by_value(x, zero, one)\n return x\n\n\ndef tanh(x):\n '''Element-wise tanh.\n '''\n return tf.nn.tanh(x)\n\n\ndef dropout(x, level, noise_shape=None, seed=None):\n '''Sets entries in `x` to zero at random,\n while scaling the 
entire tensor.\n\n # Arguments\n x: tensor\n level: fraction of the entries in the tensor\n that will be set to 0.\n noise_shape: shape for randomly generated keep/drop flags,\n must be broadcastable to the shape of `x`\n seed: random seed to ensure determinism.\n '''\n retain_prob = 1. - level\n if seed is None:\n seed = np.random.randint(10e6)\n # the dummy 1. works around a TF bug\n # (float32_ref vs. float32 incomptability)\n return tf.nn.dropout(x * 1., retain_prob, noise_shape, seed=seed)\n\n\ndef l2_normalize(x, axis):\n '''Normalizes a tensor wrt the L2 norm alongside the specified axis.\n '''\n if axis < 0:\n axis = axis % len(x.get_shape())\n return tf.nn.l2_normalize(x, dim=axis)\n\n\ndef in_top_k(predictions, targets, k):\n '''Returns whether the `targets` are in the top `k` `predictions`\n\n # Arguments\n predictions: A tensor of shape batch_size x classess and type float32.\n targets: A tensor of shape batch_size and type int32 or int64.\n k: An int, number of top elements to consider.\n\n # Returns\n A tensor of shape batch_size and type bool. output_i is True if\n targets_i is within top-k values of predictions_i\n '''\n return tf.nn.in_top_k(predictions, targets, k)\n\n\n# CONVOLUTIONS\n\ndef _preprocess_deconv_output_shape(shape, dim_ordering):\n if dim_ordering == 'th':\n shape = (shape[0], shape[2], shape[3], shape[1])\n return shape\n\n\ndef _preprocess_conv2d_input(x, dim_ordering):\n if _FLOATX == 'float64':\n x = tf.cast(x, 'float32')\n if dim_ordering == 'th':\n # TF uses the last dimension as channel dimension,\n # instead of the 2nd one.\n # TH input shape: (samples, input_depth, rows, cols)\n # TF input shape: (samples, rows, cols, input_depth)\n x = tf.transpose(x, (0, 2, 3, 1))\n return x\n\n\ndef _preprocess_conv3d_input(x, dim_ordering):\n if _FLOATX == 'float64':\n x = tf.cast(x, 'float32')\n if dim_ordering == 'th':\n # TF uses the last dimension as channel dimension,\n # instead of the 2nd one.\n # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)\n # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth)\n x = tf.transpose(x, (0, 2, 3, 4, 1))\n return x\n\n\ndef _preprocess_conv2d_kernel(kernel, dim_ordering):\n if _FLOATX == 'float64':\n kernel = tf.cast(kernel, 'float32')\n if dim_ordering == 'th':\n # TF uses the last dimension as channel dimension,\n # instead of the 2nd one.\n # TH kernel shape: (depth, input_depth, rows, cols)\n # TF kernel shape: (rows, cols, input_depth, depth)\n kernel = tf.transpose(kernel, (2, 3, 1, 0))\n return kernel\n\n\ndef _preprocess_conv3d_kernel(kernel, dim_ordering):\n if _FLOATX == 'float64':\n kernel = tf.cast(kernel, 'float32')\n if dim_ordering == 'th':\n # TF uses the last dimension as channel dimension,\n # instead of the 2nd one.\n # TH kernel shape: (out_depth, input_depth, kernel_dim1, kernel_dim2, kernel_dim3)\n # TF kernel shape: (kernel_dim1, kernel_dim2, kernel_dim3, input_depth, out_depth)\n kernel = tf.transpose(kernel, (2, 3, 4, 1, 0))\n return kernel\n\n\ndef _preprocess_border_mode(border_mode):\n if border_mode == 'same':\n padding = 'SAME'\n elif border_mode == 'valid':\n padding = 'VALID'\n else:\n raise Exception('Invalid border mode: ' + str(border_mode))\n return padding\n\n\ndef _postprocess_conv2d_output(x, dim_ordering):\n if dim_ordering == 'th':\n x = tf.transpose(x, (0, 3, 1, 2))\n\n if _FLOATX == 'float64':\n x = tf.cast(x, 'float64')\n return x\n\n\ndef _postprocess_conv3d_output(x, dim_ordering):\n if dim_ordering == 'th':\n x = tf.transpose(x, 
(0, 4, 1, 2, 3))\n\n if _FLOATX == 'float64':\n x = tf.cast(x, 'float64')\n return x\n\n\ndef conv1d(x, kernel, stride=1, border_mode='valid',\n image_shape=None, filter_shape=None):\n '''1D convolution.\n\n # Arguments\n kernel: kernel tensor.\n strides: stride integer.\n border_mode: string, \"same\" or \"valid\".\n '''\n # pre-process dtype\n if _FLOATX == 'float64':\n x = tf.cast(x, 'float32')\n kernel = tf.cast(kernel, 'float32')\n padding = _preprocess_border_mode(border_mode)\n x = tf.nn.conv1d(x, kernel, stride, padding=padding)\n # post-process dtype\n if _FLOATX == 'float64':\n x = tf.cast(x, 'float64')\n return x\n\n\ndef conv2d(x, kernel, strides=(1, 1), border_mode='valid',\n dim_ordering='default',\n image_shape=None, filter_shape=None, filter_dilation=(1, 1)):\n '''2D convolution.\n\n # Arguments\n kernel: kernel tensor.\n strides: strides tuple.\n border_mode: string, \"same\" or \"valid\".\n dim_ordering: \"tf\" or \"th\".\n Whether to use Theano or TensorFlow dimension ordering\n for inputs/kernels/ouputs.\n '''\n if dim_ordering == 'default':\n dim_ordering = image_dim_ordering()\n if dim_ordering not in {'th', 'tf'}:\n raise ValueError('Unknown dim_ordering ' + str(dim_ordering))\n\n x = _preprocess_conv2d_input(x, dim_ordering)\n kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)\n padding = _preprocess_border_mode(border_mode)\n if filter_dilation == (1, 1):\n strides = (1,) + strides + (1,)\n x = tf.nn.conv2d(x, kernel, strides, padding=padding)\n else:\n assert filter_dilation[0] == filter_dilation[1]\n assert strides == (1, 1), 'Invalid strides for dilated convolution'\n x = tf.nn.atrous_conv2d(x, kernel, filter_dilation[0], padding=padding)\n return _postprocess_conv2d_output(x, dim_ordering)\n\n\ndef deconv2d(x, kernel, output_shape, strides=(1, 1),\n border_mode='valid',\n dim_ordering='default',\n image_shape=None, filter_shape=None):\n '''2D deconvolution (i.e. 
transposed convolution).\n\n # Arguments\n x: input tensor.\n kernel: kernel tensor.\n output_shape: 1D int tensor for the output shape.\n strides: strides tuple.\n border_mode: string, \"same\" or \"valid\".\n dim_ordering: \"tf\" or \"th\".\n Whether to use Theano or TensorFlow dimension ordering\n for inputs/kernels/ouputs.\n '''\n if dim_ordering == 'default':\n dim_ordering = image_dim_ordering()\n if dim_ordering not in {'th', 'tf'}:\n raise ValueError('Unknown dim_ordering ' + str(dim_ordering))\n\n x = _preprocess_conv2d_input(x, dim_ordering)\n output_shape = _preprocess_deconv_output_shape(output_shape, dim_ordering)\n kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)\n kernel = tf.transpose(kernel, (0, 1, 3, 2))\n padding = _preprocess_border_mode(border_mode)\n strides = (1,) + strides + (1,)\n\n x = tf.nn.conv2d_transpose(x, kernel, output_shape, strides,\n padding=padding)\n return _postprocess_conv2d_output(x, dim_ordering)\n\n\ndef atrous_conv2d(x, kernel, rate=1,\n border_mode='valid',\n dim_ordering='default',\n image_shape=None, filter_shape=None):\n if dim_ordering == 'default':\n dim_ordering = image_dim_ordering()\n if dim_ordering not in {'th', 'tf'}:\n raise ValueError('Unknown dim_ordering ' + str(dim_ordering))\n if rate == 1:\n return conv2d(x, kernel, strides=(1, 1), border_mode=border_mode,\n dim_ordering=dim_ordering)\n\n x = _preprocess_conv2d_input(x, dim_ordering)\n kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)\n padding = _preprocess_border_mode(border_mode)\n\n x = tf.nn.atrous_conv2d(x, kernel, rate, padding)\n return _postprocess_conv2d_output(x, dim_ordering)\n\n\ndef separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1),\n border_mode='valid', dim_ordering='default'):\n if dim_ordering == 'default':\n dim_ordering = image_dim_ordering()\n if dim_ordering not in {'th', 'tf'}:\n raise ValueError('Unknown dim_ordering ' + str(dim_ordering))\n\n x = _preprocess_conv2d_input(x, dim_ordering)\n depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel,\n dim_ordering)\n pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel,\n dim_ordering)\n padding = _preprocess_border_mode(border_mode)\n strides = (1,) + strides + (1,)\n\n x = tf.nn.separable_conv2d(x, depthwise_kernel, pointwise_kernel,\n strides, padding)\n return _postprocess_conv2d_output(x, dim_ordering)\n\n\ndef conv3d(x, kernel, strides=(1, 1, 1),\n border_mode='valid', dim_ordering='default',\n volume_shape=None, filter_shape=None):\n '''3D convolution.\n\n # Arguments\n kernel: kernel tensor.\n strides: strides tuple.\n border_mode: string, \"same\" or \"valid\".\n dim_ordering: \"tf\" or \"th\".\n Whether to use Theano or TensorFlow dimension ordering\n for inputs/kernels/ouputs.\n '''\n if dim_ordering == 'default':\n dim_ordering = image_dim_ordering()\n if dim_ordering not in {'th', 'tf'}:\n raise ValueError('Unknown dim_ordering ' + str(dim_ordering))\n\n x = _preprocess_conv3d_input(x, dim_ordering)\n kernel = _preprocess_conv3d_kernel(kernel, dim_ordering)\n padding = _preprocess_border_mode(border_mode)\n strides = (1,) + strides + (1,)\n\n x = tf.nn.conv3d(x, kernel, strides, padding)\n return _postprocess_conv3d_output(x, dim_ordering)\n\n\ndef pool2d(x, pool_size, strides=(1, 1),\n border_mode='valid', dim_ordering='default',\n pool_mode='max'):\n '''2D Pooling.\n\n # Arguments\n pool_size: tuple of 2 integers.\n strides: tuple of 2 integers.\n border_mode: one of \"valid\", \"same\".\n dim_ordering: one of \"th\", \"tf\".\n pool_mode: one 
of \"max\", \"avg\".\n '''\n if dim_ordering == 'default':\n dim_ordering = image_dim_ordering()\n if dim_ordering not in {'th', 'tf'}:\n raise ValueError('Unknown dim_ordering ' + str(dim_ordering))\n\n padding = _preprocess_border_mode(border_mode)\n strides = (1,) + strides + (1,)\n pool_size = (1,) + pool_size + (1,)\n\n x = _preprocess_conv2d_input(x, dim_ordering)\n\n if pool_mode == 'max':\n x = tf.nn.max_pool(x, pool_size, strides, padding=padding)\n elif pool_mode == 'avg':\n x = tf.nn.avg_pool(x, pool_size, strides, padding=padding)\n else:\n raise Exception('Invalid pooling mode: ' + str(pool_mode))\n\n return _postprocess_conv2d_output(x, dim_ordering)\n\n\ndef pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',\n dim_ordering='default', pool_mode='max'):\n '''3D Pooling.\n\n # Arguments\n pool_size: tuple of 3 integers.\n strides: tuple of 3 integers.\n border_mode: one of \"valid\", \"same\".\n dim_ordering: one of \"th\", \"tf\".\n pool_mode: one of \"max\", \"avg\".\n '''\n if dim_ordering == 'default':\n dim_ordering = image_dim_ordering()\n if dim_ordering not in {'th', 'tf'}:\n raise ValueError('Unknown dim_ordering ' + str(dim_ordering))\n\n padding = _preprocess_border_mode(border_mode)\n strides = (1,) + strides + (1,)\n pool_size = (1,) + pool_size + (1,)\n\n x = _preprocess_conv3d_input(x, dim_ordering)\n\n if pool_mode == 'max':\n x = tf.nn.max_pool3d(x, pool_size, strides, padding=padding)\n elif pool_mode == 'avg':\n x = tf.nn.avg_pool3d(x, pool_size, strides, padding=padding)\n else:\n raise Exception('Invalid pooling mode: ' + str(pool_mode))\n\n return _postprocess_conv3d_output(x, dim_ordering)\n\n\n# RANDOMNESS\n\ndef random_normal(shape, mean=0.0, std=1.0, dtype=_FLOATX, seed=None):\n if seed is None:\n seed = np.random.randint(10e6)\n return tf.random_normal(shape, mean=mean, stddev=std,\n dtype=dtype, seed=seed)\n\n\ndef random_uniform(shape, low=0.0, high=1.0, dtype=_FLOATX, seed=None):\n if seed is None:\n seed = np.random.randint(10e6)\n return tf.random_uniform(shape, minval=low, maxval=high,\n dtype=dtype, seed=seed)\n\n\ndef random_binomial(shape, p=0.0, dtype=_FLOATX, seed=None):\n if seed is None:\n seed = np.random.randint(10e6)\n return tf.select(tf.random_uniform(shape, dtype=dtype, seed=seed) <= p,\n tf.ones(shape, dtype=dtype),\n tf.zeros(shape, dtype=dtype))\n\n# CTC\n# tensorflow has a native implemenation, but it uses sparse tensors\n# and therefore requires a wrapper for Keras. 
The functions below convert\n# dense to sparse tensors and also wraps up the beam search code that is\n# in tensorflow's CTC implementation\n\ndef ctc_label_dense_to_sparse(labels, label_lengths):\n # undocumented feature soon to be made public\n from tensorflow.python.ops import functional_ops\n label_shape = tf.shape(labels)\n num_batches_tns = tf.pack([label_shape[0]])\n max_num_labels_tns = tf.pack([label_shape[1]])\n\n def range_less_than(previous_state, current_input):\n return tf.expand_dims(tf.range(label_shape[1]), 0) < tf.fill(max_num_labels_tns, current_input)\n\n init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)\n dense_mask = functional_ops.scan(range_less_than, label_lengths,\n initializer=init, parallel_iterations=1)\n dense_mask = dense_mask[:, 0, :]\n\n label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),\n label_shape)\n label_ind = tf.boolean_mask(label_array, dense_mask)\n\n batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]),\n max_num_labels_tns), tf.reverse(label_shape, [True])))\n batch_ind = tf.boolean_mask(batch_array, dense_mask)\n indices = tf.transpose(tf.reshape(tf.concat(0, [batch_ind, label_ind]), [2, -1]))\n\n vals_sparse = tf.gather_nd(labels, indices)\n\n return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))\n\n\ndef ctc_batch_cost(y_true, y_pred, input_length, label_length):\n\n '''Runs CTC loss algorithm on each batch element.\n\n # Arguments\n y_true: tensor (samples, max_string_length) containing the truth labels\n y_pred: tensor (samples, time_steps, num_categories) containing the prediction,\n or output of the softmax\n input_length: tensor (samples,1) containing the sequence length for\n each batch item in y_pred\n label_length: tensor (samples,1) containing the sequence length for\n each batch item in y_true\n\n # Returns\n Tensor with shape (samples,1) containing the\n CTC loss of each element\n '''\n label_length = tf.to_int32(tf.squeeze(label_length))\n input_length = tf.to_int32(tf.squeeze(input_length))\n sparse_labels = tf.to_int32(ctc_label_dense_to_sparse(y_true, label_length))\n\n y_pred = tf.log(tf.transpose(y_pred, perm=[1, 0, 2]) + 1e-8)\n\n return tf.expand_dims(ctc.ctc_loss(inputs=y_pred,\n labels=sparse_labels,\n sequence_length=input_length), 1)\n\n\ndef ctc_decode(y_pred, input_length, greedy=True, beam_width=100,\n top_paths=1):\n '''Decodes the output of a softmax using either\n greedy (also known as best path) or a constrained dictionary\n search.\n\n # Arguments\n y_pred: tensor (samples, time_steps, num_categories) containing the prediction,\n or output of the softmax\n input_length: tensor (samples,) containing the sequence length for\n each batch item in y_pred\n greedy: perform much faster best-path search if true. This does\n not use a dictionary\n beam_width: if greedy is false: a beam search decoder will be used\n with a beam of this width\n top_paths: if greedy is false: how many of the most probable paths will be returned\n\n # Returns\n Tuple:\n List: if greedy is true, returns a list of one element that contains\n the decoded sequence. If false, returns the `top_paths` most probable\n decoded sequences. 
Important: blank labels are returned as -1\n Tensor (top_paths,) that contains the log probability of each decoded sequence\n '''\n y_pred = tf.log(tf.transpose(y_pred, perm=[1, 0, 2]) + 1e-8)\n input_length = tf.to_int32(input_length)\n\n if greedy:\n (decoded, log_prob) = ctc.ctc_greedy_decoder(\n inputs=y_pred,\n sequence_length=input_length)\n else:\n (decoded, log_prob) = ctc.ctc_beam_search_decoder(\n inputs=y_pred,\n sequence_length=input_length, beam_width=beam_width,\n top_paths=top_paths)\n\n decoded_dense = [tf.sparse_to_dense(st.indices, st.shape, st.values, default_value=-1)\n for st in decoded]\n\n return (decoded_dense, log_prob)\n"
] | [
[
"tensorflow.nn.conv2d",
"tensorflow.group",
"tensorflow.ones_like",
"tensorflow.ones",
"tensorflow.zeros_like",
"tensorflow.nn.separable_conv2d",
"tensorflow.clip_by_value",
"tensorflow.greater",
"tensorflow.unpack",
"tensorflow.nn.avg_pool",
"tensorflow.random_normal_initializer",
"tensorflow.argmax",
"tensorflow.image.resize_nearest_neighbor",
"tensorflow.constant",
"tensorflow.ConfigProto",
"tensorflow.nn.softplus",
"tensorflow.pad",
"tensorflow.less_equal",
"tensorflow.nn.conv2d_transpose",
"tensorflow.all_variables",
"numpy.expand_dims",
"tensorflow.expand_dims",
"tensorflow.reduce_prod",
"tensorflow.fill",
"tensorflow.reduce_all",
"tensorflow.sin",
"tensorflow.to_int32",
"tensorflow.greater_equal",
"tensorflow.reset_default_graph",
"tensorflow.pow",
"tensorflow.square",
"tensorflow.Print",
"tensorflow.python.ops.tensor_array_ops.TensorArray",
"tensorflow.reduce_min",
"tensorflow.reverse",
"tensorflow.sqrt",
"tensorflow.tile",
"tensorflow.nn.tanh",
"tensorflow.sparse_tensor_dense_matmul",
"tensorflow.python.training.moving_averages.assign_moving_average",
"tensorflow.shape",
"tensorflow.random_uniform",
"tensorflow.transpose",
"tensorflow.nn.conv1d",
"numpy.eye",
"tensorflow.squeeze",
"tensorflow.nn.sigmoid",
"numpy.array",
"tensorflow.zeros",
"tensorflow.minimum",
"tensorflow.range",
"tensorflow.to_int64",
"tensorflow.round",
"tensorflow.log",
"tensorflow.reduce_sum",
"tensorflow.reduce_any",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.sign",
"tensorflow.initialize_variables",
"tensorflow.gather",
"tensorflow.contrib.ctc.ctc_beam_search_decoder",
"tensorflow.reduce_mean",
"tensorflow.get_default_session",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.constant_initializer",
"tensorflow.nn.moments",
"tensorflow.reshape",
"tensorflow.one_hot",
"tensorflow.random_normal",
"tensorflow.cast",
"tensorflow.SparseTensor",
"tensorflow.concat",
"tensorflow.less",
"tensorflow.assign_sub",
"tensorflow.nn.softsign",
"tensorflow.nn.batch_normalization",
"tensorflow.abs",
"tensorflow.nn.relu",
"tensorflow.select",
"tensorflow.Session",
"tensorflow.pack",
"tensorflow.sparse_tensor_to_dense",
"tensorflow.argmin",
"numpy.asarray",
"tensorflow.random_uniform_initializer",
"tensorflow.python.ops.functional_ops.scan",
"tensorflow.maximum",
"tensorflow.stop_gradient",
"tensorflow.nn.l2_normalize",
"tensorflow.exp",
"tensorflow.nn.in_top_k",
"tensorflow.matmul",
"tensorflow.gradients",
"tensorflow.assign_add",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"tensorflow.nn.max_pool3d",
"tensorflow.get_default_graph",
"numpy.random.randint",
"tensorflow.sparse_concat",
"tensorflow.split",
"tensorflow.nn.max_pool",
"tensorflow.nn.dropout",
"tensorflow.nn.avg_pool3d",
"tensorflow.nn.conv3d",
"tensorflow.sparse_placeholder",
"tensorflow.nn.atrous_conv2d",
"tensorflow.cos",
"tensorflow.gather_nd",
"tensorflow.contrib.ctc.ctc_greedy_decoder",
"tensorflow.placeholder",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.boolean_mask",
"tensorflow.batch_matmul",
"tensorflow.convert_to_tensor",
"tensorflow.sparse_to_dense",
"tensorflow.assign",
"tensorflow.not_equal",
"tensorflow.equal",
"tensorflow.reduce_max",
"tensorflow.contrib.ctc.ctc_loss",
"tensorflow.nn.elu"
]
] |
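The row above stores a pre-1.0 TensorFlow backend for Keras (note the `tf.pack` / `tf.select` era calls in both the source and the API list). As a quick, hedged illustration of what a few of these backend helpers compute, here is a minimal NumPy sketch of the `repeat_elements`, `repeat`, and `temporal_padding` semantics described in their docstrings; it only restates the documented behaviour and is not the stored implementation.

```python
import numpy as np


def repeat_elements(x, rep, axis):
    # Mirrors the docstring: shape (s1, s2, s3) with axis=1 -> (s1, s2 * rep, s3).
    return np.repeat(x, rep, axis=axis)


def repeat(x, n):
    # (samples, dim) -> (samples, n, dim), like the TF expand_dims + tile version.
    assert x.ndim == 2
    return np.tile(x[:, None, :], (1, n, 1))


def temporal_padding(x, padding=1):
    # Pads the middle (time) dimension of a 3D tensor with zeros on both sides.
    return np.pad(x, [(0, 0), (padding, padding), (0, 0)], mode="constant")


if __name__ == "__main__":
    x = np.arange(6).reshape(1, 2, 3)
    assert repeat_elements(x, 2, axis=1).shape == (1, 4, 3)
    assert repeat(np.ones((4, 5)), 3).shape == (4, 3, 5)
    assert temporal_padding(x, 1).shape == (1, 4, 3)
```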
DavidRother/semeval2020-task1 | [
"715f82afb8b282669d59ff610b63714d19db4618"
] | [
"semeval2020/util/embeddingloader.py"
] | [
"from os import listdir\nimport os.path\nimport pandas as pd\n\n\nclass EmbeddingLoader:\n\n def __init__(self, base_path, language='german', corpus='corpus2', explicit_word_list=None):\n self.explicit_word_list = explicit_word_list or []\n self.language = language\n self.corpus = corpus\n self.base_path = base_path\n self.target_words = self.find_target_words()\n self.embeddings = self.load_embeddings()\n\n @staticmethod\n def _find_csv_filenames(path_to_dir, suffix=\".csv\"):\n filenames = listdir(path_to_dir)\n return [filename for filename in filenames if filename.endswith(suffix)]\n\n def find_target_words(self):\n target_dir = f\"{self.base_path}{self.language}/{self.corpus}/\"\n csv_filenames = self._find_csv_filenames(target_dir)\n return [os.path.splitext(filename)[0] for filename in csv_filenames]\n\n def load_embeddings(self):\n target_dir = f\"{self.base_path}{self.language}/{self.corpus}/\"\n embedding_dict = {target_word: None for target_word in self.target_words}\n for filename in self._find_csv_filenames(target_dir):\n word = os.path.splitext(filename)[0]\n embedding_dict[word] = pd.read_csv(f\"{target_dir}/{filename}\")\n return embedding_dict\n\n def __getitem__(self, key):\n return self.embeddings[key]\n\n\n"
] | [
[
"pandas.read_csv"
]
] |
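A small usage sketch for the `EmbeddingLoader` stored above. The import path is assumed from the `file_path` field, and the word `Abend` plus the two-column CSV are illustrative; the one real constraint the class imposes is that `base_path` ends with a separator, since it is concatenated directly into `f"{base_path}{language}/{corpus}/"`.

```python
import os
import tempfile

import pandas as pd

from semeval2020.util.embeddingloader import EmbeddingLoader  # module path assumed from file_path above

base = tempfile.mkdtemp() + "/"  # keep the trailing "/": base_path is concatenated directly
os.makedirs(os.path.join(base, "german", "corpus2"), exist_ok=True)
pd.DataFrame({"dim0": [0.1, 0.2], "dim1": [0.3, 0.4]}).to_csv(
    os.path.join(base, "german", "corpus2", "Abend.csv"), index=False)  # "Abend" is an illustrative target word

loader = EmbeddingLoader(base_path=base, language="german", corpus="corpus2")
print(loader.target_words)    # ['Abend'] - words discovered from the *.csv filenames
print(loader["Abend"].shape)  # (2, 2)   - the DataFrame read back from Abend.csv
```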
bugerry87/flownet3d | [
"d55391a3d6ab0d5748d55eb4dfce40c68fd9b742"
] | [
"utils/utils.py"
] | [
"'''\r\nHelper functions for this project.\r\n\r\nAuthor: Gerald Baulig\r\n'''\r\n\r\n#Global libs\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom time import time\r\nfrom glob import glob, iglob\r\n\r\n\r\ndef myinput(prompt, default=None, cast=None):\r\n ''' myinput(prompt, default=None, cast=None) -> arg\r\n Handle an interactive user input.\r\n Returns a default value if no input is given.\r\n Casts or parses the input immediately.\r\n Loops the input prompt until a valid input is given.\r\n \r\n Args:\r\n prompt: The prompt or help text.\r\n default: The default value if no input is given.\r\n cast: A cast or parser function.\r\n '''\r\n while True:\r\n arg = input(prompt)\r\n if arg == '':\r\n return default\r\n elif cast != None:\r\n try:\r\n return cast(arg)\r\n except:\r\n print(\"Invalid input type. Try again...\")\r\n else:\r\n return arg\r\n pass\r\n\r\n\r\ndef ifile(wildcards, sort=False, recursive=True):\r\n def sglob(wc):\r\n if sort:\r\n return sorted(glob(wc, recursive=recursive))\r\n else:\r\n return iglob(wc, recursive=recursive)\r\n\r\n if isinstance(wildcards, str):\r\n for wc in sglob(wildcards):\r\n yield wc\r\n elif isinstance(wildcards, list):\r\n if sort:\r\n wildcards = sorted(wildcards)\r\n for wc in wildcards:\r\n if any(('*?[' in c) for c in wc):\r\n for c in sglob(wc):\r\n yield c\r\n else:\r\n yield wc\r\n else:\r\n raise TypeError(\"wildecards must be string or list.\") \r\n\r\n\r\ndef arrange_subplots(pltc):\r\n ''' arrange_subplots(pltc) -> fig, axes\r\n Arranges a given number of plots to well formated subplots.\r\n \r\n Args:\r\n pltc: The number of plots.\r\n \r\n Returns:\r\n fig: The figure.\r\n axes: A list of axes of each subplot.\r\n '''\r\n cols = int(np.floor(np.sqrt(pltc)))\r\n rows = int(np.ceil(pltc/cols))\r\n fig, axes = plt.subplots(cols,rows)\r\n if not isinstance(axes, np.ndarray):\r\n axes = np.array([axes]) #fix format so it can be used consistently.\r\n \r\n return fig, axes.flatten()\r\n\r\n\r\nlast_call = 0\r\ndef time_delta():\r\n ''' time_delta() -> delta\r\n Captures time delta from last call.\r\n \r\n Returns:\r\n delta: Past time in seconds.\r\n '''\r\n global last_call\r\n delta = time() - last_call\r\n return delta"
] | [
[
"numpy.array",
"numpy.ceil",
"numpy.sqrt",
"matplotlib.pyplot.subplots"
]
] |
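One detail worth flagging in the `utils/utils.py` row above: `time_delta()` declares `global last_call` but never assigns it, so every call measures against the initial value of 0 (the Unix epoch) instead of the previous call. A standalone corrected sketch of the intended behaviour (not the stored function):

```python
from time import sleep, time

last_call = time()


def time_delta():
    """Return the seconds elapsed since the previous call (or since import on the first call)."""
    global last_call
    now = time()
    delta = now - last_call
    last_call = now   # the stored version omits this update, so its delta keeps growing
    return delta


if __name__ == "__main__":
    time_delta()                    # reset the reference point
    sleep(0.1)
    print(round(time_delta(), 1))   # ~0.1
```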
norihitoishida/nyaggle | [
"fb838f0010a666835216c588919540e9bfbc5731"
] | [
"tests/validation/test_cross_validate.py"
] | [
"from typing import List\n\nimport numpy as np\n\nfrom catboost import CatBoostClassifier\nfrom lightgbm import LGBMClassifier\nfrom sklearn.datasets import make_classification, make_regression\nfrom sklearn.linear_model import RidgeClassifier, Ridge\nfrom sklearn.metrics import roc_auc_score, r2_score\nfrom sklearn.model_selection import train_test_split, KFold\n\nfrom nyaggle.experiment import autoprep_gbdt\nfrom nyaggle.testing import make_classification_df\nfrom nyaggle.validation import cross_validate, Take\n\n\ndef test_cv_sklean_binary():\n X, y = make_classification(n_samples=1024, n_features=20, class_sep=0.98, random_state=0)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)\n\n model = RidgeClassifier(alpha=1.0)\n\n pred_oof, pred_test, scores, _ = cross_validate(model, X_train, y_train, X_test, cv=5, eval_func=roc_auc_score)\n\n assert len(scores) == 5 + 1\n assert scores[-1] >= 0.85 # overall auc\n assert roc_auc_score(y_train, pred_oof) == scores[-1]\n assert roc_auc_score(y_test, pred_test) >= 0.85 # test score\n\n\ndef test_cv_sklean_regression():\n X, y = make_regression(n_samples=1024, n_features=20, random_state=0)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)\n\n model = Ridge(alpha=1.0)\n\n pred_oof, pred_test, scores, _ = cross_validate(model, X_train, y_train, X_test, cv=5, eval_func=r2_score)\n\n print(scores)\n assert len(scores) == 5 + 1\n assert scores[-1] >= 0.95 # overall r2\n assert r2_score(y_train, pred_oof) == scores[-1]\n assert r2_score(y_test, pred_test) >= 0.95 # test r2\n\n\ndef test_cv_lgbm():\n X, y = make_classification(n_samples=1024, n_features=20, class_sep=0.98, random_state=0)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)\n\n models = [LGBMClassifier(n_estimators=300) for _ in range(5)]\n\n pred_oof, pred_test, scores, importance = cross_validate(models, X_train, y_train, X_test, cv=5,\n eval_func=roc_auc_score,\n fit_params={'early_stopping_rounds': 200})\n\n print(scores)\n assert len(scores) == 5 + 1\n assert scores[-1] >= 0.85 # overall roc_auc\n assert roc_auc_score(y_train, pred_oof) == scores[-1]\n assert roc_auc_score(y_test, pred_test) >= 0.85 # test roc_auc\n assert roc_auc_score(y, models[0].predict_proba(X)[:, 1]) >= 0.85 # make sure models are trained\n assert len(importance) == 5\n assert list(importance[0].columns) == ['feature', 'importance']\n assert len(importance[0]) == 20\n\n\ndef test_cv_lgbm_df():\n X, y = make_classification_df(n_samples=1024, n_num_features=20, n_cat_features=1, class_sep=0.98, random_state=0)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)\n\n models = [LGBMClassifier(n_estimators=300) for _ in range(5)]\n\n pred_oof, pred_test, scores, importance = cross_validate(models, X_train, y_train, X_test, cv=5,\n eval_func=roc_auc_score)\n\n print(scores)\n assert len(scores) == 5 + 1\n assert scores[-1] >= 0.85 # overall roc_auc\n assert roc_auc_score(y_train, pred_oof) == scores[-1]\n assert roc_auc_score(y_test, pred_test) >= 0.85 # test roc_auc\n assert roc_auc_score(y_test, models[0].predict_proba(X_test)[:, 1]) >= 0.85 # make sure models are trained\n assert len(importance) == 5\n assert list(importance[0].columns) == ['feature', 'importance']\n assert len(importance[0]) == 20 + 1\n assert models[0].booster_.num_trees() < 300 # making sure early stopping worked\n\n\ndef test_cv_cat_df():\n X, y = make_classification_df(n_samples=1024, 
n_num_features=20, n_cat_features=1, class_sep=0.98, random_state=0)\n X, _ = autoprep_gbdt('cat', X, None)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)\n\n models = [CatBoostClassifier(n_estimators=300) for _ in range(5)]\n\n pred_oof, pred_test, scores, importance = cross_validate(models, X_train, y_train, X_test, cv=5,\n eval_func=roc_auc_score,\n fit_params={'cat_features': ['cat_0']})\n\n print(scores)\n assert len(scores) == 5 + 1\n assert scores[-1] >= 0.85 # overall roc_auc\n assert roc_auc_score(y_train, pred_oof) == scores[-1]\n assert roc_auc_score(y_test, pred_test) >= 0.85 # test roc_auc\n assert roc_auc_score(y_test, models[0].predict_proba(X_test)[:, 1]) >= 0.85 # make sure models are trained\n assert len(importance) == 5\n assert list(importance[0].columns) == ['feature', 'importance']\n assert len(importance[0]) == 20 + 1\n assert models[0].tree_count_ < 300 # making sure early stopping worked\n\n\ndef test_cv_partial_evaluate():\n X, y = make_classification(n_samples=1024, n_features=20, class_sep=0.98, random_state=0)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)\n\n model = RidgeClassifier(alpha=1.0)\n\n n = 0\n\n def _fold_count(*args):\n nonlocal n\n n += 1\n\n cv = Take(2, KFold(5))\n\n pred_oof, pred_test, scores, _ = cross_validate(model, X_train, y_train, X_test, cv=cv, eval_func=roc_auc_score,\n on_each_fold=_fold_count)\n\n assert len(scores) == 2 + 1\n assert scores[-1] >= 0.8 # overall auc\n assert n == 2\n\n\ndef test_fit_params_callback():\n X, y = make_classification(n_samples=1024, n_features=20, class_sep=0.98, random_state=0)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)\n\n models = [LGBMClassifier(n_estimators=300) for _ in range(5)]\n\n sample_weights = np.random.randint(1, 10, size=len(X_train))\n sample_weights = sample_weights / sample_weights.sum()\n\n def fit_params(n: int, train_index: List[int], valid_index: List[int]):\n return {\n 'early_stopping_rounds': 100,\n 'sample_weight': list(sample_weights[train_index]),\n 'eval_sample_weight': [list(sample_weights[valid_index])]\n }\n\n result_w_weight = cross_validate(models, X_train, y_train, X_test, cv=5,\n eval_func=roc_auc_score, fit_params=fit_params)\n\n result_wo_weight = cross_validate(models, X_train, y_train, X_test, cv=5,\n eval_func=roc_auc_score, fit_params={'early_stopping_rounds': 50})\n\n assert result_w_weight.scores[-1] != result_wo_weight.scores[-1]\n"
] | [
[
"sklearn.linear_model.RidgeClassifier",
"sklearn.metrics.r2_score",
"sklearn.linear_model.Ridge",
"sklearn.model_selection.KFold",
"sklearn.datasets.make_regression",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.roc_auc_score",
"sklearn.datasets.make_classification"
]
] |
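The tests above check `nyaggle.validation.cross_validate` for a consistent contract: out-of-fold predictions for the training rows, fold-averaged predictions for the test rows, and a score list whose last entry is the overall score. A minimal scikit-learn-only sketch of that OOF / averaged-test pattern, using a `LogisticRegression` stand-in rather than nyaggle itself:

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import KFold, train_test_split

X, y = make_classification(n_samples=1024, n_features=20, class_sep=0.98, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.5, random_state=0)

cv = KFold(n_splits=5, shuffle=True, random_state=0)
oof = np.zeros(len(X_tr))          # out-of-fold predictions for the training rows
test_pred = np.zeros(len(X_te))    # fold-averaged predictions for the test rows
scores = []

for train_idx, valid_idx in cv.split(X_tr):
    model = LogisticRegression(max_iter=1000)
    model.fit(X_tr[train_idx], y_tr[train_idx])
    oof[valid_idx] = model.predict_proba(X_tr[valid_idx])[:, 1]
    test_pred += model.predict_proba(X_te)[:, 1] / cv.get_n_splits()
    scores.append(roc_auc_score(y_tr[valid_idx], oof[valid_idx]))

scores.append(roc_auc_score(y_tr, oof))  # overall score last, mirroring what the tests assert
print(scores[-1], roc_auc_score(y_te, test_pred))
```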
harry-stark/jax | [
"236efbdbac643f497f1455ebbcebb7d726f3703c"
] | [
"tests/profiler_test.py"
] | [
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import partial\nimport glob\nimport os\nimport shutil\nimport tempfile\nimport threading\nimport unittest\nfrom absl.testing import absltest\n\nimport jax\nimport jax.numpy as jnp\nimport jax.profiler\nfrom jax.config import config\nimport jax._src.test_util as jtu\n\ntry:\n import portpicker\nexcept ImportError:\n portpicker = None\n\ntry:\n from tensorflow.python.profiler import profiler_client\n from tensorflow.python.profiler import profiler_v2 as tf_profiler\nexcept ImportError:\n profiler_client = None\n tf_profiler = None\n\nconfig.parse_flags_with_absl()\n\n\nclass ProfilerTest(unittest.TestCase):\n # These tests simply test that the profiler API does not crash; they do not\n # check functional correctness.\n\n def setUp(self):\n super().setUp()\n self.worker_start = threading.Event()\n self.profile_done = False\n\n @unittest.skipIf(not portpicker, \"Test requires portpicker\")\n def testStartStopServer(self):\n port = portpicker.pick_unused_port()\n jax.profiler.start_server(port=port)\n del port\n jax.profiler.stop_server()\n\n @unittest.skipIf(not portpicker, \"Test requires portpicker\")\n def testCantStartMultipleServers(self):\n port = portpicker.pick_unused_port()\n jax.profiler.start_server(port=port)\n port = portpicker.pick_unused_port()\n with self.assertRaisesRegex(\n ValueError, \"Only one profiler server can be active at a time.\"):\n jax.profiler.start_server(port=port)\n jax.profiler.stop_server()\n\n def testCantStopServerBeforeStartingServer(self):\n with self.assertRaisesRegex(ValueError, \"No active profiler server.\"):\n jax.profiler.stop_server()\n\n def testProgrammaticProfiling(self):\n with tempfile.TemporaryDirectory() as tmpdir:\n try:\n jax.profiler.start_trace(tmpdir)\n jax.pmap(lambda x: jax.lax.psum(x + 1, 'i'), axis_name='i')(\n jnp.ones(jax.local_device_count()))\n finally:\n jax.profiler.stop_trace()\n\n proto_path = glob.glob(os.path.join(tmpdir, \"**/*.xplane.pb\"),\n recursive=True)\n self.assertEqual(len(proto_path), 1)\n with open(proto_path[0], \"rb\") as f:\n proto = f.read()\n # Sanity check that serialized proto contains host, device, and\n # Python traces without deserializing.\n self.assertIn(b\"/host:CPU\", proto)\n if jtu.device_under_test() == \"tpu\":\n self.assertIn(b\"/device:TPU\", proto)\n self.assertIn(b\"pxla.py\", proto)\n\n def testProgrammaticProfilingErrors(self):\n with self.assertRaisesRegex(RuntimeError, \"No profile started\"):\n jax.profiler.stop_trace()\n\n try:\n with tempfile.TemporaryDirectory() as tmpdir:\n jax.profiler.start_trace(tmpdir)\n with self.assertRaisesRegex(\n RuntimeError,\n \"Profile has already been started. 
Only one profile may be run at a \"\n \"time.\"):\n jax.profiler.start_trace(tmpdir)\n finally:\n jax.profiler.stop_trace()\n\n def testProgrammaticProfilingContextManager(self):\n with tempfile.TemporaryDirectory() as tmpdir:\n with jax.profiler.trace(tmpdir):\n jax.pmap(lambda x: jax.lax.psum(x + 1, 'i'), axis_name='i')(\n jnp.ones(jax.local_device_count()))\n\n proto_path = glob.glob(os.path.join(tmpdir, \"**/*.xplane.pb\"),\n recursive=True)\n self.assertEqual(len(proto_path), 1)\n with open(proto_path[0], \"rb\") as f:\n proto = f.read()\n # Sanity check that serialized proto contains host and device traces\n # without deserializing.\n self.assertIn(b\"/host:CPU\", proto)\n if jtu.device_under_test() == \"tpu\":\n self.assertIn(b\"/device:TPU\", proto)\n\n def testTraceAnnotation(self):\n x = 3\n with jax.profiler.TraceAnnotation(\"mycontext\"):\n x = x + 2\n\n def testTraceFunction(self):\n @jax.profiler.annotate_function\n def f(x, *, y):\n return x + 2 * y\n self.assertEqual(f(7, y=3), 13)\n\n @jax.profiler.annotate_function\n def f(x, *, name):\n return x + 2 * len(name)\n self.assertEqual(f(7, name=\"abc\"), 13)\n\n @partial(jax.profiler.annotate_function, name=\"aname\")\n def g(x):\n return x + 2\n self.assertEqual(g(7), 9)\n\n @partial(jax.profiler.annotate_function, name=\"aname\", akwarg=\"hello\")\n def h(x):\n return x + 2\n self.assertEqual(h(7), 9)\n\n def testDeviceMemoryProfile(self):\n x = jnp.ones((20,)) + 7.\n self.assertIsInstance(jax.profiler.device_memory_profile(), bytes)\n del x\n\n def _check_xspace_pb_exist(self, logdir):\n path = os.path.join(logdir, 'plugins', 'profile', '*', '*.xplane.pb')\n self.assertEqual(1, len(glob.glob(path)),\n 'Expected one path match: ' + path)\n\n @unittest.skip(\"Test causes OOMs\")\n @unittest.skipIf(not (portpicker and profiler_client and tf_profiler),\n \"Test requires tensorflow.profiler and portpicker\")\n def testSingleWorkerSamplingMode(self, delay_ms=None):\n def on_worker(port, worker_start):\n jax.profiler.start_server(port)\n worker_start.set()\n x = jnp.ones((1000, 1000))\n while True:\n with jax.profiler.TraceAnnotation(\"atraceannotation\"):\n jnp.dot(x, x.T).block_until_ready()\n if self.profile_done:\n jax.profiler.stop_server()\n break\n\n def on_profile(port, logdir, worker_start):\n worker_start.wait()\n options = tf_profiler.ProfilerOptions(\n host_tracer_level=2,\n python_tracer_level=2,\n device_tracer_level=1,\n delay_ms=delay_ms,\n )\n\n # Request for 1000 milliseconds of profile.\n duration_ms = 1000\n profiler_client.trace(f'localhost:{port}', logdir, duration_ms,\n '', 1000, options)\n self.profile_done = True\n\n logdir = absltest.get_default_test_tmpdir()\n # Remove any existing log files.\n shutil.rmtree(logdir, ignore_errors=True)\n port = portpicker.pick_unused_port()\n thread_profiler = threading.Thread(\n target=on_profile, args=(port, logdir, self.worker_start))\n thread_worker = threading.Thread(\n target=on_worker, args=(port, self.worker_start))\n thread_worker.start()\n thread_profiler.start()\n thread_profiler.join()\n thread_worker.join(120)\n self._check_xspace_pb_exist(logdir)\n\nif __name__ == \"__main__\":\n absltest.main(testLoader=jtu.JaxTestLoader())\n"
] | [
[
"tensorflow.python.profiler.profiler_client.trace",
"tensorflow.python.profiler.profiler_v2.ProfilerOptions"
]
] |
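A compact usage sketch of the programmatic tracing pattern that `testProgrammaticProfilingContextManager` exercises, assuming a working JAX install: run some work inside `jax.profiler.trace(...)`, then look for the serialized `*.xplane.pb` file that the test also globs for.

```python
import glob
import os
import tempfile

import jax
import jax.numpy as jnp

with tempfile.TemporaryDirectory() as tmpdir:
    with jax.profiler.trace(tmpdir):
        x = jnp.ones((512, 512))
        jnp.dot(x, x).block_until_ready()   # keep the compute inside the trace window

    # The profiler writes one xplane protobuf per run under the log directory.
    print(glob.glob(os.path.join(tmpdir, "**/*.xplane.pb"), recursive=True))
```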
TheYuanLiao/individual_mobility_model | [
"3026b8d51e50ffe24a398b22789c86230cd971ce"
] | [
"src/py/sqlite3-converter.py"
] | [
"import os\nimport subprocess\nimport yaml\nimport sqlite3\nfrom sqlalchemy import create_engine\nimport pandas as pd\nimport multiprocessing as mp\nfrom tzwhere import tzwhere\nfrom dateutil import tz\nfrom datetime import datetime as dt\ntzg = tzwhere.tzwhere()\n\n\ndef get_repo_root():\n \"\"\"Get the root directory of the repo.\"\"\"\n dir_in_repo = os.path.dirname(os.path.abspath('__file__')) # os.getcwd()\n return subprocess.check_output('git rev-parse --show-toplevel'.split(),\n cwd=dir_in_repo,\n universal_newlines=True).rstrip()\n\n\nROOT_dir = get_repo_root()\n\nwith open(ROOT_dir + '/lib/regions.yaml') as f:\n region_manager = yaml.load(f, Loader=yaml.FullLoader)\n\n\ndef where_self(row):\n \"\"\"\n Get the time zone from a pair of GPS coordinates.\n :param row: contains \"latitude\" and \"longitude\"\n :type row: a row of a dataframe\n :return: a timezone corresponding to the input coordinates pair\n :rtype: string\n \"\"\"\n try:\n x = tz.gettz(tzg.tzNameAt(row[\"latitude\"], row[\"longitude\"]))\n except:\n x = \"Unknown\"\n return x\n\n\ndef where2time(row):\n \"\"\"\n Convert UTC time to local time with known time zone.\n :param row: contains \"UTC\", \"time_zone\", \"created_at\"\n :type row: a row of a dataframe\n :return: local time converted from UTC and time_zone\n :rtype: object\n \"\"\"\n from_zone = tz.gettz('UTC')\n timezone = row[\"time_zone\"]\n\n rawUTC = dt.strptime(row[\"created_at\"],'%b %d %H:%M:%S %Y')\n utc = rawUTC.replace(tzinfo=from_zone)\n central = str(utc.astimezone(timezone))\n rawT = dt.strptime(central[:-6], '%Y-%m-%d %H:%M:%S')\n return rawT\n\n\ndef region_converter(region=None):\n \"\"\"\n Convert time of raw geotagged tweets and save to a new db per specified region.\n :param region: name of the region to be processed\n :type region: string\n :return: none\n :rtype: none\n \"\"\"\n if not os.path.exists(ROOT_dir + f'/dbs/{region}/{region}.sqlite3'):\n db = region_manager[region]['source_path']\n conn = sqlite3.connect(db)\n df_raw = pd.read_sql_query(\"SELECT * FROM geo INNER JOIN records ON geo.rec_id = records.id\", con=conn)\n df_raw = df_raw.loc[:, ['tw_id', 'user_id', 'time',\n 'coord_lat', 'coord_long']].rename(columns={'tw_id': 'tweet_id',\n 'time': 'created_at',\n 'coord_lat': 'latitude',\n 'coord_long': 'longitude'})\n print(f'Region {region} data retrieved.')\n # Get time zone and convert the UTC time into local time\n df_raw[\"time_zone\"] = df_raw.apply(lambda row: where_self(row), axis=1)\n df_raw = df_raw.loc[df_raw[\"time_zone\"] != \"Unknown\", :]\n df_raw[\"created_at\"] = df_raw.apply(lambda row: where2time(row), axis=1)\n df_raw[\"time_zone\"] = df_raw[\"time_zone\"].apply(lambda x: str(x)[8:-2])\n df_raw.loc[:, 'month'] = df_raw.loc[:, 'created_at'].apply(lambda x: x.month)\n df_raw.loc[:, 'weekday'] = df_raw.loc[:, 'created_at'].apply(lambda x: x.weekday())\n df_raw.loc[:, 'hour_of_day'] = df_raw.loc[:, 'created_at'].apply(lambda x: x.hour)\n df_raw = df_raw.loc[df_raw['time_zone'] != '']\n print(f'Region {region} time processed.')\n\n # Save into a new .sqlite3\n if not os.path.exists(ROOT_dir + f'/dbs/{region}/'):\n os.makedirs(ROOT_dir + f'/dbs/{region}/')\n\n engine = create_engine('sqlite:///' + ROOT_dir + f'/dbs/{region}/{region}.sqlite3', echo=False)\n sqlite_connection = engine.connect()\n sqlite_table = 'geo_tweet'\n df_raw.to_sql(sqlite_table, sqlite_connection, if_exists='replace', index=False)\n sqlite_connection.close()\n print(f'Region {region} converted .sqlite3 saved.')\n else:\n print(f'Region {region} 
converted .sqlite3 exists!')\n\n\nif __name__ == '__main__':\n region_list = ['sweden', 'netherlands', 'saopaulo', 'australia', 'austria', 'barcelona',\n 'capetown', 'cebu', 'egypt', 'guadalajara', 'jakarta',\n 'johannesburg', 'kualalumpur', 'lagos', 'madrid', 'manila', 'mexicocity', 'moscow', 'nairobi',\n 'rio', 'saudiarabia', 'stpertersburg', 'surabaya']\n\n # parallelize the converting of tweets of multiple regions\n pool = mp.Pool(mp.cpu_count())\n pool.starmap(region_converter, [(r, ) for r in region_list])\n pool.close()\n"
] | [
[
"pandas.read_sql_query"
]
] |
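A self-contained sketch of the per-row time handling in the converter above (`where_self` resolves a timezone from coordinates via `tzwhere`; `where2time` then shifts the UTC `created_at` string into that zone). To keep it lightweight, the timezone lookup is replaced here by a fixed zone name and the timestamp is an illustrative value; only the `'%b %d %H:%M:%S %Y'` format comes from the stored code.

```python
from datetime import datetime as dt

from dateutil import tz

created_at = "Jun 05 14:30:00 2019"         # illustrative Twitter-style timestamp
local_zone = tz.gettz("Europe/Stockholm")   # stand-in for what where_self() would resolve from coordinates

utc_time = dt.strptime(created_at, "%b %d %H:%M:%S %Y").replace(tzinfo=tz.gettz("UTC"))
local_time = utc_time.astimezone(local_zone)

print(local_time)        # 2019-06-05 16:30:00+02:00 (CEST is UTC+2 in June)
print(local_time.hour)   # 16 -> the value the converter stores as hour_of_day
```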
MachengShen/Implicit_ensemble_training | [
"e649f21937612f6aae1b9ba158011c0dce49000c"
] | [
"IET_module/networks/soft_modular_model.py"
] | [
"import numpy as np\nfrom typing import Tuple, List\nfrom ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.framework import try_import_torch\nfrom ray.rllib.models.torch.misc import SlimFC\nimport torch.nn.functional as F\nfrom IET_module.networks.utils import basic_init, uniform_init, null_activation\nfrom IET_module.utils.check_nan_inf import assert_nan_inf\n\n\ntorch, nn = try_import_torch()\n\n\nclass EmbeddingShapingNet(nn.Module):\n # This network takes the gaussian latent parameter as input,\n # and shape it through a FC network with a softmax layer at the end\n # so that the output is a soft version of a task one-hot embedding\n # we use the same shape for input, intermediate layer and output\n def __init__(self, eb_dim: int,\n hidden_dims: List[int]=None,\n softmax: bool=True,\n ) -> None:\n super(EmbeddingShapingNet, self).__init__()\n layers = []\n if not hidden_dims:\n hidden_dims = [eb_dim for j in range(2)]\n layers.append(SlimFC(in_size=eb_dim,\n out_size=hidden_dims[0],\n activation_fn='relu',\n initializer=torch.nn.init.xavier_uniform_)\n )\n for i in range(len(hidden_dims) - 1):\n layer = SlimFC(in_size=hidden_dims[i],\n out_size=hidden_dims[i + 1],\n activation_fn='relu',\n initializer=torch.nn.init.xavier_uniform_)\n layers.append(layer)\n layers.append(nn.Linear(hidden_dims[-1], eb_dim))\n if softmax:\n layers.append(nn.Softmax(dim=-1))\n self._net = nn.Sequential(*layers)\n\n def forward(self, embedding: torch.Tensor) -> torch.Tensor:\n return self._net(embedding)\n\nclass ModularGatedCascadeCondNet(nn.Module):\n def __init__(self, observation_space, action_space, num_outputs,\n model_config, name, network_type: str='policy',\n **kwargs):\n nn.Module.__init__(self)\n self._network_type = network_type\n\n assert network_type in ['policy', 'value'], \"must be either policy or value network\"\n if self._network_type == 'policy':\n output_shape = action_space.n\n else:\n output_shape = 1\n\n self._config = model_config['custom_model_config']\n self._use_latent_embedding = self._config['use_latent_embedding']\n self._use_dict_obs_space = self._config['use_dict_obs_space']\n base_type = self._config['base_type']\n em_input_shape = self._config['em_input_shape']\n self._em_input_shape = em_input_shape\n self._emb_shaping_net_hidden_shapes = self._config['emb_shaping_net_hidden_shapes']\n self._emb_shaping_net_last_softmax = self._config['emb_shaping_net_last_softmax']\n em_hidden_shapes = self._config['em_hidden_shapes']\n hidden_shapes = self._config['hidden_shapes']\n num_layers, num_modules = self._config['num_layers'], self._config['num_modules']\n module_hidden = self._config['module_hidden']\n gating_hidden, num_gating_layers = self._config['gating_hidden'], self._config['num_gating_layers']\n # gated_hidden\n add_bn = self._config.get('add_bn', True)\n pre_softmax = self._config.get('pre_softmax', False)\n cond_ob = self._config.get('cond_ob', True)\n module_hidden_init_func = self._config.get('module_hidden_init_func', basic_init)\n last_init_func = self._config.get('last_init_func', uniform_init)\n activation_func = self._config.get('activation_func', F.relu)\n\n if self._use_dict_obs_space:\n # the obs_space_dict was flattened by rllib, but the observation is the original\n base_input_shape = observation_space.original_space.spaces['original_obs'].shape\n else:\n input_shape = observation_space.shape\n def 
_get_base_input_shape(raw_input_shape: Tuple, embedding_dim: int) -> Tuple:\n if not len(raw_input_shape) == 1:\n raise AssertionError(\"only support flat observation\")\n return (raw_input_shape[0] - embedding_dim,)\n base_input_shape = _get_base_input_shape(input_shape, em_input_shape)\n\n self.base = base_type(\n last_activation_func = null_activation,\n input_shape = base_input_shape,\n activation_func = activation_func,\n hidden_shapes = hidden_shapes,)\n self.em_base = base_type(\n last_activation_func = null_activation,\n input_shape = em_input_shape,\n activation_func = activation_func,\n hidden_shapes = em_hidden_shapes,)\n\n if self._use_latent_embedding:\n self._embedding_shaping_net = EmbeddingShapingNet(eb_dim=self._em_input_shape,\n hidden_dims=self._emb_shaping_net_hidden_shapes,\n softmax=self._emb_shaping_net_last_softmax,\n )\n\n self.activation_func = activation_func\n\n module_input_shape = self.base.output_shape\n self.layer_modules = []\n\n self.num_layers = num_layers\n self.num_modules = num_modules\n\n for i in range(num_layers):\n layer_module = []\n for j in range( num_modules ):\n fc = nn.Linear(module_input_shape, module_hidden)\n module_hidden_init_func(fc)\n if add_bn:\n module = nn.Sequential(\n nn.BatchNorm1d(module_input_shape),\n fc,\n nn.BatchNorm1d(module_hidden)\n )\n else:\n module = fc\n\n layer_module.append(module)\n self.__setattr__(\"module_{}_{}\".format(i,j), module)\n\n module_input_shape = module_hidden\n self.layer_modules.append(layer_module)\n self.last = nn.Linear(module_input_shape, output_shape)\n last_init_func( self.last )\n\n assert self.em_base.output_shape == self.base.output_shape, \\\n \"embedding should has the same dimension with base output for gated\"\n gating_input_shape = self.em_base.output_shape\n self.gating_fcs = []\n for i in range(num_gating_layers):\n gating_fc = nn.Linear(gating_input_shape, gating_hidden)\n module_hidden_init_func(gating_fc)\n self.gating_fcs.append(gating_fc)\n self.__setattr__(\"gating_fc_{}\".format(i), gating_fc)\n gating_input_shape = gating_hidden\n\n self.gating_weight_fcs = []\n self.gating_weight_cond_fcs = []\n\n self.gating_weight_fc_0 = nn.Linear(gating_input_shape,\n num_modules * num_modules )\n last_init_func( self.gating_weight_fc_0)\n # self.gating_weight_fcs.append(self.gating_weight_fc_0)\n\n for layer_idx in range(num_layers-2):\n gating_weight_cond_fc = nn.Linear((layer_idx+1) * \\\n num_modules * num_modules,\n gating_input_shape)\n module_hidden_init_func(gating_weight_cond_fc)\n self.__setattr__(\"gating_weight_cond_fc_{}\".format(layer_idx+1),\n gating_weight_cond_fc)\n self.gating_weight_cond_fcs.append(gating_weight_cond_fc)\n\n gating_weight_fc = nn.Linear(gating_input_shape,\n num_modules * num_modules)\n last_init_func(gating_weight_fc)\n self.__setattr__(\"gating_weight_fc_{}\".format(layer_idx+1),\n gating_weight_fc)\n self.gating_weight_fcs.append(gating_weight_fc)\n\n self.gating_weight_cond_last = nn.Linear((num_layers-1) * \\\n num_modules * num_modules,\n gating_input_shape)\n module_hidden_init_func(self.gating_weight_cond_last)\n\n self.gating_weight_last = nn.Linear(gating_input_shape, num_modules)\n last_init_func( self.gating_weight_last )\n\n self.pre_softmax = pre_softmax\n self.cond_ob = cond_ob\n\n @property\n def embedding_shaping_net(self) -> nn.Module:\n return self._embedding_shaping_net\n\n def forward(self, input_dict, state, seq_lens, return_weights = False):\n # out = self.first_layer(input_dict[\"obs\"])\n # self._output = 
self._global_shared_layer(out)\n # model_out = self.last_layer(self._output)\n # return model_out, []\n # extract the head of the observation vector as raw obs\n # and the tail of the observation vector as the latent Gaussian parameter\n if self._use_dict_obs_space:\n x = input_dict['obs']['original_obs']\n gaussian_latent_parameter = input_dict['obs']['random_noise']\n else:\n x = input_dict['obs'][:, :-self._em_input_shape]\n gaussian_latent_parameter = input_dict['obs'][:, -self._em_input_shape:]\n\n if self._use_latent_embedding:\n embedding_input = self._embedding_shaping_net(gaussian_latent_parameter)\n else:\n batch_size = x.shape[0]\n embedding_input = torch.ones(batch_size, self._em_input_shape) / self._em_input_shape\n\n out = self.base(x)\n embedding = self.em_base(embedding_input)\n\n if self.cond_ob:\n embedding = embedding * out\n\n out = self.activation_func(out)\n\n if len(self.gating_fcs) > 0:\n embedding = self.activation_func(embedding)\n for fc in self.gating_fcs[:-1]:\n embedding = fc(embedding)\n embedding = self.activation_func(embedding)\n embedding = self.gating_fcs[-1](embedding)\n\n base_shape = embedding.shape[:-1]\n\n weights = []\n flatten_weights = []\n\n raw_weight = self.gating_weight_fc_0(self.activation_func(embedding))\n\n weight_shape = base_shape + torch.Size([self.num_modules,\n self.num_modules])\n flatten_shape = base_shape + torch.Size([self.num_modules * \\\n self.num_modules])\n\n raw_weight = raw_weight.view(weight_shape)\n\n softmax_weight = F.softmax(raw_weight, dim=-1)\n weights.append(softmax_weight)\n if self.pre_softmax:\n flatten_weights.append(raw_weight.view(flatten_shape))\n else:\n flatten_weights.append(softmax_weight.view(flatten_shape))\n\n for gating_weight_fc, gating_weight_cond_fc in zip(self.gating_weight_fcs, self.gating_weight_cond_fcs):\n cond = torch.cat(flatten_weights, dim=-1)\n if self.pre_softmax:\n cond = self.activation_func(cond)\n cond = gating_weight_cond_fc(cond)\n cond = cond * embedding\n cond = self.activation_func(cond)\n\n raw_weight = gating_weight_fc(cond)\n raw_weight = raw_weight.view(weight_shape)\n softmax_weight = F.softmax(raw_weight, dim=-1)\n weights.append(softmax_weight)\n if self.pre_softmax:\n flatten_weights.append(raw_weight.view(flatten_shape))\n else:\n flatten_weights.append(softmax_weight.view(flatten_shape))\n\n cond = torch.cat(flatten_weights, dim=-1)\n if self.pre_softmax:\n cond = self.activation_func(cond)\n cond = self.gating_weight_cond_last(cond)\n cond = cond * embedding\n cond = self.activation_func(cond)\n\n raw_last_weight = self.gating_weight_last(cond)\n last_weight = F.softmax(raw_last_weight, dim = -1)\n\n module_outputs = [(layer_module(out)).unsqueeze(-2) \\\n for layer_module in self.layer_modules[0]]\n\n module_outputs = torch.cat(module_outputs, dim = -2 )\n\n # [TODO] Optimize using 1 * 1 convolution.\n\n for i in range(self.num_layers - 1):\n new_module_outputs = []\n for j, layer_module in enumerate(self.layer_modules[i + 1]):\n module_input = (module_outputs * \\\n weights[i][..., j, :].unsqueeze(-1)).sum(dim=-2)\n\n module_input = self.activation_func(module_input)\n new_module_outputs.append((\n layer_module(module_input)\n ).unsqueeze(-2))\n\n module_outputs = torch.cat(new_module_outputs, dim = -2)\n\n out = (module_outputs * last_weight.unsqueeze(-1)).sum(-2)\n out = self.activation_func(out)\n if self._network_type == 'policy':\n out = self.last(out)\n else:\n out = self.last(out).view(out.shape[0])\n if return_weights:\n return out, state, weights, 
last_weight\n return out, state\n\n\nclass SoftModularActorCriticNet(TorchModelV2, nn.Module):\n def __init__(self, observation_space, action_space, num_outputs,\n model_config, name, **kwargs):\n TorchModelV2.__init__(self, observation_space, action_space,\n num_outputs, model_config, name)\n nn.Module.__init__(self)\n self._current_value = None\n self._policy_net = ModularGatedCascadeCondNet(observation_space, \n action_space,\n num_outputs,\n model_config,\n name,\n network_type='policy')\n self._value_net = ModularGatedCascadeCondNet(observation_space, \n action_space, \n num_outputs,\n model_config, \n name,\n network_type='value')\n\n @override(TorchModelV2)\n def forward(self, input_dict, state, seq_lens, return_weights = False):\n logits, *_ = self._policy_net(input_dict, state, seq_lens, return_weights=return_weights)\n self._current_value, *_ = self._value_net(input_dict, state, seq_lens, \n return_weights=return_weights)\n if (self._current_value > 1000000).any():\n print(f'soft_modular_model line 323: vf too large: {self._current_value}')\n assert_nan_inf([logits, self._current_value])\n # clip the logits to prevent inf from kl\n logits = torch.clip(logits, min=-15.0, max=15.0)\n return logits, state\n\n @override(TorchModelV2)\n def value_function(self) -> torch.Tensor:\n if self._current_value is None:\n raise AssertionError(\"must call self._value_net.forward() first\")\n return self._current_value\n\n @property\n def policy_net(self) -> nn.Module:\n return self._policy_net\n\n @property\n def value_net(self) -> nn.Module:\n return self._value_net"
] | [
[
"torch.nn.functional.softmax"
]
] |
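The only API flagged for the record above is `torch.nn.functional.softmax`, which `ModularGatedCascadeCondNet` uses to turn raw gating logits into per-module routing weights. A minimal, self-contained sketch of that gating step; the tensor sizes (`batch`, `num_modules`, `gating_dim`) are illustrative placeholders, not values from the repo's config:

```python
import torch
import torch.nn.functional as F

batch, num_modules, gating_dim = 4, 3, 16            # illustrative sizes only

gating_fc = torch.nn.Linear(gating_dim, num_modules * num_modules)
embedding = torch.randn(batch, gating_dim)            # stands in for the gating embedding

raw_weight = gating_fc(F.relu(embedding))             # (batch, num_modules * num_modules)
raw_weight = raw_weight.view(batch, num_modules, num_modules)

# each row of routing weights is normalized over the previous layer's modules
softmax_weight = F.softmax(raw_weight, dim=-1)
assert torch.allclose(softmax_weight.sum(dim=-1), torch.ones(batch, num_modules))
```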
mukkachaitanya/parity-models | [
"29db79387c39ab5106d6b098cf26f70c49ee7450"
] | [
"clipper-parm/integration-tests/deploy_pytorch_models.py"
] | [
"from __future__ import absolute_import, print_function\nimport os\nimport sys\nimport requests\nimport json\nimport numpy as np\nimport time\nimport logging\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\n\nimport torch\nimport torch.utils.data as data\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\nfrom test_utils import (create_docker_connection, BenchmarkException, headers,\n log_clipper_state)\ncur_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, os.path.abspath(\"%s/../clipper_admin\" % cur_dir))\n\nfrom clipper_admin.deployers.pytorch import deploy_pytorch_model, create_endpoint\n\nlogging.basicConfig(\n format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',\n datefmt='%y-%m-%d:%H:%M:%S',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\napp_name = \"pytorch-test\"\nmodel_name = \"pytorch-model\"\n\n\ndef normalize(x):\n return x.astype(np.double) / 255.0\n\n\ndef objective(y, pos_label):\n # prediction objective\n if y == pos_label:\n return 1\n else:\n return 0\n\n\ndef parsedata(train_path, pos_label):\n trainData = np.genfromtxt(train_path, delimiter=',', dtype=int)\n records = trainData[:, 1:]\n labels = trainData[:, :1]\n transformedlabels = [objective(ele, pos_label) for ele in labels]\n return (records, transformedlabels)\n\n\ndef predict(model, xs):\n preds = []\n for x in xs:\n p = model(x).data.numpy().tolist()[0]\n preds.append(str(p))\n return preds\n\n\ndef deploy_and_test_model(clipper_conn,\n model,\n version,\n link_model=False,\n predict_fn=predict):\n deploy_pytorch_model(clipper_conn, model_name, version, \"integers\",\n predict_fn, model)\n\n time.sleep(5)\n\n if link_model:\n clipper_conn.link_model_to_app(app_name, model_name)\n time.sleep(5)\n\n test_model(clipper_conn, app_name, version)\n\n\ndef test_model(clipper_conn, app, version):\n time.sleep(25)\n num_preds = 25\n num_defaults = 0\n addr = clipper_conn.get_query_addr()\n for i in range(num_preds):\n response = requests.post(\n \"http://%s/%s/predict\" % (addr, app),\n headers=headers,\n data=json.dumps({\n 'input': get_test_point()\n }))\n result = response.json()\n if response.status_code == requests.codes.ok and result[\"default\"]:\n num_defaults += 1\n elif response.status_code != requests.codes.ok:\n print(result)\n raise BenchmarkException(response.text)\n\n if num_defaults > 0:\n print(\"Error: %d/%d predictions were default\" % (num_defaults,\n num_preds))\n if num_defaults > num_preds / 2:\n raise BenchmarkException(\"Error querying APP %s, MODEL %s:%d\" %\n (app, model_name, version))\n\n\n# Define a simple NN model\nclass BasicNN(nn.Module):\n def __init__(self):\n super(BasicNN, self).__init__()\n self.net = nn.Linear(28 * 28, 2)\n\n def forward(self, x):\n if type(x) == np.ndarray:\n x = torch.from_numpy(x)\n x = x.float()\n x = Variable(x)\n x = x.view(1, 1, 28, 28)\n x = x / 255.0\n batch_size = x.size(0)\n x = x.view(batch_size, -1)\n output = self.net(x.float())\n return F.softmax(output)\n\n\ndef train(model):\n model.train()\n optimizer = optim.SGD(model.parameters(), lr=0.001)\n for epoch in range(10):\n for i, d in enumerate(train_loader, 1):\n image, j = d\n optimizer.zero_grad()\n output = model(image)\n loss = F.cross_entropy(output,\n Variable(\n torch.LongTensor([train_y[i - 1]])))\n loss.backward()\n optimizer.step()\n return model\n\n\ndef get_test_point():\n return [np.random.randint(255) for _ in range(784)]\n\n\n# Define a dataloader to read data\nclass 
TrainingDataset(data.Dataset):\n def __init__(self, data, label):\n self.imgs = data\n self.classes = label\n\n def __getitem__(self, index):\n img = self.imgs[index]\n label = self.classes[index]\n img = torch.Tensor(img)\n return img, torch.Tensor(label)\n\n\nif __name__ == \"__main__\":\n pos_label = 3\n try:\n clipper_conn = create_docker_connection(\n cleanup=True, start_clipper=True)\n\n train_path = os.path.join(cur_dir, \"data/train.data\")\n train_x, train_y = parsedata(train_path, pos_label)\n train_x = normalize(train_x)\n train_loader = TrainingDataset(train_x, train_y)\n\n try:\n clipper_conn.register_application(app_name, \"integers\",\n \"default_pred\", 100000)\n time.sleep(1)\n\n addr = clipper_conn.get_query_addr()\n response = requests.post(\n \"http://%s/%s/predict\" % (addr, app_name),\n headers=headers,\n data=json.dumps({\n 'input': get_test_point()\n }))\n result = response.json()\n if response.status_code != requests.codes.ok:\n print(\"Error: %s\" % response.text)\n raise BenchmarkException(\"Error creating app %s\" % app_name)\n\n version = 1\n\n model = BasicNN()\n nn_model = train(model)\n\n deploy_and_test_model(\n clipper_conn, nn_model, version, link_model=True)\n\n app_and_model_name = \"easy-register-app-model\"\n create_endpoint(clipper_conn, app_and_model_name, \"integers\",\n predict, nn_model)\n test_model(clipper_conn, app_and_model_name, 1)\n\n except BenchmarkException:\n log_clipper_state(clipper_conn)\n logger.exception(\"BenchmarkException\")\n clipper_conn = create_docker_connection(\n cleanup=True, start_clipper=False)\n sys.exit(1)\n else:\n clipper_conn = create_docker_connection(\n cleanup=True, start_clipper=False)\n except Exception:\n logger.exception(\"Exception\")\n clipper_conn = create_docker_connection(\n cleanup=True, start_clipper=False)\n sys.exit(1)\n"
] | [
[
"torch.nn.Linear",
"torch.autograd.Variable",
"numpy.genfromtxt",
"torch.from_numpy",
"numpy.random.randint",
"torch.LongTensor",
"torch.nn.functional.softmax",
"torch.Tensor"
]
] |
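The `BasicNN` model in the Clipper integration test above accepts raw numpy arrays and converts them to tensors inside `forward`. A minimal sketch of that conversion path using the same calls (`torch.from_numpy`, `Variable`, `F.softmax`); `Variable` is a legacy no-op wrapper in current PyTorch and is kept only to mirror the test, and `TinyNN` is a hypothetical stand-in name:

```python
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.autograd import Variable  # legacy wrapper, kept to mirror the test code


class TinyNN(nn.Module):
    """Minimal stand-in for the test's BasicNN: flatten a 28x28 image, score 2 classes."""

    def __init__(self):
        super().__init__()
        self.net = nn.Linear(28 * 28, 2)

    def forward(self, x):
        if isinstance(x, np.ndarray):              # numpy input from the query frontend
            x = torch.from_numpy(x)
        x = Variable(x.float()) / 255.0
        x = x.view(1, -1)                          # (1, 784)
        return F.softmax(self.net(x), dim=-1)      # dim made explicit; the test relies on the default


fake_point = np.array([np.random.randint(255) for _ in range(784)], dtype=np.uint8)
print(TinyNN()(fake_point))                        # tensor of shape (1, 2), rows summing to 1
```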
xabeab/BlueNRG-GUI | [
"1134c2625f9126b76aad3677af62827372db80f1"
] | [
"gui.py"
] | [
"from peripheral import *\nfrom processes import *\nfrom settings import *\nfrom tkinter import *\nfrom tkinter import ttk\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nimport os\nfrom multiprocessing import Process, Barrier, Queue\nfrom bluepy import btle\nimport matplotlib.animation as animation\nimport datetime as dt\n\n# Commands\ndef connectProcedure():\n connectButton.config(state=\"disabled\")\n disconnectButton.config(state=\"normal\")\n identifyActivityButton.config(state=\"normal\")\n identifyDevicesButton.config(state=\"disabled\")\n print(\"Connecting the devices...\")\n # Create dir to save data\n cwd = os.getcwd()\n os.mkdir(cwd + \"/Recordings - \" + dt.datetime.now().strftime('%c'))\n os.chdir(cwd + \"/Recordings - \" + dt.datetime.now().strftime('%c'))\n # Create peripheral objects\n peripherals = [ACM(macAdresses[i], i, LOCATIONS[i]) for i in range(5) if macAdresses[i] != '']\n # Create barrier object\n barrier = Barrier(len(peripherals))\n # Configure and start logging processes\n queue = Queue(-1)\n process = Process(target=runLogger, args=(queue,))\n process.start()\n # Start processes\n for peripheral in peripherals:\n process = Process(target=runProcess, args=(peripheral, barrier, queue))\n process.start()\n \n \ndef disconnectProcedure():\n masterClock.value = 0\n connectButton.config(state=\"normal\")\n disconnectButton.config(state=\"disabled\")\n identifyActivityButton.config(state=\"disabled\")\n identifyDevicesButton.config(state=\"normal\")\n identifyActivityButton.configure(bg=\"orange\")\n identifyActivity.value = 0\n os.chdir(\"..\")\n killAllProcesses()\n print(\"Devices disconnected\")\n\ndef closeProcedure():\n killAllProcesses()\n print(\"Application closed by user's request\")\n root.destroy()\n\ndef identifyActivityProcedure():\n if identifyActivity.value == 0:\n identifyActivityButton.configure(bg=\"red\")\n identifyActivity.value = 1\n print(\"Activity identification was added to the timestamps\")\n else:\n identifyActivityButton.configure(bg=\"orange\")\n identifyActivity.value = 0\n print(\"Activity identification was removed from the timestamps\")\n\ndef identifyDevicesProcedure():\n global macAdresses\n connectButton.config(state=\"normal\")\n disconnectButton.config(state=\"disabled\")\n identifyActivityButton.config(state=\"disabled\")\n identifyDevicesButton.config(state=\"normal\")\n macAdresses = [entries[i].get() for i in range(5)] \n print(\"The devices' MAC adresses were changed and added\")\n \ndef changeDevice(event):\n global line\n #Remove data from previous device\n for i in range(x_len):\n for idx in range(3):\n ys[idx].append(0)\n ys[idx] = ys[idx][-x_len:]\n line[idx].set_ydata(ys[idx])\n title = \"Device \" + str(combo.current()+1) + \" data\"\n a.set_title(title)\n\n\ndef animate(i, ys):\n deviceidx = combo.current() * 3\n for idx in range(3):\n ys[idx].append(dataToDisplay[idx + deviceidx])\n ys[idx] = ys[idx][-x_len:]\n line[idx].set_ydata(ys[idx])\n return line\n\nroot = Tk()\nroot.title(\"Activity Recognition based on Movement Utility\")\nroot.geometry(\"1000x600\")\nroot.resizable(0, 0)\nplt.style.use('ggplot')\n\n# Creating main frame\nmainFrame = Frame(root, width=500, height=500)\nmainFrame.grid(column=0, row=0, sticky=(N, W, E, S))\nroot.columnconfigure(0, weight=0)\nroot.columnconfigure(1, weight=0)\nroot.columnconfigure(2, weight=1)\nroot.rowconfigure(0, 
weight=1)\nroot.protocol('WM_DELETE_WINDOW', closeProcedure)\n\n# Combobox\ncombo = ttk.Combobox(root, values = [\"Device 1 - \" + str(LOCATIONS[0]), \"Device 2 - \" + str(LOCATIONS[1]), \"Device 3 - \" + str(LOCATIONS[2]), \"Device 4 - \" + str(LOCATIONS[3]), \"Device 5 - \" + str(LOCATIONS[4])])\ncombo.grid(row=1, column=2, padx=10, pady=5)\ncombo.current(0)\ncombo.bind(\"<<ComboboxSelected>>\", changeDevice)\n\n# Buttons\nidentifyDevicesButton = Button(mainFrame, text=\"IDENTIFY DEVICES\", bg=\"orange\", fg=\"white\", command=identifyDevicesProcedure, padx=20, pady=20)\nidentifyDevicesButton.grid(row=5, column=0, columnspan=2, padx=10, pady=10)\nconnectButton = Button(mainFrame, text=\"CONNECT & START\", bg=\"orange\", fg=\"white\", command=connectProcedure, padx=20, pady=20, state=\"disable\")\nconnectButton.grid(row=6, column=0, columnspan=2, padx=10, pady=10)\ndisconnectButton = Button(mainFrame, text=\"DISCONNECT\", bg=\"orange\", fg=\"white\", command=disconnectProcedure, padx=20, pady=20, state=\"disable\")\ndisconnectButton.grid(row=8, column=0, columnspan=2, padx=10, pady=10)\nidentifyActivityButton = Button(mainFrame, text=\"IDENTIFY ACTIVITY\", bg=\"orange\", fg=\"white\", command=identifyActivityProcedure, padx=20, pady=20, state=\"disable\")\nidentifyActivityButton.grid(row=9, column=0, columnspan=2, padx=10, pady=10)\n\n# Entry\nentries = []\nfor i in range(5):\n entry = Entry(mainFrame, font=40)\n entry.grid(row=i, column=1, padx=10, pady=1)\n entries.append(entry)\n \n\n# Labels for entries\nfor i in range(5):\n label = Label(mainFrame, text=\"MAC Adress -\" + \" Device \" + str(i+1) + \" - \" + str(LOCATIONS[i]))\n label.grid(row=i, column=0, padx=5)\n\n# Plot Initialization\n# Parameters\nys = []\nx_len = 300 # Number of points to display\ny_range = [-35000, 35000] # Range of possible Y values to display\nxs = list(range(0, x_len))\nfor i in range(3):\n ys.append([0] * x_len)\n\nf = Figure(figsize=(5, 5), dpi=100)\na = f.add_subplot(111)\n# Create a blank line. We will update the line in animate\nline = [a.plot(xs, ys[i])[0] for i in range(3)]\n\na.set_ylim(y_range)\na.set_title('Device 1 Data')\na.set_xlabel('Showing the last 300 samples')\n\ncanvas = FigureCanvasTkAgg(f, master=root)\ncanvas.draw()\ncanvas.get_tk_widget().grid(row=0, column=2, sticky=(N, W, E, S))\n\ntoolbarFrame = Frame(master=root)\ntoolbarFrame.grid(row=2, column=2, sticky=(W,E))\ntoolbar = NavigationToolbar2Tk(canvas, toolbarFrame)\ntoolbar.update()\n\n# Set up plot to call animate() function periodically\nani = animation.FuncAnimation(f, animate, fargs=(ys,), interval=20, blit=True)\n\nwhile True:\n root.update_idletasks()\n root.update()\n\n"
] | [
[
"matplotlib.use",
"matplotlib.animation.FuncAnimation",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"matplotlib.pyplot.style.use",
"matplotlib.figure.Figure",
"matplotlib.backends.backend_tkagg.NavigationToolbar2Tk"
]
] |
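The GUI above keeps a fixed-length rolling buffer per channel and lets `matplotlib.animation.FuncAnimation` redraw the lines. A stripped-down, Tk-free sketch of that rolling-buffer update; the buffer length matches the GUI's `x_len`, but the sine data source, the Agg backend, and the GIF output (which assumes Pillow is installed) are placeholders for the BLE samples and the embedded Tk canvas:

```python
import matplotlib
matplotlib.use("Agg")                      # headless backend for this sketch; the GUI uses TkAgg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

x_len = 300                                # number of samples kept on screen, as in the GUI
xs = list(range(x_len))
ys = [0.0] * x_len

fig, ax = plt.subplots()
(line,) = ax.plot(xs, ys)
ax.set_ylim(-1.5, 1.5)


def animate(i, ys):
    ys.append(np.sin(i / 10.0))            # placeholder for one incoming sensor sample
    del ys[0]                              # keep the buffer at x_len points
    line.set_ydata(ys)
    return (line,)


ani = animation.FuncAnimation(fig, animate, fargs=(ys,), frames=60, interval=20, blit=True)
ani.save("rolling_plot.gif", writer="pillow")   # output path and writer are assumptions
```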
waissfowl/QAP_pt | [
"af9ccc67aa5fd84017bda796d9831521f63d983d"
] | [
"src/qap/Logger.py"
] | [
"import numpy as np\nimport os\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom scipy.spatial import ConvexHull\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\n\nif torch.cuda.is_available():\n dtype = torch.cuda.FloatTensor\n dtype_l = torch.cuda.LongTensor\n torch.cuda.manual_seed(0)\nelse:\n dtype = torch.FloatTensor\n dtype_l = torch.LongTensor\n torch.manual_seed(0)\n\ndef compute_recovery_rate(pred, labels):\n pred = pred.max(2)[1]\n error = 1 - torch.eq(pred, labels).type(dtype)#.squeeze(2)\n frob_norm = error.mean(1)#.squeeze(1)\n accuracy = 1 - frob_norm\n accuracy = accuracy.mean(0).squeeze()\n return accuracy.data.cpu().numpy()#[0]\n\nclass Logger(object):\n def __init__(self, path_logger):\n directory = os.path.join(path_logger, 'plots/')\n self.path = path_logger\n self.path_dir = directory\n # Create directory if necessary\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n self.loss_train = []\n self.loss_test = []\n self.accuracy_train = []\n self.accuracy_test = []\n self.args = None\n\n def write_settings(self, args):\n self.args = {}\n # write info\n path = os.path.join(self.path, 'experiment.txt')\n with open(path, 'w') as file:\n for arg in vars(args):\n file.write(str(arg) + ' : ' + str(getattr(args, arg)) + '\\n')\n self.args[str(arg)] = getattr(args, arg)\n\n def save_model(self, model):\n save_dir = os.path.join(self.path, 'parameters/')\n # Create directory if necessary\n try:\n os.stat(save_dir)\n except:\n os.mkdir(save_dir)\n path = os.path.join(save_dir, 'gnn.pt')\n torch.save(model, path)\n print('Model Saved.')\n\n def load_model(self): \n load_dir = os.path.join(self.path, 'parameters/')\n # check if any training has been done before.\n try:\n os.stat(load_dir)\n except:\n print(\"Training has not been done before testing. 
This session will be terminated.\")\n sys.exit()\n path = os.path.join(load_dir, 'gnn.pt')\n print('Loading the most recent model...')\n siamese_gnn = torch.load(path)\n return siamese_gnn\n\n def add_train_loss(self, loss):\n self.loss_train.append(loss.data.cpu().numpy())\n\n def add_test_loss(self, loss):\n self.loss_test.append(loss)\n\n def add_train_accuracy(self, pred, labels):\n accuracy = compute_recovery_rate(pred, labels)\n self.accuracy_train.append(accuracy)\n\n def add_test_accuracy(self, pred, labels):\n accuracy = compute_recovery_rate(pred, labels)\n self.accuracy_test.append(accuracy)\n\n def plot_train_loss(self):\n plt.figure(0)\n plt.clf()\n iters = range(len(self.loss_train))\n plt.semilogy(iters, self.loss_train, 'b')\n plt.xlabel('iterations')\n plt.ylabel('Cross Entropy Loss')\n plt.title('Training Loss: p={}, p_e={}'\n .format(self.args['edge_density'], self.args['noise']))\n path = os.path.join(self.path_dir, 'training_loss.png') \n plt.savefig(path)\n\n def plot_test_loss(self):\n plt.figure(1)\n plt.clf()\n test_freq = self.args['test_freq']\n iters = test_freq * range(len(self.loss_test))\n plt.semilogy(iters, self.loss_test, 'b')\n plt.xlabel('iterations')\n plt.ylabel('Cross Entropy Loss')\n plt.title('Testing Loss: p={}, p_e={}'\n .format(self.args['edge_density'], self.args['noise']))\n path = os.path.join(self.path_dir, 'testing_loss.png') \n plt.savefig(path)\n\n def plot_train_accuracy(self):\n plt.figure(0)\n plt.clf()\n iters = range(len(self.accuracy_train))\n plt.plot(iters, self.accuracy_train, 'b')\n plt.xlabel('iterations')\n plt.ylabel('Accuracy')\n plt.title('Training Accuracy: p={}, p_e={}'\n .format(self.args['edge_density'], self.args['noise']))\n path = os.path.join(self.path_dir, 'training_accuracy.png') \n plt.savefig(path)\n\n def plot_test_accuracy(self):\n plt.figure(1)\n plt.clf()\n test_freq = self.args['test_freq']\n iters = test_freq * range(len(self.accuracy_test))\n plt.plot(iters, self.accuracy_test, 'b')\n plt.xlabel('iterations')\n plt.ylabel('Accuracy')\n plt.title('Testing Accuracy: p={}, p_e={}'\n .format(self.args['edge_density'], self.args['noise']))\n path = os.path.join(self.path_dir, 'testing_accuracy.png') \n plt.savefig(path)\n\n def save_results(self):\n path = os.path.join(self.path, 'results.npz')\n np.savez(path, accuracy_train=np.array(self.accuracy_train),\n accuracy_test=np.array(self.accuracy_test),\n loss_train=self.loss_train, loss_test=self.loss_test)\n\n"
] | [
[
"matplotlib.use",
"numpy.array",
"torch.cuda.manual_seed",
"torch.eq",
"matplotlib.pyplot.xlabel",
"torch.save",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.load",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.clf"
]
] |
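The `Logger` above selects the Agg backend so training curves can be written on a headless machine, and persists the whole model object with `torch.save` / `torch.load`. A minimal sketch of that pattern; the `/tmp` paths and the toy linear model are illustrative stand-ins, not the repo's GNN:

```python
import os
import matplotlib
matplotlib.use("Agg")                        # must be selected before pyplot is imported
import matplotlib.pyplot as plt
import torch

loss_train = [1.0 / (i + 1) for i in range(100)]   # stand-in for Logger.loss_train

plt.figure(0)
plt.clf()
plt.semilogy(range(len(loss_train)), loss_train, "b")
plt.xlabel("iterations")
plt.ylabel("Cross Entropy Loss")
plt.savefig(os.path.join("/tmp", "training_loss.png"))

model = torch.nn.Linear(4, 2)                # toy stand-in for the siamese GNN
torch.save(model, "/tmp/gnn.pt")             # the Logger pickles the full module, not a state_dict
restored = torch.load("/tmp/gnn.pt")         # newer PyTorch releases may need weights_only=False here
```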
KilnOfTheSecondFlame/EnvironmentDashboard | [
"44818e2f55e2d3aee14f01fb05daa058e83e9d23"
] | [
"app.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.io as pio\nimport plotly.figure_factory as ff\nimport plotly.graph_objects as go\nimport pandas as pd\nimport configparser\n\nfrom plotly.subplots import make_subplots\nfrom dash.dependencies import Input, Output\nfrom EnvironmentSensor import EnvironmentSensor\nfrom KostalPlenticore import SolarPoller\n\nconfig = configparser.ConfigParser()\nconfig.read('config.conf')\npoll_time = float(config.get('PiEnvironmentSensor', 'sensor_poll_time'))\ndebug_flag = config.getboolean('DEFAULT', 'debug')\ndashboard_name = config.get('DEFAULT', 'dashboard_name')\nip_address_converter = config.get('Converter', 'device_ip')\npassword_converter = config.get('Converter', 'password')\nconverter_poll_time = float(config.get('Converter', 'converter_poll_time'))\n\n\ndef setup_app():\n environment_sensor = EnvironmentSensor(\n poll_time=poll_time,\n debug=debug_flag\n )\n solar_poller = SolarPoller(\n ip=ip_address_converter,\n password=password_converter,\n poll_time=converter_poll_time,\n debug=debug_flag\n )\n environment_sensor.start()\n solar_poller.start()\n\n\ndef draw_solar_figure():\n df = pd.read_csv('temp.csv', header=5)\n df.Zeit = pd.DatetimeIndex(\n pd.to_datetime(df.Zeit, unit='s')).tz_localize('Etc/UTC').tz_convert('Europe/Zurich').tz_localize(None)\n sub_df = df[['Zeit', 'HC1 P', 'HC2 P', 'HC3 P', 'SOC H', 'DC1 P', 'DC2 P']]\n sub_df = sub_df.fillna(method='pad')\n sub_df['DC P'] = sub_df['DC1 P'] + sub_df['DC2 P']\n fig = make_subplots(specs=[[{\"secondary_y\": True}]])\n fig.add_trace(\n go.Scatter(\n x=sub_df['Zeit'],\n y=sub_df['SOC H'],\n name='Battery Charge Status',\n mode='lines',\n fill='tozeroy',\n opacity=0.1,\n hoverinfo='name+y'\n ),\n secondary_y=True,\n )\n fig.add_trace(\n go.Scatter(\n x=sub_df['Zeit'],\n y=sub_df['DC P'],\n name='Energy Production',\n mode='lines',\n fill='tozeroy',\n hoverinfo='name+y'\n ),\n secondary_y=False,\n )\n fig.add_trace(\n go.Scatter(\n x=sub_df['Zeit'],\n y=sub_df['HC1 P'],\n name='Battery Consumption',\n mode='lines',\n hoverinfo='name+y'\n ),\n secondary_y=False,\n )\n fig.add_trace(\n go.Scatter(\n x=sub_df['Zeit'],\n y=sub_df['HC2 P'],\n name='PV Consumption',\n mode='lines',\n hoverinfo='name+y'\n ),\n secondary_y=False,\n )\n fig.add_trace(\n go.Scatter(\n x=sub_df['Zeit'],\n y=sub_df['HC3 P'],\n name='Grid Consumption',\n mode='lines',\n hoverinfo='name+y'\n ),\n secondary_y=False,\n )\n fig.update_xaxes(title_text=\"Datum und Zeit\", dtick=7200000.0)\n fig.update_yaxes(title_text=\"Power Consumption [W]\", secondary_y=False)\n fig.update_yaxes(title_text=\"Battery Charge [%]\", secondary_y=True, range=[-5, 110])\n fig.layout.template = 'simple_white'\n return fig\n\n\ndef get_environment_stats():\n env_df = pd.read_csv('environment.log')\n latest = env_df.tail(1)\n return latest.Temperature.values[0], latest.Pressure.values[0], latest.Humidity.values[0]\n\n\nsetup_app()\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\npio.templates.default = \"ggplot2\"\n\n# Default Graphs\nenv_table = ff.create_table(\n [\n ['Temperatur', 'Luftdruck', 'Feuchtigkeit'],\n ['25.0°C', '1025.2mbar', '48.3% Rel. 
Luftf.']\n ]\n)\nfor i in range(len(env_table.layout.annotations)):\n env_table.layout.annotations[i].font.size = 20\n\napp.layout = html.Div(\n children=[\n html.H1(dashboard_name),\n dcc.Tabs(\n children=[\n dcc.Tab(\n label='Umgebungsmonitor',\n children=[dcc.Graph(id='environment-stats', figure=env_table),\n dcc.Interval(\n id='environment-interval',\n interval=10 * 1000,\n n_intervals=0\n )]\n ),\n dcc.Tab(\n label='Solaranlage',\n children=[\n dcc.Graph(id='solar-lastday', figure=draw_solar_figure()),\n dcc.Interval(\n id='solar-interval',\n interval=30 * 1000,\n n_intervals=0\n )]\n )\n ],\n mobile_breakpoint=0\n )\n ]\n)\n\n\[email protected](Output('environment-stats', 'figure'),\n [Input('environment-interval', 'n_intervals')])\ndef update_environment_status(*args, **kwargs):\n temperature, pressure, humidity = get_environment_stats()\n env_table = ff.create_table(\n [\n ['Temperatur', 'Luftdruck', 'Feuchtigkeit'],\n [\n '{0:4.2f}°C'.format(temperature),\n '{0:5.2f} mbar'.format(pressure),\n '{0:3.2f}% Relative Luftfeuchtigkeit'.format(humidity)\n ]\n ]\n )\n for index in range(len(env_table.layout.annotations)):\n env_table.layout.annotations[index].font.size = 25\n return env_table\n\n\[email protected](Output('solar-lastday', 'figure'),\n [Input('solar-interval', 'n_intervals')])\ndef update_solar_figure(*args, **kwargs):\n return draw_solar_figure()\n"
] | [
[
"pandas.to_datetime",
"pandas.read_csv"
]
] |
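`draw_solar_figure` above converts the converter's Unix-seconds `Zeit` column into naive Europe/Zurich timestamps before plotting. A small sketch of just that conversion on made-up sample values rather than the `temp.csv` dump:

```python
import pandas as pd

# two fake samples, seconds since the epoch (what the converter's CSV dump provides)
df = pd.DataFrame({"Zeit": [1600000000, 1600003600], "DC1 P": [120.0, 250.0]})

df["Zeit"] = (
    pd.DatetimeIndex(pd.to_datetime(df["Zeit"], unit="s"))
    .tz_localize("Etc/UTC")            # epoch seconds are UTC
    .tz_convert("Europe/Zurich")       # shift to local wall-clock time
    .tz_localize(None)                 # drop tz info so the plot shows local times
)
print(df)
```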
zvikam/Checkpoint-CSA | [
"ea428078d99e0768637f65f19387a6f9c5246f52"
] | [
"2020/Can_You_See_It/Can_You_See_It2.py"
] | [
"import cv2\nimport time\nimport os\nimport sys\nimport numpy as np\n\n\ndef get_divisors(n):\n for i in range(8, int(n / 2) + 1):\n if n % i == 0:\n yield i\n yield n\n\nfor i in sys.argv[1:]:\n bindata = np.fromfile('data2', dtype='uint8')[int(i):]\n\n n = int(len(bindata))\n dims = [(c, int(n/c)) for c in get_divisors(n)]\n #dims = [(int(n/12), 12)]\n for d in dims:\n nd = bindata.reshape(d)\n cv2.imwrite(\"image_%dx%d.bmp\" % (d), nd)\n"
] | [
[
"numpy.fromfile"
]
] |
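The CTF script above brute-forces candidate image dimensions by reshaping a raw byte blob at every divisor of its length. A minimal numpy-only sketch of the same idea; the synthetic buffer stands in for `np.fromfile('data2', dtype='uint8')`, and the `cv2.imwrite` step is only referenced in a comment:

```python
import numpy as np


def get_divisors(n):
    """Yield divisors of n that are at least 8, plus n itself, as in the script."""
    for i in range(8, n // 2 + 1):
        if n % i == 0:
            yield i
    yield n


bindata = np.arange(96, dtype=np.uint8)        # synthetic stand-in for the challenge bytes

for rows in get_divisors(len(bindata)):
    cols = len(bindata) // rows
    candidate = bindata.reshape((rows, cols))  # one candidate image per (rows, cols) pair
    print(f"candidate image shape: {candidate.shape}")
    # the original writes each candidate with cv2.imwrite("image_%dx%d.bmp" % (rows, cols), candidate)
```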
Mdlglobal-atlassian-net/agents | [
"ba3817ea48d574d314017542e1e4858566f953f4"
] | [
"tf_agents/bandits/agents/constraints_test.py"
] | [
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.bandits.agents.constraints.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nfrom tf_agents.bandits.agents import constraints\nfrom tf_agents.bandits.networks import global_and_arm_feature_network\nfrom tf_agents.bandits.specs import utils as bandit_spec_utils\nfrom tf_agents.networks import network\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.utils import common\n\n\ntf.compat.v1.enable_v2_behavior()\n\n\nclass GreaterThan2Constraint(constraints.BaseConstraint):\n\n def __call__(self, observation, actions=None):\n \"\"\"Returns the probability of input actions being feasible.\"\"\"\n if actions is None:\n actions = tf.range(self._action_spec.minimum, self._action_spec.maximum)\n feasibility_prob = tf.cast(tf.greater(actions, 2), tf.float32)\n return feasibility_prob\n\n\nclass BaseConstraintTest(tf.test.TestCase):\n\n def testSimpleCase(self):\n obs_spec = tensor_spec.TensorSpec([2], tf.float32)\n time_step_spec = ts.time_step_spec(obs_spec)\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=5)\n gt2c = GreaterThan2Constraint(time_step_spec, action_spec)\n feasibility_prob = gt2c(observation=None)\n self.assertAllEqual([0, 0, 0, 1, 1], self.evaluate(feasibility_prob))\n\n\nclass DummyNet(network.Network):\n\n def __init__(self, unused_observation_spec, action_spec, name=None):\n super(DummyNet, self).__init__(\n unused_observation_spec, state_spec=(), name=name)\n action_spec = tf.nest.flatten(action_spec)[0]\n num_actions = action_spec.maximum - action_spec.minimum + 1\n\n # Store custom layers that can be serialized through the Checkpointable API.\n self._dummy_layers = [\n tf.keras.layers.Dense(\n num_actions,\n kernel_initializer=tf.compat.v1.initializers.constant(\n [[1, 1.5, 2],\n [1, 1.5, 4]]),\n bias_initializer=tf.compat.v1.initializers.constant(\n [[1], [1], [-10]]))\n ]\n\n def call(self, inputs, step_type=None, network_state=()):\n del step_type\n inputs = tf.cast(inputs, tf.float32)\n for layer in self._dummy_layers:\n inputs = layer(inputs)\n return inputs, network_state\n\n\nclass NeuralConstraintTest(tf.test.TestCase):\n\n def setUp(self):\n super(NeuralConstraintTest, self).setUp()\n tf.compat.v1.enable_resource_variables()\n self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)\n self._time_step_spec = ts.time_step_spec(self._obs_spec)\n self._action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=2)\n self._observation_spec = self._time_step_spec.observation\n\n def testCreateConstraint(self):\n constraint_net = DummyNet(self._observation_spec, self._action_spec)\n constraints.NeuralConstraint(\n self._time_step_spec,\n 
self._action_spec,\n constraint_network=constraint_net)\n\n def testInitializeConstraint(self):\n constraint_net = DummyNet(self._observation_spec, self._action_spec)\n neural_constraint = constraints.NeuralConstraint(\n self._time_step_spec,\n self._action_spec,\n constraint_network=constraint_net)\n init_op = neural_constraint.initialize()\n if not tf.executing_eagerly():\n with self.cached_session() as sess:\n common.initialize_uninitialized_variables(sess)\n self.assertIsNone(sess.run(init_op))\n\n def testComputeLoss(self):\n constraint_net = DummyNet(self._observation_spec, self._action_spec)\n observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n actions = tf.constant([0, 1], dtype=tf.int32)\n rewards = tf.constant([0.5, 3.0], dtype=tf.float32)\n\n neural_constraint = constraints.NeuralConstraint(\n self._time_step_spec,\n self._action_spec,\n constraint_network=constraint_net)\n init_op = neural_constraint.initialize()\n if not tf.executing_eagerly():\n with self.cached_session() as sess:\n common.initialize_uninitialized_variables(sess)\n self.assertIsNone(sess.run(init_op))\n loss = neural_constraint.compute_loss(\n observations,\n actions,\n rewards)\n self.assertAllClose(self.evaluate(loss), 42.25)\n\n def testComputeLossWithArmFeatures(self):\n obs_spec = bandit_spec_utils.create_per_arm_observation_spec(\n global_dim=2, per_arm_dim=3, max_num_actions=3)\n time_step_spec = ts.time_step_spec(obs_spec)\n constraint_net = (\n global_and_arm_feature_network.create_feed_forward_common_tower_network(\n obs_spec,\n global_layers=(4,),\n arm_layers=(4,),\n common_layers=(4,)))\n neural_constraint = constraints.NeuralConstraint(\n time_step_spec,\n self._action_spec,\n constraint_network=constraint_net)\n\n observations = {\n bandit_spec_utils.GLOBAL_FEATURE_KEY:\n tf.constant([[1, 2], [3, 4]], dtype=tf.float32),\n bandit_spec_utils.PER_ARM_FEATURE_KEY:\n tf.cast(\n tf.reshape(tf.range(18), shape=[2, 3, 3]), dtype=tf.float32)\n }\n actions = tf.constant([0, 1], dtype=tf.int32)\n rewards = tf.constant([0.5, 3.0], dtype=tf.float32)\n\n init_op = neural_constraint.initialize()\n if not tf.executing_eagerly():\n with self.cached_session() as sess:\n common.initialize_uninitialized_variables(sess)\n self.assertIsNone(sess.run(init_op))\n loss = neural_constraint.compute_loss(\n observations,\n actions,\n rewards)\n self.assertGreater(self.evaluate(loss), 0.0)\n\n def testComputeActionFeasibility(self):\n constraint_net = DummyNet(self._observation_spec, self._action_spec)\n\n neural_constraint = constraints.NeuralConstraint(\n self._time_step_spec,\n self._action_spec,\n constraint_network=constraint_net)\n init_op = neural_constraint.initialize()\n if not tf.executing_eagerly():\n with self.cached_session() as sess:\n common.initialize_uninitialized_variables(sess)\n self.assertIsNone(sess.run(init_op))\n\n observation = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n feasibility_prob = neural_constraint(observation)\n self.assertAllClose(self.evaluate(feasibility_prob), np.ones([2, 3]))\n\n\nclass QuantileConstraintTest(tf.test.TestCase):\n\n def setUp(self):\n super(QuantileConstraintTest, self).setUp()\n tf.compat.v1.enable_resource_variables()\n self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)\n self._time_step_spec = ts.time_step_spec(self._obs_spec)\n self._action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=2)\n self._observation_spec = self._time_step_spec.observation\n\n def testCreateConstraint(self):\n constraint_net = 
DummyNet(self._observation_spec, self._action_spec)\n constraints.QuantileConstraint(\n self._time_step_spec,\n self._action_spec,\n constraint_network=constraint_net)\n\n def testComputeActionFeasibility(self):\n constraint_net = DummyNet(self._observation_spec, self._action_spec)\n\n quantile_constraint = constraints.QuantileConstraint(\n self._time_step_spec,\n self._action_spec,\n constraint_network=constraint_net)\n init_op = quantile_constraint.initialize()\n if not tf.executing_eagerly():\n with self.cached_session() as sess:\n common.initialize_uninitialized_variables(sess)\n self.assertIsNone(sess.run(init_op))\n\n observation = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n feasibility_prob = quantile_constraint(observation)\n self.assertAllGreaterEqual(self.evaluate(feasibility_prob), 0.0)\n self.assertAllLessEqual(self.evaluate(feasibility_prob), 1.0)\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.range",
"tensorflow.compat.v1.enable_v2_behavior",
"numpy.ones",
"tensorflow.nest.flatten",
"tensorflow.constant",
"tensorflow.executing_eagerly",
"tensorflow.compat.v1.initializers.constant",
"tensorflow.test.main",
"tensorflow.greater",
"tensorflow.compat.v1.enable_resource_variables",
"tensorflow.cast"
]
] |
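`GreaterThan2Constraint` in the test above turns a boolean comparison over the action range into per-action feasibility probabilities. The core TensorFlow ops it relies on can be exercised on their own; a minimal sketch, with the action bounds 0..5 taken from the test's `BoundedTensorSpec`:

```python
import tensorflow as tf

minimum, maximum = 0, 5                       # the test's action_spec bounds
actions = tf.range(minimum, maximum)          # [0, 1, 2, 3, 4]

# probability 1.0 for actions strictly greater than 2, else 0.0
feasibility_prob = tf.cast(tf.greater(actions, 2), tf.float32)

print(feasibility_prob.numpy())               # -> [0. 0. 0. 1. 1.], as asserted in testSimpleCase
```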
dkarageo/lovpy | [
"85f43c07aeed4b318238c35da606de2dc65ca24f"
] | [
"examples/valid_numpy_test.py"
] | [
"import numpy as np\n\n\ndef print_const(arr):\n print(repr(arr[1]))\n\n\ndef main():\n x = np.array([.1, .2, .9])\n print_const(x)\n print_const(x)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array"
]
] |
Yu-Yy/MathPoseGNN | [
"9759955957b4cca192f5a98031245277c12750f3"
] | [
"cvpack/torch_modeling/engine/engine.py"
] | [
"# encoding: utf-8\n\nimport os\nimport os.path as osp\nimport time\nimport argparse\n\nimport torch\nimport torch.distributed as dist\n\nfrom collections import OrderedDict\nfrom cvpack.utils.pyt_utils import (\n parse_torch_devices, extant_file, link_file, ensure_dir)\nfrom cvpack.utils.logger import get_logger\n\nfrom .checkpoint import load_model\n\n\nclass State(object):\n def __init__(self):\n self.iteration = 0\n self.model = None\n self.optimizer = None\n self.scheduler = None\n\n def register(self, **kwargs):\n for k, v in kwargs.items():\n assert k in ['iteration', 'model', 'optimizer', 'scheduler']\n setattr(self, k, v)\n\n\nclass Engine(object):\n def __init__(self, cfg, custom_parser=None):\n self.version = 0.1\n self.state = State()\n self.devices = None\n self.distributed = False\n self.logger = None\n self.cfg = cfg\n\n if custom_parser is None:\n self.parser = argparse.ArgumentParser()\n else:\n assert isinstance(custom_parser, argparse.ArgumentParser)\n self.parser = custom_parser\n\n self.inject_default_parser()\n self.args = self.parser.parse_args()\n\n self.continue_state_object = self.args.continue_fpath\n\n if 'WORLD_SIZE' in os.environ:\n self.distributed = int(os.environ['WORLD_SIZE']) > 1\n \n if self.distributed:\n self.local_rank = self.args.local_rank\n self.world_size = int(os.environ['WORLD_SIZE']) # nproc_per_node * node\n # self.world_rank = int(os.environ['RANK'])\n torch.cuda.set_device(self.local_rank)\n dist_url = self.args.dist_url #'env://'\n dist.init_process_group(backend=\"nccl\", init_method=dist_url, world_size=self.world_size, rank=self.local_rank) # 'env://', rank=1, world_size=self.world_size\n dist.barrier()\n self.devices = [i for i in range(self.world_size)]\n else:\n self.local_rank = 0\n self.devices = parse_torch_devices(self.args.devices)\n\n def setup_log(self, name='train', log_dir=None, file_name=None):\n if not self.logger:\n self.logger = get_logger(\n name, log_dir, self.args.local_rank, filename=file_name)\n else:\n self.logger.warning('already exists logger')\n return self.logger\n\n def inject_default_parser(self):\n self.parser.add_argument(\n '-d', '--devices', default='0',\n help='set data parallel training')\n self.parser.add_argument(\n '-c', '--continue', type=extant_file, metavar=\"FILE\",\n dest=\"continue_fpath\",\n help='continue from one certain checkpoint')\n self.parser.add_argument(\n '--local_rank', default=0, type=int, # local ranking ?\n help='process rank on node')\n self.parser.add_argument('--dist_url',\n default='tcp://127.0.0.1:23457',\n type=str,\n help='url used to set up distributed training')\n\n def register_state(self, **kwargs):\n self.state.register(**kwargs)\n\n def update_iteration(self, iteration):\n self.state.iteration = iteration\n\n def save_checkpoint(self, path):\n self.logger.info(\"Saving checkpoint to file {}\".format(path))\n t_start = time.time()\n\n state_dict = {}\n new_state_dict = OrderedDict()\n\n for k, v in self.state.model.state_dict().items():\n key = k\n if k.split('.')[0] == 'module':\n key = k[7:]\n new_state_dict[key] = v\n state_dict['model'] = new_state_dict\n\n if self.state.optimizer:\n state_dict['optimizer'] = self.state.optimizer.state_dict()\n if self.state.scheduler:\n state_dict['scheduler'] = self.state.scheduler.state_dict()\n if self.state.iteration:\n state_dict['iteration'] = self.state.iteration\n\n t_io_begin = time.time()\n torch.save(state_dict, path)\n t_end = time.time()\n\n del state_dict\n del new_state_dict\n\n self.logger.info(\n \"Save checkpoint 
to file {}, \"\n \"Time usage:\\n\\tprepare snapshot: {}, IO: {}\".format(\n path, t_io_begin - t_start, t_end - t_io_begin))\n\n\n def save_best_model(self, path):\n self.logger.info(\"Saving best model to file {}\".format(path))\n t_start = time.time()\n\n state_dict = {}\n new_state_dict = OrderedDict()\n\n for k, v in self.state.model.state_dict().items():\n key = k\n if k.split('.')[0] == 'module':\n key = k[7:]\n new_state_dict[key] = v\n state_dict['model'] = new_state_dict\n\n if self.state.optimizer:\n state_dict['optimizer'] = self.state.optimizer.state_dict()\n if self.state.scheduler:\n state_dict['scheduler'] = self.state.scheduler.state_dict()\n if self.state.iteration:\n state_dict['iteration'] = self.state.iteration\n\n t_io_begin = time.time()\n torch.save(state_dict, path)\n t_end = time.time()\n\n del state_dict\n del new_state_dict\n\n self.logger.info(\n \"Save best model to file {}, \"\n \"Time usage:\\n\\tprepare snapshot: {}, IO: {}\".format(\n path, t_io_begin - t_start, t_end - t_io_begin))\n\n def load_checkpoint(self, weights, is_restore=False):\n\n t_start = time.time()\n\n if weights.endswith(\".pkl\"):\n # for caffe2 model\n from maskrcnn_benchmark.utils.c2_model_loading import \\\n load_c2_format\n loaded = load_c2_format(self.cfg, weights)\n else:\n loaded = torch.load(weights, map_location=torch.device(\"cpu\"))\n\n t_io_end = time.time()\n if \"model\" not in loaded:\n loaded = dict(model=loaded)\n\n self.state.model = load_model(\n self.state.model, loaded['model'], self.logger,\n is_restore=is_restore)\n\n if \"optimizer\" in loaded:\n self.state.optimizer.load_state_dict(loaded['optimizer'])\n if \"iteration\" in loaded:\n self.state.iteration = loaded['iteration']\n if \"scheduler\" in loaded:\n self.state.scheduler.load_state_dict(loaded[\"scheduler\"])\n del loaded\n\n t_end = time.time()\n self.logger.info(\n \"Load checkpoint from file {}, \"\n \"Time usage:\\n\\tIO: {}, restore snapshot: {}\".format(\n weights, t_io_end - t_start, t_end - t_io_end))\n\n def save_and_link_checkpoint(self, snapshot_dir):\n ensure_dir(snapshot_dir)\n current_iter_checkpoint = osp.join(\n snapshot_dir, 'iter-{}.pth'.format(self.state.iteration))\n self.save_checkpoint(current_iter_checkpoint)\n last_iter_checkpoint = osp.join(\n snapshot_dir, 'iter-last.pth')\n link_file(current_iter_checkpoint, last_iter_checkpoint)\n\n def restore_checkpoint(self, is_restore=True):\n self.load_checkpoint(self.continue_state_object, is_restore=is_restore)\n\n def __exit__(self, type, value, tb):\n torch.cuda.empty_cache()\n if type is not None:\n self.logger.warning(\n \"A exception occurred during Engine initialization, \"\n \"give up running process\")\n return False\n\n def __enter__(self):\n return self\n"
] | [
[
"torch.device",
"torch.distributed.init_process_group",
"torch.save",
"torch.cuda.set_device",
"torch.cuda.empty_cache",
"torch.distributed.barrier"
]
] |
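`Engine.save_checkpoint` above strips the `module.` prefix that DataParallel/DistributedDataParallel adds to parameter names before writing the state dict, so checkpoints load identically with or without the wrapper. A small sketch of that renaming step on a toy model; the wrapped key names are simulated here since no process group is set up, and the `/tmp` path is illustrative:

```python
from collections import OrderedDict

import torch

model = torch.nn.Linear(3, 2)

# simulate the key names a DataParallel/DistributedDataParallel wrapper would produce
wrapped_state = OrderedDict(("module." + k, v) for k, v in model.state_dict().items())

new_state_dict = OrderedDict()
for k, v in wrapped_state.items():
    key = k[7:] if k.split(".")[0] == "module" else k    # same test as in save_checkpoint
    new_state_dict[key] = v

torch.save({"model": new_state_dict, "iteration": 0}, "/tmp/iter-0.pth")
model.load_state_dict(new_state_dict)                    # keys match the unwrapped module again
```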
zbzhu99/ILSwiss | [
"9be4ff89a0005cd404014696aacc0eefd7596b86"
] | [
"rlkit/data_management/normalizer.py"
] | [
"\"\"\"\nBased on code from Marcin Andrychowicz\n\"\"\"\nimport numpy as np\n\n\nclass Normalizer(object):\n def __init__(\n self,\n size,\n eps=1e-8,\n default_clip_range=np.inf,\n mean=0,\n std=1,\n ):\n self.size = size\n self.eps = eps\n self.default_clip_range = default_clip_range\n self.sum = np.zeros(self.size, np.float32)\n self.sumsq = np.zeros(self.size, np.float32)\n self.count = np.ones(1, np.float32)\n self.mean = mean + np.zeros(self.size, np.float32)\n self.std = std * np.ones(self.size, np.float32)\n self.synchronized = True\n\n def update(self, v):\n if v.ndim == 1:\n v = np.expand_dims(v, 0)\n assert v.ndim == 2\n assert v.shape[1] == self.size\n self.sum += v.sum(axis=0)\n self.sumsq += (np.square(v)).sum(axis=0)\n self.count[0] += v.shape[0]\n self.synchronized = False\n\n def normalize(self, v, clip_range=None):\n if not self.synchronized:\n self.synchronize()\n if clip_range is None:\n clip_range = self.default_clip_range\n mean, std = self.mean, self.std\n if v.ndim == 2:\n mean = mean.reshape(1, -1)\n std = std.reshape(1, -1)\n return np.clip((v - mean) / std, -clip_range, clip_range)\n\n def denormalize(self, v):\n if not self.synchronized:\n self.synchronize()\n mean, std = self.mean, self.std\n if v.ndim == 2:\n mean = mean.reshape(1, -1)\n std = std.reshape(1, -1)\n return mean + v * std\n\n def synchronize(self):\n self.mean[...] = self.sum / self.count[0]\n self.std[...] = np.sqrt(\n np.maximum(\n np.square(self.eps), self.sumsq / self.count[0] - np.square(self.mean)\n )\n )\n self.synchronized = True\n\n\nclass IdentityNormalizer(object):\n def __init__(self, *args, **kwargs):\n pass\n\n def update(self, v):\n pass\n\n def normalize(self, v, clip_range=None):\n return v\n\n def denormalize(self, v):\n return v\n\n\nclass FixedNormalizer(object):\n def __init__(\n self,\n size,\n default_clip_range=np.inf,\n mean=0,\n std=1,\n eps=1e-8,\n ):\n assert std > 0\n std = std + eps\n self.size = size\n self.default_clip_range = default_clip_range\n self.mean = mean + np.zeros(self.size, np.float32)\n self.std = std + np.zeros(self.size, np.float32)\n self.eps = eps\n\n def set_mean(self, mean):\n self.mean = mean + np.zeros(self.size, np.float32)\n\n def set_std(self, std):\n std = std + self.eps\n self.std = std + np.zeros(self.size, np.float32)\n\n def normalize(self, v, clip_range=None):\n if clip_range is None:\n clip_range = self.default_clip_range\n mean, std = self.mean, self.std\n if v.ndim == 2:\n mean = mean.reshape(1, -1)\n std = std.reshape(1, -1)\n return np.clip((v - mean) / std, -clip_range, clip_range)\n\n def denormalize(self, v):\n mean, std = self.mean, self.std\n if v.ndim == 2:\n mean = mean.reshape(1, -1)\n std = std.reshape(1, -1)\n return mean + v * std\n\n def copy_stats(self, other):\n self.set_mean(other.mean)\n self.set_std(other.std)\n\n\nclass RunningMeanStd(object):\n \"\"\"Calulates the running mean and std of a data stream.\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm\n \"\"\"\n\n def __init__(self, mean=0.0, std=1.0) -> None:\n self.mean, self.var = mean, std\n self.count = 0\n\n def update(self, x: np.ndarray) -> None:\n \"\"\"Add a batch of item into RMS with the same shape, modify mean/var/count.\"\"\"\n batch_mean, batch_var = np.mean(x, axis=0), np.var(x, axis=0)\n batch_count = len(x)\n\n delta = batch_mean - self.mean\n total_count = self.count + batch_count\n\n new_mean = self.mean + delta * batch_count / total_count\n m_a = self.var * self.count\n m_b = batch_var * batch_count\n m_2 
= m_a + m_b + delta ** 2 * self.count * batch_count / total_count\n new_var = m_2 / total_count\n\n self.mean, self.var = new_mean, new_var\n self.count = total_count\n"
] | [
[
"numpy.square",
"numpy.zeros",
"numpy.ones",
"numpy.mean",
"numpy.var",
"numpy.clip",
"numpy.expand_dims"
]
] |
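`RunningMeanStd` above merges batch statistics with the parallel variance formula referenced in its docstring. A short numpy check that the incremental update reproduces the statistics of the concatenated data; the random arrays are illustrative:

```python
import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(100, 3))
b = rng.normal(loc=2.0, size=(50, 3))

# incremental update, mirroring RunningMeanStd.update
mean, var, count = np.mean(a, axis=0), np.var(a, axis=0), len(a)
batch_mean, batch_var, batch_count = np.mean(b, axis=0), np.var(b, axis=0), len(b)

delta = batch_mean - mean
total = count + batch_count
new_mean = mean + delta * batch_count / total
m2 = var * count + batch_var * batch_count + delta ** 2 * count * batch_count / total
new_var = m2 / total

full = np.concatenate([a, b], axis=0)
assert np.allclose(new_mean, np.mean(full, axis=0))
assert np.allclose(new_var, np.var(full, axis=0))
```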
dcdenu4/invest | [
"3d115d4d903674236f1e9e9962d737029ebd0701"
] | [
"tests/test_ufrm.py"
] | [
"# coding=UTF-8\r\n\"\"\"Tests for Urban Flood Risk Mitigation Model.\"\"\"\r\nimport unittest\r\nimport tempfile\r\nimport shutil\r\nimport os\r\n\r\nfrom osgeo import gdal\r\nfrom osgeo import osr\r\nfrom osgeo import ogr\r\nimport numpy\r\nimport pygeoprocessing\r\nimport shapely.geometry\r\n\r\n\r\nclass UFRMTests(unittest.TestCase):\r\n \"\"\"Tests for the Urban Flood Risk Mitigation Model.\"\"\"\r\n\r\n def setUp(self):\r\n \"\"\"Override setUp function to create temp workspace directory.\"\"\"\r\n # this lets us delete the workspace after its done no matter the\r\n # the rest result\r\n self.workspace_dir = tempfile.mkdtemp(suffix='\\U0001f60e') # smiley\r\n\r\n def tearDown(self):\r\n \"\"\"Override tearDown function to remove temporary directory.\"\"\"\r\n shutil.rmtree(self.workspace_dir)\r\n\r\n def _make_args(self):\r\n \"\"\"Create args list for UFRM.\"\"\"\r\n base_dir = os.path.dirname(__file__)\r\n args = {\r\n 'aoi_watersheds_path': os.path.join(\r\n base_dir, '..', 'data', 'invest-test-data', 'ufrm',\r\n 'watersheds.gpkg'),\r\n 'built_infrastructure_vector_path': os.path.join(\r\n base_dir, '..', 'data', 'invest-test-data', 'ufrm',\r\n 'infrastructure.gpkg'),\r\n 'curve_number_table_path': os.path.join(\r\n base_dir, '..', 'data', 'invest-test-data', 'ufrm',\r\n 'Biophysical_water_SF.csv'),\r\n 'infrastructure_damage_loss_table_path': os.path.join(\r\n base_dir, '..', 'data', 'invest-test-data', 'ufrm',\r\n 'Damage.csv'),\r\n 'lulc_path': os.path.join(\r\n base_dir, '..', 'data', 'invest-test-data', 'ufrm',\r\n 'lulc.tif'),\r\n 'rainfall_depth': 40,\r\n 'results_suffix': 'Test1',\r\n 'soils_hydrological_group_raster_path': os.path.join(\r\n base_dir, '..', 'data', 'invest-test-data', 'ufrm',\r\n 'soilgroup.tif'),\r\n 'workspace_dir': self.workspace_dir,\r\n }\r\n return args\r\n\r\n def test_ufrm_regression(self):\r\n \"\"\"UFRM: regression test.\"\"\"\r\n from natcap.invest import urban_flood_risk_mitigation\r\n args = self._make_args()\r\n urban_flood_risk_mitigation.execute(args)\r\n\r\n result_vector = gdal.OpenEx(os.path.join(\r\n args['workspace_dir'], 'flood_risk_service_Test1.shp'),\r\n gdal.OF_VECTOR)\r\n result_layer = result_vector.GetLayer()\r\n\r\n # Check that all four expected fields are there.\r\n self.assertEqual(\r\n set(('aff_bld', 'serv_blt', 'rnf_rt_idx', 'rnf_rt_m3',\r\n 'flood_vol')),\r\n set(field.GetName() for field in result_layer.schema))\r\n\r\n result_feature = result_layer.GetFeature(0)\r\n for fieldname, expected_value in (\r\n ('aff_bld', 187010830.32202843),\r\n ('serv_blt', 13253546667257.65),\r\n ('rnf_rt_idx', 0.70387527942),\r\n ('rnf_rt_m3', 70870.4765625),\r\n ('flood_vol', 29815.640625)):\r\n result_val = result_feature.GetField(fieldname)\r\n places_to_round = (\r\n int(round(numpy.log(expected_value)/numpy.log(10)))-6)\r\n self.assertAlmostEqual(\r\n result_val, expected_value, places=-places_to_round)\r\n\r\n result_feature = None\r\n result_layer = None\r\n result_vector = None\r\n\r\n def test_ufrm_regression_no_infrastructure(self):\r\n \"\"\"UFRM: regression for no infrastructure.\"\"\"\r\n from natcap.invest import urban_flood_risk_mitigation\r\n args = self._make_args()\r\n del args['built_infrastructure_vector_path']\r\n urban_flood_risk_mitigation.execute(args)\r\n\r\n result_raster = gdal.OpenEx(os.path.join(\r\n args['workspace_dir'], 'Runoff_retention_m3_Test1.tif'),\r\n gdal.OF_RASTER)\r\n band = result_raster.GetRasterBand(1)\r\n array = band.ReadAsArray()\r\n nodata = band.GetNoDataValue()\r\n band = None\r\n 
result_raster = None\r\n result_sum = numpy.sum(array[~numpy.isclose(array, nodata)])\r\n # expected result observed from regression run.\r\n expected_result = 156070.36\r\n self.assertAlmostEqual(result_sum, expected_result, places=0)\r\n\r\n result_vector = gdal.OpenEx(os.path.join(\r\n args['workspace_dir'], 'flood_risk_service_Test1.shp'),\r\n gdal.OF_VECTOR)\r\n result_layer = result_vector.GetLayer()\r\n result_feature = result_layer.GetFeature(0)\r\n\r\n # Check that only the two expected fields are there.\r\n self.assertEqual(\r\n set(('rnf_rt_idx', 'rnf_rt_m3', 'flood_vol')),\r\n set(field.GetName() for field in result_layer.schema))\r\n\r\n for fieldname, expected_value in (\r\n ('rnf_rt_idx', 0.70387527942),\r\n ('rnf_rt_m3', 70870.4765625),\r\n ('flood_vol', 29815.640625)):\r\n result_val = result_feature.GetField(fieldname)\r\n places_to_round = (\r\n int(round(numpy.log(expected_value)/numpy.log(10)))-6)\r\n self.assertAlmostEqual(\r\n result_val, expected_value, places=-places_to_round)\r\n\r\n def test_ufrm_value_error_on_bad_soil(self):\r\n \"\"\"UFRM: assert exception on bad soil raster values.\"\"\"\r\n from natcap.invest import urban_flood_risk_mitigation\r\n args = self._make_args()\r\n\r\n bad_soil_raster = os.path.join(\r\n self.workspace_dir, 'bad_soilgroups.tif')\r\n value_map = {\r\n 1: 1,\r\n 2: 2,\r\n 3: 9, # only 1, 2, 3, 4 are valid values for this raster.\r\n 4: 4\r\n }\r\n pygeoprocessing.reclassify_raster(\r\n (args['soils_hydrological_group_raster_path'], 1), value_map,\r\n bad_soil_raster, gdal.GDT_Int16, -9)\r\n args['soils_hydrological_group_raster_path'] = bad_soil_raster\r\n\r\n with self.assertRaises(ValueError) as cm:\r\n urban_flood_risk_mitigation.execute(args)\r\n\r\n actual_message = str(cm.exception)\r\n expected_message = (\r\n 'Check that the Soil Group raster does not contain')\r\n self.assertTrue(expected_message in actual_message)\r\n\r\n def test_ufrm_value_error_on_bad_lucode(self):\r\n \"\"\"UFRM: assert exception on missing lucodes.\"\"\"\r\n import pandas\r\n from natcap.invest import urban_flood_risk_mitigation\r\n args = self._make_args()\r\n\r\n bad_cn_table_path = os.path.join(\r\n self.workspace_dir, 'bad_cn_table.csv')\r\n cn_table = pandas.read_csv(args['curve_number_table_path'])\r\n\r\n # drop a row with an lucode known to exist in lulc raster\r\n # This is a code that will successfully index into the\r\n # CN table sparse matrix, but will not return valid data.\r\n bad_cn_table = cn_table[cn_table['lucode'] != 0]\r\n bad_cn_table.to_csv(bad_cn_table_path, index=False)\r\n args['curve_number_table_path'] = bad_cn_table_path\r\n\r\n with self.assertRaises(ValueError) as cm:\r\n urban_flood_risk_mitigation.execute(args)\r\n\r\n actual_message = str(cm.exception)\r\n expected_message = (\r\n f'The biophysical table is missing a row for lucode(s) {[0]}')\r\n self.assertEqual(expected_message, actual_message)\r\n\r\n # drop rows with lucodes known to exist in lulc raster\r\n # These are codes that will raise an IndexError on\r\n # indexing into the CN table sparse matrix. 
The test\r\n # LULC raster has values from 0 to 21.\r\n bad_cn_table = cn_table[cn_table['lucode'] < 15]\r\n bad_cn_table.to_csv(bad_cn_table_path, index=False)\r\n args['curve_number_table_path'] = bad_cn_table_path\r\n\r\n with self.assertRaises(ValueError) as cm:\r\n urban_flood_risk_mitigation.execute(args)\r\n\r\n actual_message = str(cm.exception)\r\n expected_message = (\r\n f'The biophysical table is missing a row for lucode(s) '\r\n f'{[16, 17, 18, 21]}')\r\n self.assertEqual(expected_message, actual_message)\r\n\r\n def test_ufrm_string_damage_to_infrastructure(self):\r\n \"\"\"UFRM: handle str(int) structure indices.\r\n\r\n This came up on the forums, where a user had provided a string column\r\n type that contained integer data. OGR returned these ints as strings,\r\n leading to a ``KeyError``. See\r\n https://github.com/natcap/invest/issues/590.\r\n \"\"\"\r\n from natcap.invest import urban_flood_risk_mitigation\r\n\r\n srs = osr.SpatialReference()\r\n srs.ImportFromEPSG(3157)\r\n projection_wkt = srs.ExportToWkt()\r\n origin = (443723.127327877911739, 4956546.905980412848294)\r\n pos_x = origin[0]\r\n pos_y = origin[1]\r\n\r\n aoi_geometry = [\r\n shapely.geometry.box(pos_x, pos_y, pos_x + 200, pos_y + 200),\r\n ]\r\n\r\n def _infra_geom(xoff, yoff):\r\n \"\"\"Create sample infrastructure geometry at a position offset.\r\n\r\n The geometry will be centered on (x+xoff, y+yoff).\r\n\r\n Parameters:\r\n xoff (number): The x offset, referenced against ``pos_x`` from\r\n the outer scope.\r\n yoff (number): The y offset, referenced against ``pos_y`` from\r\n the outer scope.\r\n\r\n Returns:\r\n A ``shapely.Geometry`` of a point buffered by ``20`` centered\r\n on the provided (x+xoff, y+yoff) point.\r\n \"\"\"\r\n return shapely.geometry.Point(\r\n pos_x + xoff, pos_y + yoff).buffer(20)\r\n\r\n infra_geometries = [\r\n _infra_geom(x_offset, 100)\r\n for x_offset in range(0, 200, 40)]\r\n\r\n infra_fields = {'Type': ogr.OFTString} # THIS IS THE THING TESTED\r\n infra_attrs = [\r\n {'Type': str(index)} for index in range(len(infra_geometries))]\r\n\r\n infrastructure_path = os.path.join(\r\n self.workspace_dir, 'infra_vector.shp')\r\n pygeoprocessing.shapely_geometry_to_vector(\r\n infra_geometries, infrastructure_path, projection_wkt,\r\n 'ESRI Shapefile', fields=infra_fields, attribute_list=infra_attrs,\r\n ogr_geom_type=ogr.wkbPolygon)\r\n\r\n aoi_path = os.path.join(self.workspace_dir, 'aoi.shp')\r\n pygeoprocessing.shapely_geometry_to_vector(\r\n aoi_geometry, aoi_path, projection_wkt,\r\n 'ESRI Shapefile', ogr_geom_type=ogr.wkbPolygon)\r\n\r\n structures_damage_table_path = os.path.join(\r\n self.workspace_dir, 'damage_table_path.csv')\r\n with open(structures_damage_table_path, 'w') as csv_file:\r\n csv_file.write('\"Type\",\"damage\"\\n')\r\n for attr_dict in infra_attrs:\r\n type_index = int(attr_dict['Type'])\r\n csv_file.write(f'\"{type_index}\",1\\n')\r\n\r\n aoi_damage_dict = (\r\n urban_flood_risk_mitigation._calculate_damage_to_infrastructure_in_aoi(\r\n aoi_path, infrastructure_path, structures_damage_table_path))\r\n\r\n # Total damage is the sum of the area of all infrastructure geometries\r\n # that intersect the AOI, with each area multiplied by the damage cost.\r\n # For this test, damage is always 1, so it's just the intersecting\r\n # area.\r\n self.assertEqual(len(aoi_damage_dict), 1)\r\n numpy.testing.assert_allclose(aoi_damage_dict[0], 5645.787282992962)\r\n\r\n def test_ufrm_invalid_validation(self):\r\n \"\"\"UFRM: assert validation error on bad 
args.\"\"\"\r\n from natcap.invest import urban_flood_risk_mitigation\r\n\r\n with self.assertRaises(ValueError):\r\n urban_flood_risk_mitigation.execute({})\r\n\r\n def test_validate(self):\r\n \"\"\"UFRM: test validate function.\"\"\"\r\n from natcap.invest import urban_flood_risk_mitigation\r\n args = self._make_args()\r\n self.assertEqual(\r\n len(urban_flood_risk_mitigation.validate(args)), 0)\r\n\r\n del args['workspace_dir']\r\n validation_warnings = urban_flood_risk_mitigation.validate(args)\r\n self.assertEqual(len(validation_warnings), 1)\r\n\r\n args['workspace_dir'] = ''\r\n result = urban_flood_risk_mitigation.validate(args)\r\n self.assertTrue('has no value' in result[0][1])\r\n\r\n args = self._make_args()\r\n args['lulc_path'] = 'fake/path/notfound.tif'\r\n result = urban_flood_risk_mitigation.validate(args)\r\n self.assertTrue('not found' in result[0][1])\r\n\r\n args = self._make_args()\r\n args['lulc_path'] = args['aoi_watersheds_path']\r\n result = urban_flood_risk_mitigation.validate(args)\r\n self.assertTrue('GDAL raster' in result[0][1])\r\n\r\n args = self._make_args()\r\n args['aoi_watersheds_path'] = args['lulc_path']\r\n result = urban_flood_risk_mitigation.validate(args)\r\n self.assertTrue('GDAL vector' in result[0][1])\r\n\r\n args = self._make_args()\r\n del args['infrastructure_damage_loss_table_path']\r\n result = urban_flood_risk_mitigation.validate(args)\r\n self.assertTrue('missing from the args dict' in result[0][1])\r\n"
] | [
[
"numpy.testing.assert_allclose",
"pandas.read_csv",
"numpy.isclose",
"numpy.log"
]
] |
mihdalal/plan2explore | [
"18291bd052823a91c2cb53e31f24bae973f2361f"
] | [
"plan2explore/training/trainer.py"
] | [
"# Copyright 2019 The Dreamer Authors. Copyright 2020 Plan2Explore Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\n\nimport tensorflow as tf\n\nfrom plan2explore import tools\n\n\n_Phase = collections.namedtuple(\n 'Phase',\n 'name, writer, op, batch_size, steps, feed, report_every, log_every,'\n 'checkpoint_every, restore_every')\n\n\nclass Trainer(object):\n\n def __init__(self, logdir, config=None):\n self._logdir = logdir\n self._global_step = tf.train.get_or_create_global_step()\n self._step = tf.placeholder(tf.int32, name='step')\n self._phase = tf.placeholder(tf.string, name='phase')\n self._log = tf.placeholder(tf.bool, name='log')\n self._report = tf.placeholder(tf.bool, name='report')\n self._reset = tf.placeholder(tf.bool, name='reset')\n self._phases = []\n self._epoch_store = 0\n self._globalstepsubtract = 0\n self._mod_phase_step_train = 0\n self._trainstepprev = 0\n self._mod_phase_step_test = 0\n self._teststepprev = 0\n self._epoch_delta = 0\n # Checkpointing.\n self._loaders = []\n self._savers = []\n self._logdirs = []\n self._checkpoints = []\n self._config = config or tools.AttrDict()\n\n @property\n def global_step(self):\n return self._global_step\n\n @property\n def step(self):\n return self._step\n\n @property\n def phase(self):\n return self._phase\n\n @property\n def log(self):\n return self._log\n\n @property\n def reset(self):\n return self._reset\n\n def add_saver(\n self, include=r'.*', exclude=r'.^', logdir=None, load=True, save=True,\n checkpoint=None):\n variables = tools.filter_variables(include, exclude)\n saver = tf.train.Saver(variables, max_to_keep=1)\n if load:\n self._loaders.append(saver)\n if save:\n self._savers.append(saver)\n self._logdirs.append(logdir or self._logdir)\n if checkpoint is None and self._config.checkpoint_to_load:\n self._checkpoints.append(\n os.path.join(self._logdirs[-1], self._config.checkpoint_to_load))\n else:\n self._checkpoints.append(checkpoint)\n\n def add_phase(\n self, name, steps, score, summary, batch_size=1,\n report_every=None, log_every=None, checkpoint_every=None,\n restore_every=None, feed=None):\n score = tf.convert_to_tensor(score, tf.float32)\n summary = tf.convert_to_tensor(summary, tf.string)\n feed = feed or {}\n if not score.shape.ndims:\n score = score[None]\n writer = self._logdir and tf.summary.FileWriter(\n os.path.join(self._logdir, name),\n tf.get_default_graph(), flush_secs=30)\n op = self._define_step(name, batch_size, score, summary)\n tmp_phase = tools.AttrDict()\n tmp_phase.name = name\n tmp_phase.writer = writer\n tmp_phase.op = op\n tmp_phase.batch_size = batch_size\n tmp_phase.steps = int(steps)\n tmp_phase.feed = feed\n tmp_phase.report_every = report_every\n tmp_phase.log_every = log_every\n tmp_phase.checkpoint_every = checkpoint_every\n tmp_phase.restore_every = restore_every\n 
self._phases.append(tmp_phase)\n\n def run(self, max_step=None, sess=None, unused_saver=None):\n for _ in self.iterate(max_step, sess):\n pass\n\n def iterate(self, max_step=None, sess=None):\n sess = sess or self._create_session()\n epoch_switch_flag = False\n with sess:\n self._initialize_variables(\n sess, self._loaders, self._logdirs, self._checkpoints)\n sess.graph.finalize()\n while True:\n global_step = sess.run(self._global_step)\n if max_step and global_step >= max_step:\n break\n phase, epoch, steps_in = self._find_current_phase(global_step)\n if phase.name=='train':\n phase_step = self._mod_phase_step_train\n self._mod_phase_step_train += self._config.batch_shape[0]\n else:\n phase_step = self._mod_phase_step_test\n self._mod_phase_step_test += self._config.batch_shape[0]\n\n if steps_in % phase.steps < phase.batch_size:\n message = '\\n' + ('-' * 50) + '\\n'\n message += 'Epoch {} phase {} (phase step {}, global step {}).'\n print(message.format(epoch + 1, phase.name, phase_step, global_step))\n # Populate book keeping tensors.\n phase.feed[self._step] = phase_step\n phase.feed[self._phase] = phase.name\n phase.feed[self._reset] = (steps_in < phase.batch_size)\n phase.feed[self._log] = phase.writer and self._is_every_steps(\n phase_step, phase.batch_size, phase.log_every)\n phase.feed[self._report] = self._is_every_steps(\n phase_step, phase.batch_size, phase.report_every)\n summary, mean_score, global_step = sess.run(phase.op, phase.feed)\n if self._config.adaptation:\n self._phases[0].steps = self._config.train_steps if global_step < self._config.adaptation_step else self._config.secondary_train_step\n if global_step < self._config.adaptation_step:\n self._epoch_delta = 0\n else:\n if not epoch_switch_flag:\n self._epoch_delta = epoch\n epoch_switch_flag = True\n\n if self._is_every_steps(\n phase_step, phase.batch_size, phase.checkpoint_every):\n for saver in self._savers:\n self._store_checkpoint(sess, saver, global_step)\n if self._is_every_steps(\n phase_step, phase.batch_size, phase.report_every):\n print('Score {}.'.format(mean_score))\n yield mean_score\n if summary and phase.writer:\n # We want smaller phases to catch up at the beginnig of each epoch so\n # that their graphs are aligned.\n longest_phase = max(phase_.steps for phase_ in self._phases)\n summary_step = epoch * longest_phase + steps_in\n phase.writer.add_summary(summary, summary_step)\n if self._is_every_steps(\n phase_step, phase.batch_size, phase.restore_every):\n self._initialize_variables(\n sess, self._loaders, self._logdirs, self._checkpoints)\n\n def _is_every_steps(self, phase_step, batch, every):\n if not every:\n return False\n covered_steps = range(phase_step, phase_step + batch)\n return any((step + 1) % every == 0 for step in covered_steps)\n\n def _find_current_phase(self, global_step):\n epoch_size = sum(phase.steps for phase in self._phases)\n if self._epoch_store==0:\n self._epoch_store = epoch_size\n change_stepsin = self._epoch_store!=epoch_size\n if change_stepsin:\n self._epoch_store = epoch_size\n self._globalstepsubtract = global_step - 50\n epoch = int((global_step-self._globalstepsubtract) // epoch_size)\n steps_in = (global_step-self._globalstepsubtract) % epoch_size\n for phase in self._phases:\n if steps_in < phase.steps:\n return phase, epoch, steps_in\n steps_in -= phase.steps\n\n def _define_step(self, name, batch_size, score, summary):\n with tf.variable_scope('phase_{}'.format(name)):\n score_mean = tools.StreamingMean((), tf.float32, 'score_mean')\n 
score.set_shape((None,))\n with tf.control_dependencies([score, summary]):\n submit_score = score_mean.submit(score)\n with tf.control_dependencies([submit_score]):\n mean_score = tf.cond(self._report, score_mean.clear, float)\n summary = tf.cond(\n self._report,\n lambda: tf.summary.merge([summary, tf.summary.scalar(\n name + '/score', mean_score, family='trainer')]),\n lambda: summary)\n next_step = self._global_step.assign_add(batch_size)\n with tf.control_dependencies([summary, mean_score, next_step]):\n return (\n tf.identity(summary),\n tf.identity(mean_score),\n tf.identity(next_step))\n\n def _create_session(self):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n \n try:\n return tf.Session('local', config=config)\n except tf.errors.NotFoundError:\n return tf.Session(config=config)\n\n def _initialize_variables(self, sess, savers, logdirs, checkpoints):\n sess.run(tf.group(\n tf.local_variables_initializer(),\n tf.global_variables_initializer()))\n assert len(savers) == len(logdirs) == len(checkpoints)\n for i, (saver, logdir, checkpoint) in enumerate(\n zip(savers, logdirs, checkpoints)):\n logdir = os.path.expanduser(logdir)\n state = tf.train.get_checkpoint_state(logdir)\n if checkpoint:\n checkpoint = os.path.join(logdir, checkpoint)\n if not checkpoint and state and state.model_checkpoint_path:\n checkpoint = state.model_checkpoint_path\n if checkpoint:\n saver.restore(sess, checkpoint)\n\n def _store_checkpoint(self, sess, saver, global_step):\n if not self._logdir or not saver:\n return\n tf.gfile.MakeDirs(self._logdir)\n filename = os.path.join(self._logdir, 'model.ckpt')\n saver.save(sess, filename, global_step)\n"
] | [
[
"tensorflow.control_dependencies",
"tensorflow.convert_to_tensor",
"tensorflow.get_default_graph",
"tensorflow.cond",
"tensorflow.summary.scalar",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.train.get_checkpoint_state",
"tensorflow.ConfigProto",
"tensorflow.gfile.MakeDirs",
"tensorflow.placeholder",
"tensorflow.train.get_or_create_global_step",
"tensorflow.local_variables_initializer",
"tensorflow.global_variables_initializer",
"tensorflow.identity"
]
] |
ukky17/vitaminPrediction | [
"bfd46cc58d87c2818b9ba185b9e2d18e72ee6407"
] | [
"test.py"
] | [
"import argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import roc_auc_score, roc_curve, auc\n\nimport data_utils\n\ndef draw_roc(y_test, prob_test, title):\n plt.figure(figsize=(8, 6))\n fpr, tpr, th = roc_curve(y_test, prob_test)\n roc_auc = auc(fpr, tpr)\n plt.plot(fpr, tpr, 'r')\n plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n plt.xlabel('False Positive Rate', fontsize=20)\n plt.ylabel('True Positive Rate', fontsize=20)\n plt.savefig('roc_' + title + '.pdf')\n\ndef test():\n parser = argparse.ArgumentParser()\n parser.add_argument('--target', choices=['vitB1', 'vitB12', 'folate'])\n parser.add_argument('--modelType', choices=['lr', 'svc', 'rf', 'knn'])\n parser.add_argument('--reverse', action='store_true')\n opt = parser.parse_args()\n\n # threshold\n th_dict = dict()\n th_dict['vitB1'] = 30\n th_dict['vitB12'] = 180\n th_dict['folate'] = 4\n\n # load the dataset\n x_df, y_df, date = data_utils.load_dataset(target=opt.target)\n\n # preprocess the dataset\n x_data, y_data, weight = data_utils.preprocess_dataset(x_df, y_df, th=th_dict[opt.target])\n\n # split into train and test\n n_train = np.sum(date < 20170000)\n if opt.reverse:\n x_data, y_data = x_data[::-1], y_data[::-1]\n x_data, x_test, y_data, y_test = train_test_split(x_data, y_data,\n train_size=n_train,\n shuffle=False)\n\n # model\n if opt.modelType == 'lr':\n model = LogisticRegression(C=1e1, random_state=42, class_weight={1: weight})\n elif opt.modelType == 'svc':\n model = SVC(kernel='rbf', C=1e6, gamma=1e-9, class_weight={1: weight},\n probability=True, random_state=42)\n elif opt.modelType == 'rf':\n model = RandomForestClassifier(n_estimators=50,\n min_samples_split=2,\n max_depth=10,\n class_weight={1: weight},\n random_state=42)\n elif opt.modelType == 'knn':\n model = KNeighborsClassifier(algorithm='auto',\n leaf_size=1,\n metric='minkowski',\n metric_params=None,\n n_jobs=1,\n n_neighbors=37,\n p=1,\n weights='uniform')\n\n # fit and predict\n model.fit(x_data, y_data)\n prob_test = model.predict_proba(x_test)[:, 1]\n\n # evaluation\n auc_value = roc_auc_score(y_test, prob_test)\n print('AUC: {:.4f}'.format(auc_value))\n draw_roc(y_test, prob_test, opt.modelType)\n\nif __name__ == \"__main__\":\n test()\n"
] | [
[
"matplotlib.pyplot.xlim",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.savefig",
"numpy.sum",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"sklearn.svm.SVC",
"sklearn.linear_model.LogisticRegression",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.auc",
"sklearn.metrics.roc_auc_score",
"matplotlib.pyplot.xticks",
"sklearn.metrics.roc_curve"
]
] |
NakamuraTakashi/pyroms | [
"f02324a85be6c7694f83d2fb0ed0b27e869c89b9"
] | [
"pyroms_toolbox/pyroms_toolbox/remapping_tensor.py"
] | [
"# encoding: utf-8\n\nimport os\nimport numpy as np\nimport glob\ntry:\n import netCDF4 as netCDF\nexcept:\n import netCDF3 as netCDF\n\nimport pyroms\nimport pyroms_toolbox\nimport _remapping\n\nimport matplotlib.pyplot as plt\n\nimport datetime\n\ndef remapping_tensor(varname, srcfile, wts_files, srcgrd, dstgrd, \\\n rotate_sig=False, trange=None, irange=None, jrange=None, \\\n dstdir='./', shapiro=False):\n '''\n A remapping function to go from a ROMS grid to another ROMS grid.\n This is for 2D tensors: internal ice stress, hard-coding for sig11, sig22,\n sig12.\n '''\n\n # get input and output grid\n if type(srcgrd).__name__ == 'ROMS_Grid':\n srcgrd = srcgrd\n else:\n srcgrd = pyroms.grid.get_ROMS_grid(srcgrd)\n if type(dstgrd).__name__ == 'ROMS_Grid':\n dstgrd = dstgrd\n else:\n dstgrd = pyroms.grid.get_ROMS_grid(dstgrd)\n\n # varname argument\n if type(varname).__name__ == 'list':\n nvar = len(varname)\n elif type(varname).__name__ == 'str':\n varname = [varname]\n nvar = len(varname)\n else:\n raise ValueError('varname must be a str or a list of str')\n\n # srcfile argument\n if type(srcfile).__name__ == 'list':\n nfile = len(srcfile)\n elif type(srcfile).__name__ == 'str':\n srcfile = sorted(glob.glob(srcfile))\n nfile = len(srcfile)\n else:\n raise ValueError('src_srcfile must be a str or a list of str')\n\n # get wts_file\n if type(wts_files).__name__ == 'str':\n wts_files = sorted(glob.glob(wts_files))\n \n # loop over the srcfile\n for nf in range(nfile):\n print('Working with file', srcfile[nf], '...')\n\n # get time \n ocean_time = pyroms.utility.get_nc_var('ocean_time', srcfile[nf])\n ntime = len(ocean_time[:])\n\n # trange argument\n if trange is None:\n trange = list(range(ntime))\n\n # create destination file\n dstfile = dstdir + os.path.basename(srcfile[nf])[:-3] + '_' + dstgrd.name + '.nc'\n if os.path.exists(dstfile) is False:\n print('Creating destination file', dstfile)\n pyroms_toolbox.nc_create_roms_file(dstfile, dstgrd, ocean_time)\n\n # open destination file\n nc = netCDF.Dataset(dstfile, 'a', format='NETCDF3_64BIT')\n\n nctidx = 0\n # loop over time\n for nt in trange:\n\n nc.variables['ocean_time'][nctidx] = ocean_time[nt]\n\n # loop over variable\n for nv in range(nvar):\n print(' ')\n print('remapping', varname[nv], 'from', srcgrd.name, \\\n 'to', dstgrd.name)\n print('time =', ocean_time[nt]) \n\n # get source data\n src_var = pyroms.utility.get_nc_var(varname[nv], srcfile[nf])\n\n # get spval\n try:\n spval = src_var._FillValue\n except:\n raise Warning('Did not find a _FillValue attribute.') \n\n # irange\n if irange is None:\n iirange = (0,src_var.shape[-1])\n else:\n iirange = irange\n\n # jrange\n if jrange is None:\n jjrange = (0,src_var.shape[-2])\n else:\n jjrange = jrange\n\n # determine where on the C-grid these variable lies\n if src_var.dimensions[2].find('_rho') != -1:\n Cpos='rho'\n else:\n print(\"Sigma should be on rho points\")\n\n print('Arakawa C-grid position is', Cpos)\n\n # create variable in _destination file\n if nt == trange[0]:\n print('Creating variable', varname[nv])\n nc.createVariable(varname[nv], 'f8', src_var.dimensions, fill_value=spval)\n nc.variables[varname[nv]].long_name = src_var.long_name\n try:\n nc.variables[varname[nv]].units = src_var.units\n except:\n print(varname[nv]+' has no units')\n nc.variables[varname[nv]].time = src_var.time\n nc.variables[varname[nv]].coordinates = \\\n src_var.coordinates\n nc.variables[varname[nv]].field = src_var.field\n# nc.variables[varname[nv]]._FillValue = spval\n\n # get the right 
remap weights file\n for s in range(len(wts_files)):\n if wts_files[s].__contains__(Cpos+'_to_'+Cpos+'.nc'):\n wts_file = wts_files[s]\n break\n else:\n if s == len(wts_files) - 1:\n raise ValueError('Did not find the appropriate remap weights file')\n\n\n # write data in destination file\n# print 'write data in destination file'\n# nc.variables[varname[nv]][nctidx] = dst_var\n\n # rotate the velocity field if requested\n# print datetime.datetime.now()\n print(' ') \n print('remapping and rotating sigma from', srcgrd.name, \\\n 'to', dstgrd.name)\n\n # get source data\n src_11 = pyroms.utility.get_nc_var(varname[0], srcfile[nf])\n # get spval\n try:\n spval = src_11._FillValue\n except:\n raise Warning('Did not find a _FillValue attribute.') \n\n src_11 = src_11[nt,jjrange[0]:jjrange[1],iirange[0]:iirange[1]]\n\n src_22 = pyroms.utility.get_nc_var(varname[1], srcfile[nf])\n src_22 = src_22[nt,jjrange[0]:jjrange[1],iirange[0]:iirange[1]]\n\n src_12 = pyroms.utility.get_nc_var(varname[2], srcfile[nf])\n src_12 = src_12[nt,jjrange[0]:jjrange[1],iirange[0]:iirange[1]]\n\n print(\"before\", src_11[-1,30], src_12[-1,30], src_22[-1,30])\n if shapiro:\n src_11 = pyroms_toolbox.shapiro_filter.shapiro2(src_11,2)\n src_22 = pyroms_toolbox.shapiro_filter.shapiro2(src_22,2)\n src_12 = pyroms_toolbox.shapiro_filter.shapiro2(src_12,2)\n print(\"after\", src_11[-1,30], src_12[-1,30], src_22[-1,30])\n\n # horizontal interpolation using scrip weights\n print('horizontal interpolation using scrip weights')\n dst_11 = pyroms.remapping.remap(src_11, wts_file, \\\n spval=spval)\n dst_22 = pyroms.remapping.remap(src_22, wts_file, \\\n spval=spval)\n dst_12 = pyroms.remapping.remap(src_12, wts_file, \\\n spval=spval)\n print(\"after remapping\", dst_11[-1,30], dst_12[-1,30], dst_22[-1,30])\n\n if rotate_sig is True:\n # rotate stress tensor\n src_ang = srcgrd.hgrid.angle_rho[jjrange[0]:jjrange[1],iirange[0]:iirange[1]]\n src_angle = pyroms.remapping.remap(src_ang, wts_file)\n dst_angle = dstgrd.hgrid.angle_rho\n angle = dst_angle - src_angle\n cos_ang = np.cos(angle)\n sin_ang = np.sin(angle)\n Lp = cos_ang.shape[-1]\n Mp = cos_ang.shape[-2]\n print(\"Lp, Mp\", Lp, Mp)\n\n for j in range(Mp):\n for i in range(Lp):\n Qrot = [[cos_ang[j,i], sin_ang[j,i]],\n [-sin_ang[j,i], cos_ang[j,i]]]\n QrotT = [[cos_ang[j,i], -sin_ang[j,i]],\n [sin_ang[j,i], cos_ang[j,i]]]\n# Qrot = [[cos_ang[j,i], -sin_ang[j,i]],\n# [sin_ang[j,i], cos_ang[j,i]]]\n# QrotT = [[cos_ang[j,i], sin_ang[j,i]],\n# [-sin_ang[j,i], cos_ang[j,i]]]\n sig = [[dst_11[j,i], dst_12[j,i]],\n [dst_12[j,i], dst_22[j,i]]]\n sig_rot = np.dot(np.dot(Qrot, sig), QrotT)\n dst_11[j,i] = sig_rot[0,0]\n dst_12[j,i] = sig_rot[0,1]\n dst_22[j,i] = sig_rot[1,1]\n print(\"after rotating\", dst_11[-1,30], dst_12[-1,30], dst_22[-1,30])\n\n\n # spval\n idx = np.where(dstgrd.hgrid.mask_rho == 0)\n dst_11[idx[0], idx[1]] = spval\n dst_12[idx[0], idx[1]] = spval\n dst_22[idx[0], idx[1]] = spval\n\n # write data in destination file\n print('write data in destination file')\n nc.variables['sig11'][nctidx] = dst_11\n nc.variables['sig12'][nctidx] = dst_12\n nc.variables['sig22'][nctidx] = dst_22\n\n nctidx = nctidx + 1\n nc.sync()\n \n # close destination file\n nc.close()\n\n return\n"
] | [
[
"numpy.where",
"numpy.sin",
"numpy.dot",
"numpy.cos"
]
] |
tpoisonooo/ppq | [
"cba525dd0e8abe9743e02a013fcf22fb320ae07d"
] | [
"ppq/api/interface.py"
] | [
"import os\nfrom typing import Any, Callable, List\n\nimport torch\nfrom ppq.core import (NetworkFramework, TargetPlatform, empty_ppq_cache,\n ppq_warning)\nfrom ppq.executor import TorchExecutor\nfrom ppq.IR import (BaseGraph, GraphCommand, GraphCommandType, GraphFormatter,\n GraphMerger)\nfrom ppq.IR.morph import GraphDeviceSwitcher\nfrom ppq.executor.base import BaseGraphExecutor\nfrom ppq.parser import *\nfrom ppq.quantization.optim.base import QuantizationOptimizationPass\nfrom ppq.quantization.quantizer import (ACADEMIC_INT4_Quantizer,\n ACADEMIC_Mix_Quantizer,\n ACADEMICQuantizer, BaseQuantizer,\n ExtQuantizer,\n MetaxChannelwiseQuantizer,\n MetaxTensorwiseQuantizer,\n NXP_Quantizer, ORT_PerChannelQuantizer,\n ORT_PerTensorQuantizer,\n PPL_DSP_Quantizer,\n PPL_DSP_TI_Quantizer,\n PPLCUDA_INT4_Quantizer,\n PPLCUDAMixPrecisionQuantizer,\n PPLCUDAQuantizer, TensorRTQuantizer,\n FPGAQuantizer)\nfrom ppq.scheduler import DISPATCHER_TABLE, GraphDispatcher\nfrom torch.utils.data import DataLoader\n\nfrom .setting import *\n\nQUANTIZER_COLLECTION = {\n TargetPlatform.PPL_DSP_INT8: PPL_DSP_Quantizer,\n TargetPlatform.PPL_DSP_TI_INT8: PPL_DSP_TI_Quantizer,\n TargetPlatform.SNPE_INT8: PPL_DSP_Quantizer,\n TargetPlatform.QNN_DSP_INT8: PPL_DSP_Quantizer,\n TargetPlatform.TRT_INT8: TensorRTQuantizer,\n TargetPlatform.NXP_INT8: NXP_Quantizer,\n TargetPlatform.ORT_OOS_INT8: ORT_PerTensorQuantizer,\n TargetPlatform.METAX_INT8_C: MetaxChannelwiseQuantizer,\n TargetPlatform.METAX_INT8_T: MetaxTensorwiseQuantizer,\n # TargetPlatform.ORT_OOS_INT8: ORT_PerChannelQuantizer,\n TargetPlatform.PPL_CUDA_INT8: PPLCUDAQuantizer,\n TargetPlatform.EXTENSION: ExtQuantizer,\n TargetPlatform.PPL_CUDA_MIX: PPLCUDAMixPrecisionQuantizer,\n TargetPlatform.PPL_CUDA_INT4: PPLCUDA_INT4_Quantizer,\n TargetPlatform.ACADEMIC_INT8: ACADEMICQuantizer,\n TargetPlatform.ACADEMIC_INT4: ACADEMIC_INT4_Quantizer,\n TargetPlatform.ACADEMIC_MIX: ACADEMIC_Mix_Quantizer,\n TargetPlatform.FPGA_INT8 : FPGAQuantizer\n}\n\nPARSERS = {\n NetworkFramework.ONNX: OnnxParser,\n NetworkFramework.CAFFE: CaffeParser,\n NetworkFramework.NATIVE: NativeImporter\n}\n\nEXPORTERS = {\n TargetPlatform.PPL_DSP_INT8: PPLDSPCaffeExporter,\n TargetPlatform.PPL_DSP_TI_INT8: PPLDSPTICaffeExporter,\n TargetPlatform.QNN_DSP_INT8: QNNDSPExporter,\n TargetPlatform.PPL_CUDA_INT8: PPLBackendExporter,\n TargetPlatform.SNPE_INT8: SNPECaffeExporter,\n TargetPlatform.NXP_INT8: NxpExporter,\n TargetPlatform.ONNX: OnnxExporter,\n TargetPlatform.ONNXRUNTIME: ONNXRUNTIMExporter,\n TargetPlatform.CAFFE: CaffeExporter,\n TargetPlatform.NATIVE: NativeExporter,\n TargetPlatform.EXTENSION: ExtensionExporter,\n # TargetPlatform.ORT_OOS_INT8: ONNXRUNTIMExporter,\n TargetPlatform.ORT_OOS_INT8: ORTOOSExporter,\n TargetPlatform.METAX_INT8_C: MetaxExporter,\n TargetPlatform.METAX_INT8_T: MetaxExporter,\n TargetPlatform.TRT_INT8: TensorRTExporter,\n}\n\n# 为你的导出模型取一个好听的后缀名\n# postfix for exporting model\nEXPORTING_POSTFIX = {\n TargetPlatform.PPL_DSP_INT8: '.caffemodel',\n TargetPlatform.PPL_DSP_TI_INT8:'.caffemodel',\n TargetPlatform.QNN_DSP_INT8: '.onnx',\n TargetPlatform.PPL_CUDA_INT8: '.onnx',\n TargetPlatform.SNPE_INT8: '.caffemodel',\n TargetPlatform.NXP_INT8: '.caffemodel',\n TargetPlatform.ONNX: '.onnx',\n TargetPlatform.ONNXRUNTIME: '.onnx',\n TargetPlatform.CAFFE: '.caffemodel',\n TargetPlatform.NATIVE: '.native',\n TargetPlatform.EXTENSION: '.ext',\n TargetPlatform.ORT_OOS_INT8: '.onnx',\n TargetPlatform.METAX_INT8_C: '.onnx',\n TargetPlatform.METAX_INT8_T: '.onnx',\n}\n\ndef 
load_graph(file_path: str, from_framework: NetworkFramework=NetworkFramework.ONNX, **kwargs) -> BaseGraph:\n if from_framework not in PARSERS:\n raise KeyError(f'Requiring framework {from_framework} does not support parsing now.')\n parser = PARSERS[from_framework]()\n assert isinstance(parser, GraphBuilder), 'Unexpected Parser found.'\n if from_framework == NetworkFramework.CAFFE:\n assert 'caffemodel_path' in kwargs, ('parameter \"caffemodel_path\" is required here for loading caffe model from file, '\n 'however it is missing from your invoking.')\n graph = parser.build(prototxt_path=file_path, caffemodel_path=kwargs['caffemodel_path'])\n else:\n graph = parser.build(file_path)\n return graph\n\ndef load_onnx_graph(onnx_import_file: str) -> BaseGraph:\n \"\"\"\n 从一个指定位置加载 onnx 计算图,注意该加载的计算图尚未经过调度,此时所有算子被认为是可量化的\n load onnx graph from the specified location\n Args:\n onnx_import_file (str): onnx 计算图的保存位置 the specified location\n\n Returns:\n BaseGraph: 解析 onnx 获得的 ppq 计算图对象 the parsed ppq IR graph\n \"\"\"\n ppq_ir = load_graph(onnx_import_file, from_framework=NetworkFramework.ONNX)\n return format_graph(graph=ppq_ir)\n\ndef load_caffe_graph(prototxt_path: str, caffemodel_path: str) -> BaseGraph:\n \"\"\"\n 从一个指定位置加载 caffe 计算图,注意该加载的计算图尚未经过调度,此时所有算子被认为是可量化的\n load caffe graph from the specified location\n Args:\n prototxt_path (str): caffe prototxt的保存位置 the specified location of caffe prototxt\n caffemodel_path (str): caffe weight的保存位置 the specified lcoation of caffe weight\n\n Returns:\n BaseGraph: 解析 caffe 获得的 ppq 计算图对象 the parsed ppq IR graph\n \"\"\"\n ppq_ir = load_graph(file_path=prototxt_path, caffemodel_path=caffemodel_path, from_framework=NetworkFramework.CAFFE)\n return format_graph(graph=ppq_ir)\n\ndef dump_torch_to_onnx(\n model: torch.nn.Module, \n onnx_export_file: str, \n input_shape: List[int], \n input_dtype: torch.dtype, \n inputs: List[Any] = None,\n device: str = 'cuda'):\n \"\"\"\n 转换一个 torch 模型到 onnx,并保存到指定位置\n convert a torch model to onnx and save to the specified location\n Args:\n model (torch.nn.Module): 被转换的 torch 模型 torch model used for conversion\n\n onnx_export_file (str): 保存文件的路径 the path to save onnx model\n\n input_shape (List[int]): 模型输入尺寸,用于执行 jit.trace,对于动态尺寸的模型,输入一个模型可接受的尺寸即可。\n 如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None\n a list of ints indicating size of input, for multiple inputs, please use keyword arg inputs for \n direct parameter passing and this should be set to None \n\n input_dtype (torch.dtype): 模型输入数据类型,如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None\n the torch datatype of input, for multiple inputs, please use keyword arg inputs\n for direct parameter passing and this should be set to None\n\n inputs (List[Any], optional): 对于存在多个输入的模型,在Inputs中直接指定一个输入List,从而完成模型的tracing。\n for multiple inputs, please give the specified inputs directly in the form of\n a list of arrays\n\n device (str, optional): 转换过程的执行设备 the execution device, defaults to 'cuda'.\n \"\"\"\n\n # set model to eval mode, stablize normalization weights.\n assert isinstance(model, torch.nn.Module), (\n f'Model must be instance of torch.nn.Module, however {type(model)} is given.')\n model.eval()\n\n if inputs is None:\n dummy_input = torch.zeros(size=input_shape, device=device, dtype=input_dtype)\n else: dummy_input = inputs\n\n torch.onnx.export(\n model=model, args=dummy_input,\n verbose=False, f=onnx_export_file, opset_version=11,\n )\n\n@ empty_ppq_cache\ndef quantize_onnx_model(\n onnx_import_file: str,\n calib_dataloader: DataLoader,\n calib_steps: int,\n input_shape: 
List[int],\n input_dtype: torch.dtype = torch.float,\n inputs: List[Any] = None,\n setting: QuantizationSetting = None,\n collate_fn: Callable = None,\n platform: TargetPlatform = TargetPlatform.PPL_DSP_INT8,\n device: str = 'cuda',\n verbose: int = 0,\n do_quantize: bool = True,\n) -> BaseGraph:\n \"\"\"\n 量化一个 onnx 原生的模型\n 输入一个 onnx 模型的文件路径\n 返回一个量化后的 PPQ.IR.BaseGraph\n quantize onnx model, input onnx model and return quantized ppq IR graph\n\n Args:\n onnx_import_file (str): 被量化的 onnx 模型文件路径 onnx model location\n \n calib_dataloader (DataLoader): 校准数据集 calibration data loader\n\n calib_steps (int): 校准步数 calibration steps\n\n collate_fn (Callable): 校准数据的预处理函数 batch collate func for preprocessing\n \n input_shape (List[int]): 模型输入尺寸,用于执行 jit.trace,对于动态尺寸的模型,输入一个模型可接受的尺寸即可。\n 如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None\n a list of ints indicating size of input, for multiple inputs, please use \n keyword arg inputs for direct parameter passing and this should be set to None\n\n input_dtype (torch.dtype): 模型输入数据类型,如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None\n the torch datatype of input, for multiple inputs, please use keyword arg inputs\n for direct parameter passing and this should be set to None\n\n inputs (List[Any], optional): 对于存在多个输入的模型,在Inputs中直接指定一个输入List,从而完成模型的tracing。\n for multiple inputs, please give the specified inputs directly in the form of\n a list of arrays\n\n setting (OptimSetting): 量化配置信息,用于配置量化的各项参数,设置为 None 时加载默认参数。\n Quantization setting, default setting will be used when set None\n\n do_quantize (Bool, optional): 是否执行量化 whether to quantize the model, defaults to True.\n\n\n platform (TargetPlatform, optional): 量化的目标平台 target backend platform, defaults to TargetPlatform.DSP_INT8.\n \n device (str, optional): 量化过程的执行设备 execution device, defaults to 'cuda'.\n\n verbose (int, optional): 是否打印详细信息 whether to print details, defaults to 0.\n\n Raises:\n ValueError: 给定平台不可量化 the given platform doesn't support quantization\n KeyError: 给定平台不被支持 the given platform is not supported yet\n\n Returns:\n BaseGraph: 量化后的IR,包含了后端量化所需的全部信息 \n The quantized IR, containing all information needed for backend execution\n \"\"\"\n if not TargetPlatform.is_quantized_platform(platform=platform):\n raise ValueError(f'Target Platform {platform} is an non-quantable platform.')\n if platform not in QUANTIZER_COLLECTION:\n raise KeyError(f'Target Platform {platform} is not supported by ppq right now.')\n if do_quantize:\n if calib_dataloader is None or calib_steps is None:\n raise TypeError('Quantization needs a valid calib_dataloader and calib_steps setting.')\n\n if setting is None:\n setting = QuantizationSettingFactory.default_setting()\n\n ppq_ir = load_onnx_graph(onnx_import_file=onnx_import_file)\n ppq_ir = dispatch_graph(graph=ppq_ir, platform=platform, setting=setting)\n\n if inputs is None:\n dummy_input = torch.zeros(size=input_shape, device=device, dtype=input_dtype)\n else: dummy_input = inputs\n\n quantizer = QUANTIZER_COLLECTION[platform](graph=ppq_ir)\n\n assert isinstance(quantizer, BaseQuantizer)\n executor = TorchExecutor(graph=quantizer._graph, device=device)\n if do_quantize:\n quantizer.quantize(\n inputs=dummy_input,\n calib_dataloader=calib_dataloader,\n executor=executor,\n setting=setting,\n calib_steps=calib_steps,\n collate_fn=collate_fn\n )\n if verbose: quantizer.report()\n return quantizer._graph\n else:\n return quantizer._graph\n\n@ empty_ppq_cache\ndef quantize_torch_model(\n model: torch.nn.Module,\n calib_dataloader: DataLoader,\n calib_steps: int,\n input_shape: 
List[int],\n input_dtype: torch.dtype = torch.float,\n setting: QuantizationSetting = None,\n collate_fn: Callable = None,\n inputs: List[Any] = None,\n do_quantize: bool = True,\n platform: TargetPlatform = TargetPlatform.PPL_DSP_INT8,\n onnx_export_file: str = 'onnx.model',\n device: str = 'cuda',\n verbose: int = 0,\n ) -> BaseGraph:\n \"\"\"\n 量化一个 Pytorch 原生的模型\n 输入一个 torch.nn.Module\n 返回一个量化后的 PPQ.IR.BaseGraph\n \n quantize a pytorch model, input pytorch model and return quantized ppq IR graph\n Args:\n model (torch.nn.Module): 被量化的 torch 模型(torch.nn.Module) the pytorch model\n\n calib_dataloader (DataLoader): 校准数据集 calibration dataloader\n\n calib_steps (int): 校准步数 calibration steps\n\n collate_fn (Callable): 校准数据的预处理函数 batch collate func for preprocessing\n \n input_shape (List[int]): 模型输入尺寸,用于执行 jit.trace,对于动态尺寸的模型,输入一个模型可接受的尺寸即可。\n 如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None\n a list of ints indicating size of input, for multiple inputs, please use \n keyword arg inputs for direct parameter passing and this should be set to None\n\n input_dtype (torch.dtype): 模型输入数据类型,如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None\n the torch datatype of input, for multiple inputs, please use keyword arg inputs\n for direct parameter passing and this should be set to None\n\n setting (OptimSetting): 量化配置信息,用于配置量化的各项参数,设置为 None 时加载默认参数。\n Quantization setting, default setting will be used when set None\n\n inputs (List[Any], optional): 对于存在多个输入的模型,在Inputs中直接指定一个输入List,从而完成模型的tracing。\n for multiple inputs, please give the specified inputs directly in the form of\n a list of arrays\n\n do_quantize (Bool, optional): 是否执行量化 whether to quantize the model, defaults to True, defaults to True.\n\n platform (TargetPlatform, optional): 量化的目标平台 target backend platform, defaults to TargetPlatform.DSP_INT8.\n \n device (str, optional): 量化过程的执行设备 execution device, defaults to 'cuda'.\n\n verbose (int, optional): 是否打印详细信息 whether to print details, defaults to 0.\n\n Raises:\n ValueError: 给定平台不可量化 the given platform doesn't support quantization\n KeyError: 给定平台不被支持 the given platform is not supported yet\n\n Returns:\n BaseGraph: 量化后的IR,包含了后端量化所需的全部信息 \n The quantized IR, containing all information needed for backend execution\n \"\"\"\n # dump pytorch model to onnx\n dump_torch_to_onnx(model=model, onnx_export_file=onnx_export_file, \n input_shape=input_shape, input_dtype=input_dtype, \n inputs=inputs, device=device)\n\n return quantize_onnx_model(onnx_import_file=onnx_export_file, \n calib_dataloader=calib_dataloader, calib_steps=calib_steps, collate_fn=collate_fn, \n input_shape=input_shape, input_dtype=input_dtype, inputs=inputs, setting=setting, \n platform=platform, device=device, verbose=verbose, do_quantize=do_quantize)\n\n@ empty_ppq_cache\ndef quantize_caffe_model(\n caffe_proto_file: str,\n caffe_model_file: str,\n calib_dataloader: DataLoader,\n calib_steps: int,\n input_shape: List[int],\n input_dtype: torch.dtype = torch.float,\n setting: QuantizationSetting = None,\n collate_fn: Callable = None,\n inputs: List[Any] = None,\n do_quantize: bool = True,\n platform: TargetPlatform = TargetPlatform.PPL_DSP_INT8,\n device: str = 'cuda',\n verbose: int = 0,\n) -> BaseGraph:\n \"\"\"\n 量化一个 caffe 原生的模型\n 输入一个 caffe 模型的文件路径和权重路径\n 返回一个量化后的 PPQ.IR.BaseGraph\n quantize caffe model, input caffe prototxt and weight path, return a quantized ppq graph\n Args:\n caffe_proto_file (str): 被量化的 caffe 模型文件 .prototxt 路径\n caffe prototxt location\n\n caffe_model_file (str): 被量化的 caffe 模型文件 .caffemodel 路径\n caffe weight 
location\n\n calib_dataloader (DataLoader): 校准数据集 calibration data loader\n\n calib_steps (int): 校准步数 calibration steps\n\n collate_fn (Callable): 校准数据的预处理函数 batch collate func for preprocessing\n\n input_shape (List[int]): 模型输入尺寸,用于执行 jit.trace,对于动态尺寸的模型,输入一个模型可接受的尺寸即可。\n 如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None\n a list of ints indicating size of input, for multiple inputs, please use \n keyword arg inputs for direct parameter passing and this should be set to None\n\n input_dtype (torch.dtype): 模型输入数据类型,如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None\n the torch datatype of input, for multiple inputs, please use keyword arg inputs\n for direct parameter passing and this should be set to None\n\n setting (OptimSetting): 量化配置信息,用于配置量化的各项参数,设置为 None 时加载默认参数。\n Quantization setting, default setting will be used when set None\n\n inputs (List[Any], optional): 对于存在多个输入的模型,在Inputs中直接指定一个输入List,从而完成模型的tracing。\n for multiple inputs, please give the specified inputs directly in the form of\n a list of arrays\n\n do_quantize (Bool, optional): 是否执行量化 whether to quantize the model, defaults to True, defaults to True.\n\n platform (TargetPlatform, optional): 量化的目标平台 target backend platform, defaults to TargetPlatform.DSP_INT8.\n \n device (str, optional): 量化过程的执行设备 execution device, defaults to 'cuda'.\n\n verbose (int, optional): 是否打印详细信息 whether to print details, defaults to 0.\n\n Raises:\n ValueError: 给定平台不可量化 the given platform doesn't support quantization\n KeyError: 给定平台不被支持 the given platform is not supported yet\n\n Returns:\n BaseGraph: 量化后的IR,包含了后端量化所需的全部信息 \n The quantized IR, containing all information needed for backend execution\n \"\"\"\n if not TargetPlatform.is_quantized_platform(platform=platform):\n raise ValueError(f'Target Platform {platform} is an non-quantable platform.')\n if platform not in QUANTIZER_COLLECTION:\n raise KeyError(f'Target Platform {platform} is not supported by ppq right now.')\n if do_quantize:\n if calib_dataloader is None or calib_steps is None:\n raise TypeError('Quantization needs a valid calib_dataloader and calib_steps setting.')\n \n if setting is None:\n setting = QuantizationSettingFactory.default_setting()\n\n ppq_ir = load_graph(file_path=caffe_proto_file, \n caffemodel_path=caffe_model_file, \n from_framework=NetworkFramework.CAFFE)\n \n ppq_ir = format_graph(ppq_ir)\n ppq_ir = dispatch_graph(ppq_ir, platform, setting)\n\n if inputs is None:\n dummy_input = torch.zeros(size=input_shape, device=device, dtype=input_dtype)\n else: dummy_input = inputs\n\n quantizer = QUANTIZER_COLLECTION[platform](graph=ppq_ir)\n\n assert isinstance(quantizer, BaseQuantizer)\n executor = TorchExecutor(graph=quantizer._graph, device=device)\n if do_quantize:\n quantizer.quantize(\n inputs=dummy_input,\n calib_dataloader=calib_dataloader,\n executor=executor,\n setting=setting,\n calib_steps=calib_steps,\n collate_fn=collate_fn\n )\n if verbose: quantizer.report()\n return quantizer._graph\n else:\n return quantizer._graph\n\n@ empty_ppq_cache\ndef quantize_native_model(\n model: BaseGraph,\n calib_dataloader: DataLoader,\n calib_steps: int,\n input_shape: List[int],\n input_dtype: torch.dtype = torch.float,\n inputs: List[Any] = None,\n setting: QuantizationSetting = None,\n collate_fn: Callable = None,\n platform: TargetPlatform = TargetPlatform.PPL_DSP_INT8,\n device: str = 'cuda',\n verbose: int = 0,\n do_quantize: bool = True,\n) -> BaseGraph:\n \"\"\"\n 量化一个已经在内存中的 ppq 模型\n 输入一个量化前的 PPQ.IR.BaseGraph\n 返回一个量化后的 PPQ.IR.BaseGraph\n quantize ppq model, input ppq graph 
and return quantized ppq graph\n\n Args:\n native (BaseGraph): 被量化的 ppq graph\n \n calib_dataloader (DataLoader): 校准数据集 calibration data loader\n\n calib_steps (int): 校准步数 calibration steps\n\n collate_fn (Callable): 校准数据的预处理函数 batch collate func for preprocessing\n \n input_shape (List[int]): 模型输入尺寸,用于执行 jit.trace,对于动态尺寸的模型,输入一个模型可接受的尺寸即可。\n 如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None\n a list of ints indicating size of input, for multiple inputs, please use \n keyword arg inputs for direct parameter passing and this should be set to None\n\n input_dtype (torch.dtype): 模型输入数据类型,如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None\n the torch datatype of input, for multiple inputs, please use keyword arg inputs\n for direct parameter passing and this should be set to None\n\n inputs (List[Any], optional): 对于存在多个输入的模型,在Inputs中直接指定一个输入List,从而完成模型的tracing。\n for multiple inputs, please give the specified inputs directly in the form of\n a list of arrays\n\n setting (OptimSetting): 量化配置信息,用于配置量化的各项参数,设置为 None 时加载默认参数。\n Quantization setting, default setting will be used when set None\n\n do_quantize (Bool, optional): 是否执行量化 whether to quantize the model, defaults to True.\n\n\n platform (TargetPlatform, optional): 量化的目标平台 target backend platform, defaults to TargetPlatform.DSP_INT8.\n \n device (str, optional): 量化过程的执行设备 execution device, defaults to 'cuda'.\n\n verbose (int, optional): 是否打印详细信息 whether to print details, defaults to 0.\n\n Raises:\n ValueError: 给定平台不可量化 the given platform doesn't support quantization\n KeyError: 给定平台不被支持 the given platform is not supported yet\n\n Returns:\n BaseGraph: 量化后的IR,包含了后端量化所需的全部信息 \n The quantized IR, containing all information needed for backend execution\n \"\"\"\n if not TargetPlatform.is_quantized_platform(platform=platform):\n raise ValueError(f'Target Platform {platform} is an non-quantable platform.')\n if platform not in QUANTIZER_COLLECTION:\n raise KeyError(f'Target Platform {platform} is not supported by ppq right now.')\n if do_quantize:\n if calib_dataloader is None or calib_steps is None:\n raise TypeError('Quantization needs a valid calib_dataloader and calib_steps setting.')\n\n if setting is None:\n setting = QuantizationSettingFactory.default_setting()\n ppq_ir = dispatch_graph(graph=model, platform=platform, setting=setting)\n\n if inputs is None:\n dummy_input = torch.zeros(size=input_shape, device=device, dtype=input_dtype)\n else: dummy_input = inputs\n\n quantizer = QUANTIZER_COLLECTION[platform](graph=ppq_ir)\n\n assert isinstance(quantizer, BaseQuantizer)\n executor = TorchExecutor(graph=quantizer._graph, device=device)\n if do_quantize:\n quantizer.quantize(\n inputs=dummy_input,\n calib_dataloader=calib_dataloader,\n executor=executor,\n setting=setting,\n calib_steps=calib_steps,\n collate_fn=collate_fn\n )\n if verbose: quantizer.report()\n return quantizer._graph\n else:\n return quantizer._graph\n\n\ndef export_ppq_graph(\n graph: BaseGraph, \n platform: TargetPlatform, \n graph_save_to: str, \n config_save_to: str = None, \n **kwargs) -> None:\n \"\"\"\n 使用这个函数将 PPQ ir 保存到文件,同时导出 PPQ 的量化配置信息。\n 该函数可以将 PPQ ir 保存为不同格式的模型文件。\n this func dumps ppq IR to file, and exports quantization setting information simultaneously\n\n 详细的支持情况请参考: ppq.parser.__ini__.py\n for details please refer to ppq.parser.__ini__.py\n\n Args:\n graph (BaseGraph): 被保存的 ir \n the ppq IR graph\n\n platform (TargetPlatform): 期望部署的目标平台\n target backend platform\n\n graph_save_to (str): 模型保存文件名,不要写后缀名,ppq 会自己加后缀\n filename to save, do not add postfix to this\n\n 
config_save_to (str): 量化配置信息保存文件名。\n 注意部分平台导出时会将量化配置信息直接写入模型,在这种情况下设置此参数无效\n note that some of platforms requires to write quantization setting\n directly into the model file, this parameter won't have effect at\n this situation\n \"\"\"\n postfix = ''\n if platform in EXPORTING_POSTFIX:\n postfix = EXPORTING_POSTFIX[platform]\n graph_save_to += postfix\n\n for save_path in [graph_save_to, config_save_to]:\n if save_path is None: continue\n if os.path.exists(save_path):\n if os.path.isfile(save_path):\n ppq_warning(f'File {save_path} has already exist, ppq exporter will overwrite it.')\n if os.path.isdir(save_path):\n raise FileExistsError(f'File {save_path} has already exist, and it is a directory, '\n 'ppq exporter can not create file here.')\n\n if platform not in EXPORTERS:\n raise KeyError(f'Requiring framework {platform} does not support export now.')\n exporter = EXPORTERS[platform]()\n assert isinstance(exporter, GraphExporter), 'Unexpected Exporter found.'\n exporter.export(file_path=graph_save_to, config_path=config_save_to, graph=graph, **kwargs)\n\n\ndef format_graph(graph: BaseGraph) -> BaseGraph:\n \"\"\"\n\n 这个函数对计算图进行预处理工作,其主要内容是将计算图的格式进行统一\n 这个函数将会统一 cast, slice, parameter, constant 算子的格式,并且执行有关 batchnorm 的合并工作\n \n 在 PPQ 中,我们不希望出现 Constant 算子,所有 Constant 输入将被当作 parameter variable 连接到下游算子上\n 在 PPQ 中,我们不希望出现 Batchnorm 算子,所有 Batchnorm 将被合并\n 在 PPQ 中,我们不希望出现权重共享的算子,所有被共享的权重将被复制分裂成多份\n 在 PPQ 中,我们不希望出现孤立算子,所有孤立算子将被移除\n \n This function takes pre-processing procedure with your graph.\n This function will convert operations like cast, slice, parameter, constant to the format that supported by ppq.\n This function will merge batchnorm when possible.\n \n During quantization logic, we do not expect there is any constant operation in your network, so\n all of them will be converted as parameter input variable.\n \n We do not expect there is any shared parameter in your network, all of them will be copied and spilted.\n We do not expect any isolated operation in your network, all of them will be removed.\n\n \"\"\"\n\n # do graph level optimization\n formatter = GraphFormatter(GraphMerger(graph))\n\n formatter(GraphCommand(GraphCommandType.FORMAT_CONSTANT_INPUT))\n formatter(GraphCommand(GraphCommandType.FUSE_BN))\n formatter(GraphCommand(GraphCommandType.FORMAT_PARAMETERS))\n formatter(GraphCommand(GraphCommandType.FORMAT_CAST))\n formatter(GraphCommand(GraphCommandType.FORMAT_SLICE))\n formatter(GraphCommand(GraphCommandType.FORMAT_CLIP))\n formatter(GraphCommand(GraphCommandType.DELETE_ISOLATED))\n\n return graph\n\n\ndef dispatch_graph(graph: BaseGraph, platform: TargetPlatform, setting: QuantizationSetting) -> BaseGraph:\n \"\"\"\n \n 这个函数执行图切分与调度,你的计算图将被切分成一系列子图,并被调度到不同设备上。\n 调度的逻辑分为自动控制的部分以及手动覆盖的部分,你可以使用 QuantizationSetting 来向这个函数传递手动调度表\n 从而覆盖 PPQ 的调度逻辑。\n \n 注意:这个函数依据调度器和TargetPlatform 平台的不同而产生行为差异,生成不同的调度计划。\n \n This function will cut your graph into a series of subgraph and send them to different device.\n PPQ provides an automatic dispatcher which, will generate different dispatching scheme on your TargetPlatform.\n A dispatching table can be passed via QuantizationSetting to override \n the default dispatching logic of ppq dispatcher manually.\n\n \"\"\"\n assert platform in QUANTIZER_COLLECTION, (\n f'Platform misunderstood, except one of following platform {QUANTIZER_COLLECTION.keys()}')\n quantizer = QUANTIZER_COLLECTION[platform](graph) # 初始化一个 quantizer 没有很大代价...\n \n if str(setting.dispatcher).lower() not in DISPATCHER_TABLE:\n raise ValueError(f'Can not found 
dispatcher type \"{setting.dispatcher}\", check your input again.')\n dispatcher = DISPATCHER_TABLE[str(setting.dispatcher).lower()]()\n assert isinstance(dispatcher, GraphDispatcher)\n assert isinstance(quantizer, BaseQuantizer)\n quant_types = quantizer.quant_operation_types\n\n dispatching_table = dispatcher.dispatch(\n graph=graph, quant_types=quant_types, \n quant_platform=TargetPlatform.UNSPECIFIED, # MUST BE UNSPECIFIED, 这里的意思是交由 Quantizer 决定是否量化这个算子\n fp32_platform=TargetPlatform.FP32, \n SOI_platform=TargetPlatform.SHAPE_OR_INDEX)\n\n # override dispatching result with setting\n dispatching_override = setting.dispatching_table\n for opname, platform in dispatching_override.dispatchings.items():\n if opname not in graph.operations: continue\n assert isinstance(platform, int), (\n f'Your dispatching table contains a invalid setting of operation {opname}, '\n 'All platform setting given in dispatching table is expected given as int, '\n f'however {type(platform)} was given.')\n dispatching_table[opname] = TargetPlatform(platform)\n \n for operation in graph.operations.values():\n assert operation.name in dispatching_table, (\n f'Internal Error, Can not find operation {operation.name} in dispatching table.')\n operation.platform = dispatching_table[operation.name]\n \n # insert necessary device switchers.\n formatter = GraphDeviceSwitcher(graph)\n formatter(GraphCommand(GraphCommandType.INSERT_SWITCHER))\n return graph\n \n\nclass UnbelievableUserFriendlyQuantizationSetting:\n \"\"\"\n 量化配置文件 -- 入门版\n\n 这个文件包含了最基本的量化配置。\n \"\"\"\n \n def __init__(self, platform: TargetPlatform, finetune_steps: int = 5000, finetune_lr: float = 3e-4,\n interested_outputs: List[str] = None, calibration: str = 'percentile', equalization: bool = True,\n non_quantable_op: List[str] = None) -> None:\n \"\"\"\n 量化配置文件 -- 入门版\n\n 这个文件包含了最基本的量化配置。\n\n Args:\n platform (TargetPlatform): 目标量化平台\n finetune_steps (int, optional): 网络 finetune 步数. Defaults to 5000.\n finetune_lr (float, optional): 网络 finetune 学习率. Defaults to 3e-4.\n interested_outputs (List[str], optional): 用来finetune的variable名字,请注意对于静态图而言其总是由 op 和 variable 组成的,\n 有时候你的网络输出并不是可导的,或者是一个softmax或者sigmoid的输出,这些时候finetune的结果不会很好,你可以通过这个属性\n 来指定一个variable的名字,我们将用这个variable的输出结果来引导finetune流程,当然一个variable list也是可以的。\n equalization (bool, optional): 是否要拉平网络权重. Defaults to True.\n non_quantable_op (List[str], optional): 非量化算子集合,所有名字出现在该集合里的算子将不被量化. 
Defaults to None.\n \"\"\"\n self.equalization = equalization\n self.finetune_steps = finetune_steps\n self.finetune_lr = finetune_lr\n self.calibration = calibration\n self.platform = platform\n self.non_quantable_op = non_quantable_op\n self.interested_outputs = interested_outputs\n\n if isinstance(self.non_quantable_op, str): self.non_quantable_op = [self.non_quantable_op]\n if isinstance(self.interested_outputs, str): self.interested_outputs = [self.interested_outputs]\n\n def convert_to_daddy_setting(self) -> QuantizationSetting:\n # 将菜鸡版量化配置转换成高级版的\n daddy = QuantizationSettingFactory.default_setting()\n daddy.quantize_activation_setting.calib_algorithm = self.calibration\n \n if self.platform in {TargetPlatform.PPL_CUDA_INT4, TargetPlatform.PPL_CUDA_INT8}:\n daddy.fusion_setting.fuse_conv_add = True\n else: daddy.fusion_setting.fuse_conv_add = False\n\n if self.platform in {TargetPlatform.METAX_INT8_C, TargetPlatform.METAX_INT8_T}:\n daddy.fusion_setting.force_alignment_overlap = True\n\n if self.finetune_steps > 0:\n daddy.advanced_optimization = True\n daddy.advanced_optimization_setting.steps = self.finetune_steps\n daddy.advanced_optimization_setting.lr = self.finetune_lr\n daddy.advanced_optimization_setting.limit = 2.0\n daddy.advanced_optimization_setting.interested_outputs = self.interested_outputs\n\n if self.equalization == True:\n daddy.equalization = True\n daddy.equalization_setting.iterations = 3\n daddy.equalization_setting.opt_level = 1\n daddy.equalization_setting.value_threshold = 0\n\n if self.non_quantable_op is not None:\n for op_name in self.non_quantable_op:\n assert isinstance(op_name, str), (\n f'你尝试使用 non_quantable_op 来设定非量化算子,'\n f'non_quantable_op 只应当包含算子的名字,而你传入的数据中包括了 {type(op_name)}')\n daddy.dispatching_table.append(op_name, TargetPlatform.FP32)\n \n return daddy\n\n def to_json(self, file_path: str) -> str:\n if os.path.exists(file_path):\n if os.path.isdir(file_path): \n raise FileExistsError(f'文件 {file_path} 已经存在且是一个目录,无法将配置文件写入到该位置!')\n ppq_warning(f'文件 {file_path} 已经存在并将被覆盖')\n\n # TargetPlatform is not a native type, convert it to string.\n dump_dict = self.__dict__.copy()\n dump_dict['platform'] = self.platform.name\n\n with open(file_path, 'w', encoding='utf-8') as file:\n json.dump(obj=dump_dict, fp=file, sort_keys=True, indent=4, ensure_ascii=False)\n\n @ staticmethod\n def from_file(file_path: str):\n if not os.path.exists(file_path):\n raise FileNotFoundError('找不到你的配置文件,检查配置文件路径是否正确!')\n with open(file_path, 'r', encoding='utf-8') as file:\n loaded = json.load(file)\n assert isinstance(loaded, dict), 'Json文件无法解析,格式不正确'\n assert 'platform' in loaded, 'Json文件缺少必要项目 \"platform\"'\n \n platform = loaded['platform']\n if platform in TargetPlatform._member_names_:\n platform = TargetPlatform._member_map_[platform]\n else: raise KeyError('无法解析你的json配置文件,遇到了未知的platform属性。')\n \n setting = UnbelievableUserFriendlyQuantizationSetting(platform)\n for key, value in loaded.items():\n if key == 'platform': continue\n if key in setting.__dict__: setting.__dict__[key] = value\n if key not in setting.__dict__: ppq_warning(f'你的Json文件中包含无法解析的属性 {key} ,该属性已经被舍弃')\n assert isinstance(setting, UnbelievableUserFriendlyQuantizationSetting)\n return setting\n\n def __str__(self) -> str:\n return str(self.__dict__)\n\n\ndef quantize(working_directory: str, setting: QuantizationSetting, model_type: NetworkFramework,\n executing_device: str, input_shape: List[int], target_platform: TargetPlatform,\n dataloader: DataLoader, calib_steps: int = 32) -> BaseGraph:\n 
\"\"\"\n Helper function for quantize your model within working directory,\n This function will do some check and redirect your requirement to: \n ppq.api.quantize_onnx_model\n ppq.api.quantize_caffe_model\n \n see them for more information.\n \n Args:\n working_directory (str): A path that indicates working directory.\n setting (QuantizationSetting): Quantization setting\n model_type (NetworkFramework): Onnx or Caffe\n executing_device (str): 'cuda' or 'cpu'\n input_shape (List[int]): sample input's shape\n target_platform (TargetPlatform): Target deploy platform\n dataloader (DataLoader): calibraiton dataloader\n calib_steps (int, optional): Defaults to 32.\n\n Raises:\n FileNotFoundError: _description_\n FileNotFoundError: _description_\n\n Returns:\n BaseGraph: _description_\n \"\"\"\n if model_type == NetworkFramework.ONNX:\n if not os.path.exists(os.path.join(working_directory, 'model.onnx')):\n raise FileNotFoundError(f'无法找到你的模型: {os.path.join(working_directory, \"model.onnx\")},'\n '如果你使用caffe的模型, 请设置MODEL_TYPE为CAFFE')\n return quantize_onnx_model(\n onnx_import_file=os.path.join(working_directory, 'model.onnx'),\n calib_dataloader=dataloader, calib_steps=calib_steps, input_shape=input_shape, setting=setting,\n platform=target_platform, device=executing_device, collate_fn=lambda x: x.to(executing_device)\n )\n if model_type == NetworkFramework.CAFFE:\n if not os.path.exists(os.path.join(working_directory, 'model.caffemodel')):\n raise FileNotFoundError(f'无法找到你的模型: {os.path.join(working_directory, \"model.caffemodel\")},'\n '如果你使用ONNX的模型, 请设置MODEL_TYPE为ONNX')\n return quantize_caffe_model(\n caffe_proto_file=os.path.join(working_directory, 'model.prototxt'),\n caffe_model_file=os.path.join(working_directory, 'model.caffemodel'),\n calib_dataloader=dataloader, calib_steps=calib_steps, input_shape=input_shape, setting=setting,\n platform=target_platform, device=executing_device, collate_fn=lambda x: x.to(executing_device)\n )\n\n\ndef export(working_directory: str, quantized: BaseGraph, platform: TargetPlatform, **kwargs):\n \"\"\"\n Helper function to export your graph to working directory,\n You should notice this function just redirect your invoking to export_ppq_graph.\n see export_ppq_graph for more information.\n\n Args:\n working_directory (str): _description_\n quantized (BaseGraph): _description_\n platform (TargetPlatform): _description_\n \"\"\"\n export_ppq_graph(\n graph=quantized, platform=platform,\n graph_save_to=os.path.join(working_directory, 'quantized'),\n config_save_to=os.path.join(working_directory, 'quantized.json'),\n **kwargs\n )\n\n\ndef manop(graph: BaseGraph, list_of_passes: List[QuantizationOptimizationPass], \n calib_dataloader: Iterable, executor: BaseGraphExecutor,\n collate_fn: Callable = None, **kwargs) -> BaseGraph:\n \"\"\"\n manop 是一个很方便的函数,你可以调用这个函数来手动地执行一些量化优化工作\n 你可以在默认量化逻辑之前或之后调用这个函数来自定义量化处理流程,相比于直接实现\n Quantizer来修改量化逻辑的方式, 使用 manop 会更加灵活。\n \n MANOP (manually optimize) function is introduced since PPQ 0.6.4.\n This function allows you to manually invoke \n QuantizationOptimizationPass before or after default quantization logic.\n \n We do not use function name like apply, optim, do ... 
\n Because they are so widely-used in other python libraries, \n and can easily conflict with each other.\n\n Args:\n graph (BaseGraph): processing graph.\n list_of_passes (List[QuantizationOptimizationPass]): a collection of optimization logic.\n calib_dataloader (Iterable): _description_\n executor (BaseGraphExecutor): _description_\n collate_fn (Callable): _description_\n\n Raises:\n TypeError: _description_\n TypeError: _description_\n\n Returns:\n BaseGraph: processed graph\n \"\"\"\n if isinstance(list_of_passes, QuantizationOptimizationPass):\n list_of_passes = [list_of_passes]\n\n if not (isinstance(list_of_passes, list) or isinstance(list_of_passes, tuple)):\n raise TypeError('Can not apply optimization on your graph, '\n 'expect a list of QuantizationOptimizationPass as input, '\n f'while {type(list_of_passes)} was given.')\n\n for optim in list_of_passes:\n if not isinstance(optim, QuantizationOptimizationPass):\n raise TypeError('Invoking this function needs a list of QuantizationOptimizationPass, '\n f'however there is a/an {type(optim)} in your list')\n optim.apply(graph, dataloader=calib_dataloader, executor=executor, collate_fn=collate_fn, **kwargs)\n return graph\n\n\n__all__ = ['load_graph', 'load_onnx_graph', 'load_caffe_graph',\n 'dispatch_graph', 'dump_torch_to_onnx', 'quantize_onnx_model', \n 'quantize_torch_model', 'quantize_caffe_model', \n 'export_ppq_graph', 'format_graph', 'quantize', 'export', \n 'UnbelievableUserFriendlyQuantizationSetting', 'manop', \n 'quantize_native_model']\n"
] | [
[
"torch.zeros",
"torch.onnx.export"
]
] |
nishimoto/py_r_stats | [
"1e4a2dbe68feb25d34f2cbf1a769cb44238ae6cd"
] | [
"visualization/plt.mnist.py"
] | [
"#!/usr/bin/env python\n#!pip install japanize_matplotlib # プロット中で日本語を使用するためにインストール\n\nimport itertools\nimport pandas as pd\nimport matplotlib.pylab as plt\nfrom IPython.display import display\n\ndf = pd.read_csv(\"mnist_train.csv\", header=None, index_col=0)\ndisplay(df.head())\n\nxs = []\nys = []\nfor x, y in itertools.permutations(range(28), 2):\n iloc = y * 28 + x\n if df.iloc[0, iloc] != 0:\n xs.append(x)\n ys.append(28 - y)\nplt.scatter(xs, ys)\n"
] | [
[
"pandas.read_csv",
"matplotlib.pylab.scatter"
]
] |
etiennegalea/braindecode | [
"5753ea8b255e2dd2f120df80116ec1bd4c199f78"
] | [
"datasets/sleep_physionet.py"
] | [
"# Authors: Hubert Banville <[email protected]>\n#\n# License: BSD (3-clause)\n\n\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport mne\nfrom mne.datasets.sleep_physionet.age import fetch_data\n\nfrom .base import BaseDataset, BaseConcatDataset\n\n\nclass SleepPhysionet(BaseConcatDataset):\n \"\"\"Sleep Physionet dataset.\n\n Sleep dataset from https://physionet.org/content/sleep-edfx/1.0.0/.\n Contains overnight recordings from 78 healthy subjects.\n\n See [MNE example](https://mne.tools/stable/auto_tutorials/sample-datasets/plot_sleep.html).\n\n Parameters\n ----------\n subject_ids: list(int) | int | None\n (list of) int of subject(s) to be loaded. If None, load all available\n subjects.\n recording_ids: list(int) | None\n Recordings to load per subject (each subject except 13 has two\n recordings). Can be [1], [2] or [1, 2] (same as None).\n preload: bool\n If True, preload the data of the Raw objects.\n load_eeg_only: bool\n If True, only load the EEG channels and discard the others (EOG, EMG,\n temperature, respiration) to avoid resampling the other signals.\n crop_wake_mins: float\n Number of minutes of wake time to keep before the first sleep event\n and after the last sleep event. Used to reduce the imbalance in this\n dataset. Default of 30 mins.\n sfreq: float\n If sfreq is not None, resample to the raw signal to the specified\n value.\n n_jobs: int\n Number of jobs (cpu threads) for resampling function.\n \"\"\"\n def __init__(self, subject_ids=None, recording_ids=None, preload=False,\n load_eeg_only=True, crop_wake_mins=30, sfreq=None, n_jobs=1):\n if subject_ids is None:\n subject_ids = range(83)\n if recording_ids is None:\n recording_ids = [1, 2]\n\n paths = fetch_data(\n subject_ids, recording=recording_ids, on_missing='warn')\n\n all_base_ds = list()\n for p in paths:\n raw, desc = self._load_raw(\n p[0], p[1], preload=preload, load_eeg_only=load_eeg_only,\n crop_wake_mins=crop_wake_mins, sfreq=sfreq, n_jobs=n_jobs)\n base_ds = BaseDataset(raw, desc)\n all_base_ds.append(base_ds)\n super().__init__(all_base_ds)\n\n @staticmethod\n def _load_raw(raw_fname, ann_fname, preload, load_eeg_only=True,\n crop_wake_mins=False, sfreq=None, n_jobs=1):\n ch_mapping = {\n 'EOG horizontal': 'eog',\n 'Resp oro-nasal': 'misc',\n 'EMG submental': 'misc',\n 'Temp rectal': 'misc',\n 'Event marker': 'misc'\n }\n exclude = list(ch_mapping.keys()) if load_eeg_only else ()\n\n raw = mne.io.read_raw_edf(raw_fname, preload=preload, exclude=exclude)\n # resample if sfreq is different then registered\n if sfreq is not None and sfreq != raw.info['sfreq']:\n print(f'TO resample: {sfreq}')\n print(f'Sampling rate before: {raw.info[\"sfreq\"]}')\n raw = mne.io.Raw.resample(raw, sfreq, n_jobs=n_jobs)\n print(f'Sampling rate after: {raw.info[\"sfreq\"]}')\n annots = mne.read_annotations(ann_fname)\n raw.set_annotations(annots, emit_warning=False)\n\n if crop_wake_mins > 0:\n # Find first and last sleep stages\n mask = [\n x[-1] in ['1', '2', '3', '4', 'R'] for x in annots.description]\n sleep_event_inds = np.where(mask)[0]\n\n # Crop raw\n tmin = annots[int(sleep_event_inds[0])]['onset'] - crop_wake_mins * 60\n tmax = annots[int(sleep_event_inds[-1])]['onset'] + crop_wake_mins * 60\n raw.crop(tmin=max(tmin, raw.times[0]),\n tmax=min(tmax, raw.times[-1]))\n\n # Rename EEG channels\n ch_names = {\n i: i.replace('EEG ', '') for i in raw.ch_names if 'EEG' in i}\n raw.rename_channels(ch_names)\n\n if not load_eeg_only:\n raw.set_channel_types(ch_mapping)\n\n basename = 
os.path.basename(raw_fname)\n subj_nb = int(basename[3:5])\n sess_nb = int(basename[5])\n desc = pd.Series({'subject': subj_nb, 'recording': sess_nb}, name='')\n\n return raw, desc\n"
] | [
[
"numpy.where",
"pandas.Series"
]
] |
XeniaLLL/GDN | [
"931b940f938041c092f2e133d6e6fe7bc4f8c127"
] | [
"models/graph_layer.py"
] | [
"import torch\nfrom torch.nn import Parameter, Linear, Sequential, BatchNorm1d, ReLU\nimport torch.nn.functional as F\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.utils import remove_self_loops, add_self_loops, softmax\n\nfrom torch_geometric.nn.inits import glorot, zeros\nimport time\nimport math\n\nclass GraphLayer(MessagePassing):\n def __init__(self, in_channels, out_channels, heads=1, concat=True,\n negative_slope=0.2, dropout=0, bias=True, inter_dim=-1,**kwargs):\n super(GraphLayer, self).__init__(aggr='add', **kwargs)\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.heads = heads\n self.concat = concat\n self.negative_slope = negative_slope\n self.dropout = dropout\n self.node_dim=0\n\n self.__alpha__ = None\n\n self.lin = Linear(in_channels, heads * out_channels, bias=False)\n\n self.att_i = Parameter(torch.Tensor(1, heads, out_channels))\n self.att_j = Parameter(torch.Tensor(1, heads, out_channels))\n self.att_em_i = Parameter(torch.Tensor(1, heads, out_channels))\n self.att_em_j = Parameter(torch.Tensor(1, heads, out_channels))\n\n if bias and concat:\n self.bias = Parameter(torch.Tensor(heads * out_channels))\n elif bias and not concat:\n self.bias = Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n glorot(self.lin.weight)\n glorot(self.att_i)\n glorot(self.att_j)\n \n zeros(self.att_em_i)\n zeros(self.att_em_j)\n\n zeros(self.bias)\n\n\n\n def forward(self, x, edge_index, embedding, return_attention_weights=False):\n \"\"\"\"\"\"\n if torch.is_tensor(x):\n x = self.lin(x)\n x = (x, x)\n else:\n x = (self.lin(x[0]), self.lin(x[1]))\n\n edge_index, _ = remove_self_loops(edge_index)\n edge_index, _ = add_self_loops(edge_index,\n num_nodes=x[1].size(self.node_dim))\n\n out = self.propagate(edge_index, x=x, embedding=embedding, edges=edge_index,\n return_attention_weights=return_attention_weights)\n\n if self.concat:\n out = out.view(-1, self.heads * self.out_channels)\n else:\n out = out.mean(dim=1)\n\n if self.bias is not None:\n out = out + self.bias\n\n if return_attention_weights:\n alpha, self.__alpha__ = self.__alpha__, None\n return out, (edge_index, alpha)\n else:\n return out\n\n def message(self, x_i, x_j, edge_index_i, size_i,\n embedding,\n edges,\n return_attention_weights):\n\n x_i = x_i.view(-1, self.heads, self.out_channels)\n x_j = x_j.view(-1, self.heads, self.out_channels)\n\n if embedding is not None:\n embedding_i, embedding_j = embedding[edge_index_i], embedding[edges[0]]\n embedding_i = embedding_i.unsqueeze(1).repeat(1,self.heads,1)\n embedding_j = embedding_j.unsqueeze(1).repeat(1,self.heads,1)\n\n key_i = torch.cat((x_i, embedding_i), dim=-1)\n key_j = torch.cat((x_j, embedding_j), dim=-1)\n\n\n\n cat_att_i = torch.cat((self.att_i, self.att_em_i), dim=-1)\n cat_att_j = torch.cat((self.att_j, self.att_em_j), dim=-1)\n\n alpha = (key_i * cat_att_i).sum(-1) + (key_j * cat_att_j).sum(-1)\n\n\n alpha = alpha.view(-1, self.heads, 1)\n\n\n alpha = F.leaky_relu(alpha, self.negative_slope)\n alpha = softmax(alpha, edge_index_i, size_i)\n\n if return_attention_weights:\n self.__alpha__ = alpha\n\n alpha = F.dropout(alpha, p=self.dropout, training=self.training)\n \n return x_j * alpha.view(-1, self.heads, 1)\n\n\n\n def __repr__(self):\n return '{}({}, {}, heads={})'.format(self.__class__.__name__,\n self.in_channels,\n self.out_channels, self.heads)\n"
] | [
[
"torch.nn.Linear",
"torch.cat",
"torch.is_tensor",
"torch.nn.functional.dropout",
"torch.Tensor",
"torch.nn.functional.leaky_relu"
]
] |
nertsam/DAGsched | [
"dcd431384fe1dd3f0f55a9287fc980f2b445beaf"
] | [
"model/plots/plot_design.py"
] | [
"from __future__ import division\nfrom matplotlib import rcParams\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport numpy as np\nimport itertools\n\nrcParams['ps.useafm'] = True\nrcParams['pdf.use14corefonts'] = True\nrcParams['text.usetex'] = True\n\n\ndef plot(filegroups):\n \n names = []\n labels = ['(a) m = 8', '(b) m = 16', '(c) m = 32']\n lines = []\n \n fig, axes = plt.subplots(1, 3, sharey=True, sharex=True)\n plt.subplots_adjust(top = 0.9, bottom = 0.47, left = 0.08, right = 0.98, wspace=0.05, hspace=0.2)\n axes[0].set_ylabel('Acceptance Ratio (\\%)',size=13)\n fig.text(0.5, 0.4, 'Utilization (\\%)', size=13, ha='center')\n\n for i in range(3):\n axes[i].spines['top'].set_color('none')\n axes[i].spines['bottom'].set_color('none')\n axes[i].spines['left'].set_color('none')\n axes[i].spines['right'].set_color('none')\n axes[i].tick_params(labelcolor = 'black', top = 'off', bottom = 'off', left = 'off', right = 'off')\n axes[i].grid()\n\n for i, filegroup in enumerate(filegroups):\n marker = itertools.cycle(('o', 'd', '+', 'v','h','D','x'))\n colors = itertools.cycle(('y','g','black','b','r','b','y','r'))\n for filename in filegroup:\n # load dataset\n dataset = np.load(filename)\n dataset = dataset.item()\n # basic setting\n stepsize = dataset['step_size'] \n setsize = dataset['set_size']\n simsize = dataset['sim_size']\n\n utilization = map(lambda u : 100.0 * u, np.arange(stepsize, 1.0 + stepsize, stepsize))\n acceptance = map(lambda failed : 100.0 * (simsize - failed)/simsize, dataset['results'])\n axes[i].axis([-2,102,-2,102])\n axes[i].plot(utilization, acceptance, '-', color = colors.next(), marker = marker.next(), markersize = 7, fillstyle = 'none', markevery = 1, label = dataset['id'], linewidth = 1.5)\n axes[i].tick_params(labelcolor='k', top='off', bottom='off', left='off', right='off')\n names.append(dataset['id'])\n axes[i].set_title(labels[i], size=13)\n \n for tick in axes[i].xaxis.get_major_ticks():\n tick.label.set_fontsize(13)\n \n for tick in axes[i].yaxis.get_major_ticks():\n tick.label.set_fontsize(13)\n \n axes[1].legend(names, bbox_to_anchor=(1.5, 1.2),\n loc=5,\n ncol=3,\n markerscale = 1.0, \n borderaxespad=0.,framealpha=1, \n prop={'size':12})\n \n #plt.show()\n return fig\n"
] | [
[
"numpy.arange",
"matplotlib.pyplot.subplots_adjust",
"numpy.load",
"matplotlib.pyplot.subplots"
]
] |
sebastianwindeck/AML_Task5 | [
"cf261827bb28604f44a1f8ce8212c0711910b5a7"
] | [
"helpers/plotter.py"
] | [
"import numpy as np\nimport itertools\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n\n\ndef _plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n eps = 10**(-6)\n\n if normalize:\n cm = cm.astype('float') / (cm.sum(axis=1)[:, np.newaxis]+eps)\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n\n plt.show()\n\ndef plot_confusion_matrix(classes, y_true, y_pred):\n\n cnf_matrix = confusion_matrix(y_true=np.ravel(y_true), y_pred=y_pred)\n np.set_printoptions(precision=2)\n\n plt.figure()\n _plot_confusion_matrix(cnf_matrix, classes=classes,\n title='Confusion matrix, without normalization')\n\n # Plot normalized confusion matrix\n plt.figure()\n _plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True,\n title='Normalized confusion matrix')\n\n\ndef plotter_input(data, labels):\n\n nr = 3\n nc = 3\n fig = plt.figure()\n j = 0\n for i in (0, 1, 2):\n ind = np.where(labels == i)\n ind = ind[0]\n print(ind.shape)\n for k in (0, 1, 2):\n ctr = j + 1\n print(ctr)\n ax = fig.add_subplot(nr, nc, ctr)\n data_p = data[ind[0], k * 128:(k + 1) * 128]\n ax.plot(data_p, color='b', alpha=0.2)\n data_p = data[ind[1], k * 128:(k + 1) * 128]\n ax.plot(data_p, color='b', alpha=0.2)\n data_p = data[ind[2], k * 128:(k + 1) * 128]\n ax.plot(data_p, color='b', alpha=0.2)\n ax.set_title(label=('Class: ', i, ' Type: ', k))\n j = j + 1\n\n plt.show()"
] | [
[
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel",
"numpy.set_printoptions",
"matplotlib.pyplot.title",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.ravel",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.imshow"
]
] |
mehmani/MFRED-DisAgV001 | [
"245f457c630a8149275728d47c7ac56188c3c20f"
] | [
"scores_sequence.py"
] | [
"def scores_sequence(Y_pred, Y_test, activation_threshold = 0.1 ,plot_results= False, print_results = False):\r\n \r\n import numpy as np\r\n import pandas as pd\r\n import seaborn as sns\r\n import matplotlib.pyplot as plt\r\n\r\n \"\"\"\r\n a function that computes the classification scores with various metrics\r\n return: dictionary with the various scores\r\n\r\n \"\"\"\r\n\r\n # post process the data\r\n\r\n np.putmask(Y_pred[:,0], Y_pred[:,0] <=0, 0)\r\n np.putmask(Y_pred[:,1], Y_pred[:,1] >=1, 1)\r\n np.putmask(Y_pred[:,0],Y_pred[:,1] < Y_pred[:,0],0)\r\n np.putmask(Y_pred[:,1],Y_pred[:,1] < Y_pred[:,0],0)\r\n np.putmask(Y_pred[:,1],Y_pred[:,2] < activation_threshold,0)\r\n np.putmask(Y_pred[:,0],Y_pred[:,2] < activation_threshold,0) \r\n\r\n # find negative in prediction\r\n pred_negatives = (Y_pred[:,0] ==0) &(Y_pred[:,1] ==0)\r\n pred_positives = ~pred_negatives\r\n obs_negatives = (Y_test[:,0] ==0) &(Y_test[:,1] ==0)\r\n obs_positives = ~obs_negatives\r\n TP = obs_positives[pred_positives].sum()\r\n FN = obs_positives[pred_negatives].sum()\r\n TN = obs_negatives[pred_negatives].sum()\r\n FP = obs_negatives[pred_positives].sum()\r\n\r\n recall = TP / float(TP + FN)\r\n precision = TP / float(TP+ FP)\r\n f1 = 2* precision*recall / (precision + recall)\r\n accuracy = (TP + TN)/ float(obs_negatives.sum() +obs_positives.sum() )\r\n if print_results:\r\n print('number of Predicted negatives:',pred_negatives.sum() )\r\n print('number of Predicted positives:',pred_positives.sum() )\r\n print('number of Observed negatives:', obs_negatives.sum() )\r\n print('number of Observed positives:', obs_positives.sum() )\r\n print('f1:', f1)\r\n print('precision :' ,precision)\r\n print('recall : ', recall)\r\n print('accuracy:', accuracy)\r\n\r\n results = {\r\n 'accuracy': accuracy,\r\n 'f1_score': f1,\r\n 'precision': precision,\r\n 'recall_score': recall}\r\n if plot_results:\r\n pd_results = pd.DataFrame.from_dict(results, orient = 'index')\r\n pd_results = pd_results.transpose() \r\n sns.barplot(data = pd_results)\r\n\r\n return results\r\n"
] | [
[
"pandas.DataFrame.from_dict",
"numpy.putmask"
]
] |
sejunssi/benchmarking_gnns | [
"881c92850f5366f7f8ca008da375037a41936681"
] | [
"tmp/smoothing_all.py"
] | [
"# %% md\n\n# Notebook for generating and saving SBM CLUSTER graphs\n\n# %%\n\nimport numpy as np\nimport torch\nimport pickle\nimport time\n\n# % matplotlib\n# inline\nimport matplotlib.pyplot as plt\nimport scipy.sparse\nimport os\nprint(os.getcwd())\n\n# %% md\n\n# Generate SBM CLUSTER graphs\n\n# %%\n\n\ndef schuffle(W, c):\n # relabel the vertices at random\n idx = np.random.permutation(W.shape[0])\n # idx2=np.argsort(idx) # for index ordering wrt classes\n W_new = W[idx, :]\n W_new = W_new[:, idx]\n c_new = c[idx]\n return W_new, c_new, idx\n\n\ndef block_model(c, p, q):\n n = len(c)\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if c[i] == c[j]:\n prob = p\n else:\n prob = q\n if np.random.binomial(1, prob) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q):\n c = []\n for r in range(nb_of_clust):\n if clust_size_max == clust_size_min:\n clust_size_r = clust_size_max\n else:\n clust_size_r = np.random.randint(clust_size_min, clust_size_max, size=1)[0]\n val_r = np.repeat(r, clust_size_r, axis=0)\n c.append(val_r)\n c = np.concatenate(c)\n W = block_model(c, p, q)\n return W, c\n\n\nclass generate_SBM_graph():\n\n def __init__(self, SBM_parameters):\n # parameters\n nb_of_clust = SBM_parameters['nb_clusters']\n clust_size_min = SBM_parameters['size_min']\n clust_size_max = SBM_parameters['size_max']\n p = SBM_parameters['p']\n q = SBM_parameters['q']\n\n # block model\n W, c = unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)\n\n # shuffle\n W, c, idx = schuffle(W, c)\n\n # signal on block model\n u = np.zeros(c.shape[0])\n for r in range(nb_of_clust):\n cluster = np.where(c == r)[0]\n s = cluster[np.random.randint(cluster.shape[0])]\n u[s] = r + 1\n\n # target\n target = c\n\n # convert to pytorch\n W = torch.from_numpy(W)\n W = W.to(torch.int8)\n idx = torch.from_numpy(idx)\n idx = idx.to(torch.int16)\n u = torch.from_numpy(u)\n u = u.to(torch.int16)\n target = torch.from_numpy(target)\n target = target.to(torch.int16)\n\n # attributes\n self.nb_nodes = W.size(0)\n self.W = W\n self.rand_idx = idx\n self.node_feat = u\n self.node_label = target\n\n\n# configuration\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 6\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 35\nSBM_parameters['p'] = 0.55\nSBM_parameters['q'] = 0.25\nprint(SBM_parameters)\n\ndata = generate_SBM_graph(SBM_parameters)\n\nprint(data)\nprint(data.nb_nodes)\nprint(data.W)\nprint(data.rand_idx)\nprint(data.node_feat)\nprint(data.node_label)\n\n# %%\n\n# Plot Adj matrix\n\nW = data.W\nplt.spy(W, precision=0.01, markersize=1)\nplt.show()\n\nidx = np.argsort(data.rand_idx)\nW = data.W\nW2 = W[idx, :]\nW2 = W2[:, idx]\nplt.spy(W2, precision=0.01, markersize=1)\nplt.show()\n\n\n# %%\n\n\n# %%\n\n# Generate and save SBM graphs\n\nclass DotDict(dict):\n def __init__(self, **kwds):\n self.update(kwds)\n self.__dict__ = self\n\n\ndef generate_semisuperclust_dataset(nb_graphs):\n dataset = []\n for i in range(nb_graphs):\n if not i % 250:\n print(i)\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset.append(graph)\n return dataset\n\nprint(\"hello\")\ndef plot_histo_graphs(dataset, title):\n # histogram of graph sizes\n graph_sizes = []\n for graph in dataset:\n graph_sizes.append(graph.nb_nodes)\n 
plt.figure(1)\n plt.hist(graph_sizes, bins=50)\n plt.title(title)\n plt.show()\n\n\ndef SBMs_CLUSTER(nb_graphs, name):\n dataset = generate_semisuperclust_dataset(nb_graphs)\n print(len(dataset))\n with open(name + '.pkl', \"wb\") as f:\n pickle.dump(dataset, f)\n plot_histo_graphs(dataset, name)\n\n\nstart = time.time()\n\n\n\n\nnb_graphs = 10000 # train\n# nb_graphs = 3333 # train\n# nb_graphs = 500 # train\n# nb_graphs = 20 # train\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_train')\n\nprint(\"hello2\")\n\nimport pickle\nimport os\nprint(os.getcwd())\nwith open('SBM_CLUSTER_train.pkl', 'rb') as f:\n data = pickle.load(f)\n#\n# with open('SBM_CLUSTER_train.pkl', 'rb') as f:\n# data = pickle.load(f)\n\n# with open('new_SBM_CLUSTER_train_before_smoothing.pkl', 'wb') as f:\n# pickle.dump(data, f)\n\n# dataset = LoadData(SBM_CLUSTER) # 29s\n# trainset, valset, testset = dataset.train, dataset.val, dataset.test\n\n# with open('SBM_CLUSTER_train.pkl', 'rb') as f:\n# data = pickle.load(f)\n\n\nimport networkx as nx\n\n# train = data\n\n#W_list = list(map(lambda d: d['W'].numpy(), data))\nW_lists = list(map(lambda d: d['W'].numpy(), data))\n#rand_idx_list = list(map(lambda d: d['rand_idx'], data))\n#node_feat_list = list(map(lambda d: d['node_feat'], data))\nnode_label_list = list(map(lambda d: d['node_label'].numpy(), data))\n\n\nclass ProgressSmoothing:\n def __init__(self, g_nx):\n self.g_nx = g_nx\n\n def _get_weight_list(self, a, m, neighbor_list_dict):\n denominator = 0\n weight_list = [0 for _ in range(m)]\n for h in range(0, m):\n weighting = np.power(a, (m - h))\n # print(len(neighbor_list_dict[h]))\n num_nodes = len(neighbor_list_dict[h])\n weight_list[h] = weighting * num_nodes\n\n # print(weighting, \"@\")\n # print(num_nodes, \"#\")\n denominator += weighting * num_nodes\n # print(type(denominator))\n # print(type(weight_list))\n # print(weight_list/denominator)\n return weight_list / denominator\n\n def nei_dict(self, hop_dict):\n neighbor_list_dict = {} # neighbor_list_dict = {which_hop: [index1, index5, ....]}\n for u, h in hop_dict.items(): # hop_dict = {neighbor_id : which_hop}\n if not h in neighbor_list_dict.keys():\n n_list = [u] # include self node\n neighbor_list_dict[h] = n_list\n else:\n neighbor_list_dict[h].append(u)\n return neighbor_list_dict\n\n def get_neigh_smooth_weight(self, v, a):\n # hop_dict = nx.single_source_shortest_path_length(self.g_nx, v)\n hop_dict = nx.single_source_shortest_path_length(self.g_nx, v, 2)\n neighbor_list_dict = self.nei_dict(hop_dict)\n # print(neighbor_list_dict)\n m = np.max(list(neighbor_list_dict.keys()))\n weight_list = self._get_weight_list(a, m, neighbor_list_dict)\n # print(weight_list)\n nidx_weight_list = []\n for h in range(0, m):\n for u in neighbor_list_dict[h]:\n nidx_weight_list.append((int(u), weight_list[h]))\n return nidx_weight_list\n\n def smooth_all(self, a, labels):\n total_nidx_weight_list = []\n for v in list(g_nx.nodes):\n # print(v)\n nidx_weight_list = self.get_neigh_smooth_weight(v, a)\n # print(nidx_weight_list)\n total_nidx_weight_list.extend(nidx_weight_list)\n smoothed_labels = labels.copy()\n smoothed_labels = smoothed_labels.astype(float)\n for u, w in total_nidx_weight_list:\n smoothed_labels[u] *= float(w)\n return smoothed_labels\n\n\ntrain_label = []\nfor W, labels in zip(W_lists, node_label_list):\n # train_W =[]\n # W = W.numpy()\n # labels = node_label_list.numpy()\n g_nx = nx.from_numpy_matrix(W)\n ps = ProgressSmoothing(g_nx=g_nx)\n # train_W.append(W)\n train_label.append(ps.smooth_all(2, 
labels))\n\nnode_label = train_label\n\n# new_data = [{'W':W, 'rand_idx': rand_idx, 'node_feat': node_feat, 'node_label': node_label}\n# for W, rand_idx, node_feat, node_label in zip(W_list, rand_idx_list, node_feat_list, node_label)]\n\nfor idx, smoothed_label in enumerate(node_label):\n data[idx]['node_label'] = torch.tensor(smoothed_label)\n\n# ps = ProgressSmoothing(g_nx=g_nx)\n# smoothed_labels = ps.smooth_all(2, labels)\n\n# with open('new_SBM_CLUSTER_train_0402_03_dataset.pkl', 'wb') as f:\n# pickle.dump(data, f)\n#\n\nwith open('SBM_CLUSTER_train.pkl', 'wb') as f:\n pickle.dump(data, f)\n\nnb_graphs = 1000 # val\n# nb_graphs = 333 # val\n# nb_graphs = 100 # val\n# nb_graphs = 5 # val\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_val')\n\nnb_graphs = 1000 # test\n# nb_graphs = 333 # test\n# nb_graphs = 100 # test\n# nb_graphs = 5 # test\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_test')\n\nprint('Time (sec):', time.time() - start) # 190s\n\n# %% md\n\n# Convert to DGL format and save with pickle\n\n# %%\n\nimport os\n\n#os.chdir('/') # go to root folder of the project\nprint(os.getcwd())\n\n# %%\n\n\nimport pickle\n\n# % load_ext\n# autoreload\n# % autoreload\n# 2\n\nimport os\nimport sys\n#sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\n#os.chdir('../../')\n#sys.path.append(\"/home/bigdyl/sejoon/benchmarking_gnn/data/SBMs.py\")\nprint(os.getcwd())\nprint(sys.path)\n\nfrom data.SBMs import SBMsDatasetDGL\n\nfrom data.data import LoadData\nfrom torch.utils.data import DataLoader\nfrom data.SBMs import SBMsDataset\n\n# %%\n\nDATASET_NAME = 'SBM_CLUSTER'\ndataset = SBMsDatasetDGL(DATASET_NAME) # 3983s\n\n# %%\n\nprint(len(dataset.train))\nprint(len(dataset.val))\nprint(len(dataset.test))\n\nprint(dataset.train[0])\nprint(dataset.val[0])\nprint(dataset.test[0])\n\n# %%\n\nstart = time.time()\n\nwith open('data/SBMs/SBM_CLUSTER_a2.pkl', 'wb') as f:\n pickle.dump([dataset.train, dataset.val, dataset.test], f)\n\nprint('Time (sec):', time.time() - start)\n\n# %% md\n\n# # Test load function\n#\n# # %%\n#\n# DATASET_NAME = 'SBM_CLUSTER'\n# dataset = LoadData(DATASET_NAME) # 29s\n# trainset, valset, testset = dataset.train, dataset.val, dataset.test\n#\n# # %%\n#\n# start = time.time()\n#\n# batch_size = 10\n# collate = SBMsDataset.collate\n# print(SBMsDataset)\n# train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True, collate_fn=collate)\n#\n# print('Time (sec):', time.time() - start) # 0.002s\n\n# %%\n# %% md\n\n# Notebook for generating and saving SBM CLUSTER graphs\n\n# %%\n\n# %% md\n\n# Notebook for generating and saving SBM CLUSTER graphs\n\n# %%\n\nimport numpy as np\nimport torch\nimport pickle\nimport time\n\n# % matplotlib\n# inline\nimport matplotlib.pyplot as plt\nimport scipy.sparse\nimport os\nprint(os.getcwd())\n\n# %% md\n\n# Generate SBM CLUSTER graphs\n\n# %%\n\n\ndef schuffle(W, c):\n # relabel the vertices at random\n idx = np.random.permutation(W.shape[0])\n # idx2=np.argsort(idx) # for index ordering wrt classes\n W_new = W[idx, :]\n W_new = W_new[:, idx]\n c_new = c[idx]\n return W_new, c_new, idx\n\n\ndef block_model(c, p, q):\n n = len(c)\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if c[i] == c[j]:\n prob = p\n else:\n prob = q\n if np.random.binomial(1, prob) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q):\n c = []\n for r in range(nb_of_clust):\n if clust_size_max == clust_size_min:\n clust_size_r = clust_size_max\n 
else:\n clust_size_r = np.random.randint(clust_size_min, clust_size_max, size=1)[0]\n val_r = np.repeat(r, clust_size_r, axis=0)\n c.append(val_r)\n c = np.concatenate(c)\n W = block_model(c, p, q)\n return W, c\n\n\nclass generate_SBM_graph():\n\n def __init__(self, SBM_parameters):\n # parameters\n nb_of_clust = SBM_parameters['nb_clusters']\n clust_size_min = SBM_parameters['size_min']\n clust_size_max = SBM_parameters['size_max']\n p = SBM_parameters['p']\n q = SBM_parameters['q']\n\n # block model\n W, c = unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)\n\n # shuffle\n W, c, idx = schuffle(W, c)\n\n # signal on block model\n u = np.zeros(c.shape[0])\n for r in range(nb_of_clust):\n cluster = np.where(c == r)[0]\n s = cluster[np.random.randint(cluster.shape[0])]\n u[s] = r + 1\n\n # target\n target = c\n\n # convert to pytorch\n W = torch.from_numpy(W)\n W = W.to(torch.int8)\n idx = torch.from_numpy(idx)\n idx = idx.to(torch.int16)\n u = torch.from_numpy(u)\n u = u.to(torch.int16)\n target = torch.from_numpy(target)\n target = target.to(torch.int16)\n\n # attributes\n self.nb_nodes = W.size(0)\n self.W = W\n self.rand_idx = idx\n self.node_feat = u\n self.node_label = target\n\n\n# configuration\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 6\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 35\nSBM_parameters['p'] = 0.55\nSBM_parameters['q'] = 0.25\nprint(SBM_parameters)\n\ndata = generate_SBM_graph(SBM_parameters)\n\nprint(data)\nprint(data.nb_nodes)\nprint(data.W)\nprint(data.rand_idx)\nprint(data.node_feat)\nprint(data.node_label)\n\n# %%\n\n# Plot Adj matrix\n\nW = data.W\nplt.spy(W, precision=0.01, markersize=1)\nplt.show()\n\nidx = np.argsort(data.rand_idx)\nW = data.W\nW2 = W[idx, :]\nW2 = W2[:, idx]\nplt.spy(W2, precision=0.01, markersize=1)\nplt.show()\n\n\n# %%\n\n\n# %%\n\n# Generate and save SBM graphs\n\nclass DotDict(dict):\n def __init__(self, **kwds):\n self.update(kwds)\n self.__dict__ = self\n\n\ndef generate_semisuperclust_dataset(nb_graphs):\n dataset = []\n for i in range(nb_graphs):\n if not i % 250:\n print(i)\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset.append(graph)\n return dataset\n\nprint(\"hello\")\ndef plot_histo_graphs(dataset, title):\n # histogram of graph sizes\n graph_sizes = []\n for graph in dataset:\n graph_sizes.append(graph.nb_nodes)\n plt.figure(1)\n plt.hist(graph_sizes, bins=50)\n plt.title(title)\n plt.show()\n\n\ndef SBMs_CLUSTER(nb_graphs, name):\n dataset = generate_semisuperclust_dataset(nb_graphs)\n print(len(dataset))\n with open(name + '.pkl', \"wb\") as f:\n pickle.dump(dataset, f)\n plot_histo_graphs(dataset, name)\n\n\nstart = time.time()\n\n\n\n\nnb_graphs = 10000 # train\n# nb_graphs = 3333 # train\n# nb_graphs = 500 # train\n# nb_graphs = 20 # train\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_train')\n\nprint(\"hello2\")\n\nimport pickle\nimport os\nprint(os.getcwd())\nwith open('SBM_CLUSTER_train.pkl', 'rb') as f:\n data = pickle.load(f)\n#\n# with open('SBM_CLUSTER_train.pkl', 'rb') as f:\n# data = pickle.load(f)\n\n# with open('new_SBM_CLUSTER_train_before_smoothing.pkl', 'wb') as f:\n# pickle.dump(data, f)\n\n# dataset = LoadData(SBM_CLUSTER) # 29s\n# trainset, valset, testset = dataset.train, dataset.val, dataset.test\n\n# with open('SBM_CLUSTER_train.pkl', 'rb') as f:\n# data = 
pickle.load(f)\n\n\nimport networkx as nx\n\n# train = data\n\n#W_list = list(map(lambda d: d['W'].numpy(), data))\nW_lists = list(map(lambda d: d['W'].numpy(), data))\nrand_idx_list = list(map(lambda d: d['rand_idx'], data))\nnode_feat_list = list(map(lambda d: d['node_feat'], data))\nnode_label_list = list(map(lambda d: d['node_label'].numpy(), data))\n\n\nclass ProgressSmoothing:\n def __init__(self, g_nx):\n self.g_nx = g_nx\n\n def _get_weight_list(self, a, m, neighbor_list_dict):\n denominator = 0\n weight_list = [0 for _ in range(m)]\n for h in range(0, m):\n weighting = np.power(a, (m - h))\n # print(len(neighbor_list_dict[h]))\n num_nodes = len(neighbor_list_dict[h])\n weight_list[h] = weighting * num_nodes\n\n # print(weighting, \"@\")\n # print(num_nodes, \"#\")\n denominator += weighting * num_nodes\n # print(type(denominator))\n # print(type(weight_list))\n # print(weight_list/denominator)\n return weight_list / denominator\n\n def nei_dict(self, hop_dict):\n neighbor_list_dict = {} # neighbor_list_dict = {which_hop: [index1, index5, ....]}\n for u, h in hop_dict.items(): # hop_dict = {neighbor_id : which_hop}\n if not h in neighbor_list_dict.keys():\n n_list = [u] # include self node\n neighbor_list_dict[h] = n_list\n else:\n neighbor_list_dict[h].append(u)\n return neighbor_list_dict\n\n def get_neigh_smooth_weight(self, v, a):\n # hop_dict = nx.single_source_shortest_path_length(self.g_nx, v)\n hop_dict = nx.single_source_shortest_path_length(self.g_nx, v, 2)\n neighbor_list_dict = self.nei_dict(hop_dict)\n # print(neighbor_list_dict)\n m = np.max(list(neighbor_list_dict.keys()))\n weight_list = self._get_weight_list(a, m, neighbor_list_dict)\n # print(weight_list)\n nidx_weight_list = []\n for h in range(0, m):\n for u in neighbor_list_dict[h]:\n nidx_weight_list.append((int(u), weight_list[h]))\n return nidx_weight_list\n\n def smooth_all(self, a, labels):\n total_nidx_weight_list = []\n for v in list(g_nx.nodes):\n # print(v)\n nidx_weight_list = self.get_neigh_smooth_weight(v, a)\n # print(nidx_weight_list)\n total_nidx_weight_list.extend(nidx_weight_list)\n smoothed_labels = labels.copy()\n smoothed_labels = smoothed_labels.astype(float)\n for u, w in total_nidx_weight_list:\n smoothed_labels[u] *= float(w)\n return smoothed_labels\n\n\ntrain_label = []\nfor W, labels in zip(W_lists, node_label_list):\n # train_W =[]\n # W = W.numpy()\n # labels = node_label_list.numpy()\n g_nx = nx.from_numpy_matrix(W)\n ps = ProgressSmoothing(g_nx=g_nx)\n # train_W.append(W)\n train_label.append(ps.smooth_all(3, labels))\n\nnode_label = train_label\n\n# new_data = [{'W':W, 'rand_idx': rand_idx, 'node_feat': node_feat, 'node_label': node_label}\n# for W, rand_idx, node_feat, node_label in zip(W_list, rand_idx_list, node_feat_list, node_label)]\n\nfor idx, smoothed_label in enumerate(node_label):\n data[idx]['node_label'] = torch.tensor(smoothed_label)\n\n# ps = ProgressSmoothing(g_nx=g_nx)\n# smoothed_labels = ps.smooth_all(2, labels)\n\n# with open('new_SBM_CLUSTER_train_0402_03_dataset.pkl', 'wb') as f:\n# pickle.dump(data, f)\n#\nwith open('SBM_CLUSTER_train.pkl', 'wb') as f:\n pickle.dump(data, f)\n\nnb_graphs = 1000 # val\n# nb_graphs = 333 # val\n# nb_graphs = 100 # val\n# nb_graphs = 5 # val\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_val')\n\nnb_graphs = 1000 # test\n# nb_graphs = 333 # test\n# nb_graphs = 100 # test\n# nb_graphs = 5 # test\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_test')\n\nprint('Time (sec):', time.time() - start) # 190s\n\n# %% md\n\n# Convert to DGL format and save 
with pickle\n\n# %%\n\nimport os\n\n#os.chdir('/') # go to root folder of the project\nprint(os.getcwd())\n\n# %%\n\n\nimport pickle\n\n# % load_ext\n# autoreload\n# % autoreload\n# 2\n\nfrom data.SBMs import SBMsDatasetDGL\n\nfrom data.data import LoadData\nfrom torch.utils.data import DataLoader\nfrom data.SBMs import SBMsDataset\n\n# %%\n\nDATASET_NAME = 'SBM_CLUSTER'\ndataset = SBMsDatasetDGL(DATASET_NAME) # 3983s\n\n# %%\n\nprint(len(dataset.train))\nprint(len(dataset.val))\nprint(len(dataset.test))\n\nprint(dataset.train[0])\nprint(dataset.val[0])\nprint(dataset.test[0])\n\n# %%\n\nstart = time.time()\n\nwith open('data/SBMs/SBM_CLUSTER_a3.pkl', 'wb') as f:\n pickle.dump([dataset.train, dataset.val, dataset.test], f)\n\nprint('Time (sec):', time.time() - start)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# %% md\n\n# Notebook for generating and saving SBM CLUSTER graphs\n\n# %%\n\nimport numpy as np\nimport torch\nimport pickle\nimport time\n\n# % matplotlib\n# inline\nimport matplotlib.pyplot as plt\nimport scipy.sparse\nimport os\nprint(os.getcwd())\n\n# %% md\n\n# Generate SBM CLUSTER graphs\n\n# %%\n\n\ndef schuffle(W, c):\n # relabel the vertices at random\n idx = np.random.permutation(W.shape[0])\n # idx2=np.argsort(idx) # for index ordering wrt classes\n W_new = W[idx, :]\n W_new = W_new[:, idx]\n c_new = c[idx]\n return W_new, c_new, idx\n\n\ndef block_model(c, p, q):\n n = len(c)\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if c[i] == c[j]:\n prob = p\n else:\n prob = q\n if np.random.binomial(1, prob) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q):\n c = []\n for r in range(nb_of_clust):\n if clust_size_max == clust_size_min:\n clust_size_r = clust_size_max\n else:\n clust_size_r = np.random.randint(clust_size_min, clust_size_max, size=1)[0]\n val_r = np.repeat(r, clust_size_r, axis=0)\n c.append(val_r)\n c = np.concatenate(c)\n W = block_model(c, p, q)\n return W, c\n\n\nclass generate_SBM_graph():\n\n def __init__(self, SBM_parameters):\n # parameters\n nb_of_clust = SBM_parameters['nb_clusters']\n clust_size_min = SBM_parameters['size_min']\n clust_size_max = SBM_parameters['size_max']\n p = SBM_parameters['p']\n q = SBM_parameters['q']\n\n # block model\n W, c = unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)\n\n # shuffle\n W, c, idx = schuffle(W, c)\n\n # signal on block model\n u = np.zeros(c.shape[0])\n for r in range(nb_of_clust):\n cluster = np.where(c == r)[0]\n s = cluster[np.random.randint(cluster.shape[0])]\n u[s] = r + 1\n\n # target\n target = c\n\n # convert to pytorch\n W = torch.from_numpy(W)\n W = W.to(torch.int8)\n idx = torch.from_numpy(idx)\n idx = idx.to(torch.int16)\n u = torch.from_numpy(u)\n u = u.to(torch.int16)\n target = torch.from_numpy(target)\n target = target.to(torch.int16)\n\n # attributes\n self.nb_nodes = W.size(0)\n self.W = W\n self.rand_idx = idx\n self.node_feat = u\n self.node_label = target\n\n\n# configuration\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 6\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 35\nSBM_parameters['p'] = 0.55\nSBM_parameters['q'] = 0.25\nprint(SBM_parameters)\n\ndata = generate_SBM_graph(SBM_parameters)\n\nprint(data)\nprint(data.nb_nodes)\nprint(data.W)\nprint(data.rand_idx)\nprint(data.node_feat)\nprint(data.node_label)\n\n# %%\n\n# Plot Adj matrix\n\nW = data.W\nplt.spy(W, precision=0.01, markersize=1)\nplt.show()\n\nidx = np.argsort(data.rand_idx)\nW = 
data.W\nW2 = W[idx, :]\nW2 = W2[:, idx]\nplt.spy(W2, precision=0.01, markersize=1)\nplt.show()\n\n\n# %%\n\n\n# %%\n\n# Generate and save SBM graphs\n\nclass DotDict(dict):\n def __init__(self, **kwds):\n self.update(kwds)\n self.__dict__ = self\n\n\ndef generate_semisuperclust_dataset(nb_graphs):\n dataset = []\n for i in range(nb_graphs):\n if not i % 250:\n print(i)\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset.append(graph)\n return dataset\n\nprint(\"hello\")\ndef plot_histo_graphs(dataset, title):\n # histogram of graph sizes\n graph_sizes = []\n for graph in dataset:\n graph_sizes.append(graph.nb_nodes)\n plt.figure(1)\n plt.hist(graph_sizes, bins=50)\n plt.title(title)\n plt.show()\n\n\ndef SBMs_CLUSTER(nb_graphs, name):\n dataset = generate_semisuperclust_dataset(nb_graphs)\n print(len(dataset))\n with open(name + '.pkl', \"wb\") as f:\n pickle.dump(dataset, f)\n plot_histo_graphs(dataset, name)\n\n\nstart = time.time()\n\n\n\n\nnb_graphs = 10000 # train\n# nb_graphs = 3333 # train\n# nb_graphs = 500 # train\n# nb_graphs = 20 # train\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_train')\n\nprint(\"hello2\")\n\nimport pickle\nimport os\nprint(os.getcwd())\nwith open('SBM_CLUSTER_train.pkl', 'rb') as f:\n data = pickle.load(f)\n#\n# with open('SBM_CLUSTER_train.pkl', 'rb') as f:\n# data = pickle.load(f)\n\n# with open('new_SBM_CLUSTER_train_before_smoothing.pkl', 'wb') as f:\n# pickle.dump(data, f)\n\n# dataset = LoadData(SBM_CLUSTER) # 29s\n# trainset, valset, testset = dataset.train, dataset.val, dataset.test\n\n# with open('SBM_CLUSTER_train.pkl', 'rb') as f:\n# data = pickle.load(f)\n\n\nimport networkx as nx\n\n# train = data\n\n#W_list = list(map(lambda d: d['W'].numpy(), data))\nW_lists = list(map(lambda d: d['W'].numpy(), data))\nrand_idx_list = list(map(lambda d: d['rand_idx'], data))\nnode_feat_list = list(map(lambda d: d['node_feat'], data))\nnode_label_list = list(map(lambda d: d['node_label'].numpy(), data))\n\n\nclass ProgressSmoothing:\n def __init__(self, g_nx):\n self.g_nx = g_nx\n\n def _get_weight_list(self, a, m, neighbor_list_dict):\n denominator = 0\n weight_list = [0 for _ in range(m)]\n for h in range(0, m):\n weighting = np.power(a, (m - h))\n # print(len(neighbor_list_dict[h]))\n num_nodes = len(neighbor_list_dict[h])\n weight_list[h] = weighting * num_nodes\n\n # print(weighting, \"@\")\n # print(num_nodes, \"#\")\n denominator += weighting * num_nodes\n # print(type(denominator))\n # print(type(weight_list))\n # print(weight_list/denominator)\n return weight_list / denominator\n\n def nei_dict(self, hop_dict):\n neighbor_list_dict = {} # neighbor_list_dict = {which_hop: [index1, index5, ....]}\n for u, h in hop_dict.items(): # hop_dict = {neighbor_id : which_hop}\n if not h in neighbor_list_dict.keys():\n n_list = [u] # include self node\n neighbor_list_dict[h] = n_list\n else:\n neighbor_list_dict[h].append(u)\n return neighbor_list_dict\n\n def get_neigh_smooth_weight(self, v, a):\n # hop_dict = nx.single_source_shortest_path_length(self.g_nx, v)\n hop_dict = nx.single_source_shortest_path_length(self.g_nx, v, 2)\n neighbor_list_dict = self.nei_dict(hop_dict)\n # print(neighbor_list_dict)\n m = np.max(list(neighbor_list_dict.keys()))\n weight_list = self._get_weight_list(a, m, neighbor_list_dict)\n # print(weight_list)\n nidx_weight_list = []\n for h in range(0, m):\n for u in 
neighbor_list_dict[h]:\n nidx_weight_list.append((int(u), weight_list[h]))\n return nidx_weight_list\n\n def smooth_all(self, a, labels):\n total_nidx_weight_list = []\n for v in list(g_nx.nodes):\n # print(v)\n nidx_weight_list = self.get_neigh_smooth_weight(v, a)\n # print(nidx_weight_list)\n total_nidx_weight_list.extend(nidx_weight_list)\n smoothed_labels = labels.copy()\n smoothed_labels = smoothed_labels.astype(float)\n for u, w in total_nidx_weight_list:\n smoothed_labels[u] *= float(w)\n return smoothed_labels\n\n\ntrain_label = []\nfor W, labels in zip(W_lists, node_label_list):\n # train_W =[]\n # W = W.numpy()\n # labels = node_label_list.numpy()\n g_nx = nx.from_numpy_matrix(W)\n ps = ProgressSmoothing(g_nx=g_nx)\n # train_W.append(W)\n train_label.append(ps.smooth_all(4, labels))\n\nnode_label = train_label\n\n# new_data = [{'W':W, 'rand_idx': rand_idx, 'node_feat': node_feat, 'node_label': node_label}\n# for W, rand_idx, node_feat, node_label in zip(W_list, rand_idx_list, node_feat_list, node_label)]\n\nfor idx, smoothed_label in enumerate(node_label):\n data[idx]['node_label'] = torch.tensor(smoothed_label)\n\n# ps = ProgressSmoothing(g_nx=g_nx)\n# smoothed_labels = ps.smooth_all(2, labels)\n\n# with open('new_SBM_CLUSTER_train_0402_03_dataset.pkl', 'wb') as f:\n# pickle.dump(data, f)\n#\nwith open('SBM_CLUSTER_train.pkl', 'wb') as f:\n pickle.dump(data, f)\n\nnb_graphs = 1000 # val\n# nb_graphs = 333 # val\n# nb_graphs = 100 # val\n# nb_graphs = 5 # val\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_val')\n\nnb_graphs = 1000 # test\n# nb_graphs = 333 # test\n# nb_graphs = 100 # test\n# nb_graphs = 5 # test\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_test')\n\nprint('Time (sec):', time.time() - start) # 190s\n\n# %% md\n\n# Convert to DGL format and save with pickle\n\n# %%\n\nimport os\n\n#os.chdir('/') # go to root folder of the project\nprint(os.getcwd())\n\n# %%\n\n\nimport pickle\n\n# % load_ext\n# autoreload\n# % autoreload\n# 2\n\nfrom data.SBMs import SBMsDatasetDGL\n\nfrom data.data import LoadData\nfrom torch.utils.data import DataLoader\nfrom data.SBMs import SBMsDataset\n\n# %%\n\nDATASET_NAME = 'SBM_CLUSTER'\ndataset = SBMsDatasetDGL(DATASET_NAME) # 3983s\n\n# %%\n\nprint(len(dataset.train))\nprint(len(dataset.val))\nprint(len(dataset.test))\n\nprint(dataset.train[0])\nprint(dataset.val[0])\nprint(dataset.test[0])\n\n# %%\n\nstart = time.time()\n\nwith open('data/SBMs/SBM_CLUSTER_a4.pkl', 'wb') as f:\n pickle.dump([dataset.train, dataset.val, dataset.test], f)\n\nprint('Time (sec):', time.time() - start)\n\n\n\n\n\n\n\n\n\n# %% md\n\n# Notebook for generating and saving SBM CLUSTER graphs\n\n# %%\n\nimport numpy as np\nimport torch\nimport pickle\nimport time\n\n# % matplotlib\n# inline\nimport matplotlib.pyplot as plt\nimport scipy.sparse\nimport os\nprint(os.getcwd())\n\n# %% md\n\n# Generate SBM CLUSTER graphs\n\n# %%\n\n\ndef schuffle(W, c):\n # relabel the vertices at random\n idx = np.random.permutation(W.shape[0])\n # idx2=np.argsort(idx) # for index ordering wrt classes\n W_new = W[idx, :]\n W_new = W_new[:, idx]\n c_new = c[idx]\n return W_new, c_new, idx\n\n\ndef block_model(c, p, q):\n n = len(c)\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if c[i] == c[j]:\n prob = p\n else:\n prob = q\n if np.random.binomial(1, prob) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q):\n c = []\n for r in range(nb_of_clust):\n if clust_size_max == clust_size_min:\n clust_size_r 
= clust_size_max\n else:\n clust_size_r = np.random.randint(clust_size_min, clust_size_max, size=1)[0]\n val_r = np.repeat(r, clust_size_r, axis=0)\n c.append(val_r)\n c = np.concatenate(c)\n W = block_model(c, p, q)\n return W, c\n\n\nclass generate_SBM_graph():\n\n def __init__(self, SBM_parameters):\n # parameters\n nb_of_clust = SBM_parameters['nb_clusters']\n clust_size_min = SBM_parameters['size_min']\n clust_size_max = SBM_parameters['size_max']\n p = SBM_parameters['p']\n q = SBM_parameters['q']\n\n # block model\n W, c = unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)\n\n # shuffle\n W, c, idx = schuffle(W, c)\n\n # signal on block model\n u = np.zeros(c.shape[0])\n for r in range(nb_of_clust):\n cluster = np.where(c == r)[0]\n s = cluster[np.random.randint(cluster.shape[0])]\n u[s] = r + 1\n\n # target\n target = c\n\n # convert to pytorch\n W = torch.from_numpy(W)\n W = W.to(torch.int8)\n idx = torch.from_numpy(idx)\n idx = idx.to(torch.int16)\n u = torch.from_numpy(u)\n u = u.to(torch.int16)\n target = torch.from_numpy(target)\n target = target.to(torch.int16)\n\n # attributes\n self.nb_nodes = W.size(0)\n self.W = W\n self.rand_idx = idx\n self.node_feat = u\n self.node_label = target\n\n\n# configuration\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 6\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 35\nSBM_parameters['p'] = 0.55\nSBM_parameters['q'] = 0.25\nprint(SBM_parameters)\n\ndata = generate_SBM_graph(SBM_parameters)\n\nprint(data)\nprint(data.nb_nodes)\nprint(data.W)\nprint(data.rand_idx)\nprint(data.node_feat)\nprint(data.node_label)\n\n# %%\n\n# Plot Adj matrix\n\nW = data.W\nplt.spy(W, precision=0.01, markersize=1)\nplt.show()\n\nidx = np.argsort(data.rand_idx)\nW = data.W\nW2 = W[idx, :]\nW2 = W2[:, idx]\nplt.spy(W2, precision=0.01, markersize=1)\nplt.show()\n\n\n# %%\n\n\n# %%\n\n# Generate and save SBM graphs\n\nclass DotDict(dict):\n def __init__(self, **kwds):\n self.update(kwds)\n self.__dict__ = self\n\n\ndef generate_semisuperclust_dataset(nb_graphs):\n dataset = []\n for i in range(nb_graphs):\n if not i % 250:\n print(i)\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset.append(graph)\n return dataset\n\nprint(\"hello\")\ndef plot_histo_graphs(dataset, title):\n # histogram of graph sizes\n graph_sizes = []\n for graph in dataset:\n graph_sizes.append(graph.nb_nodes)\n plt.figure(1)\n plt.hist(graph_sizes, bins=50)\n plt.title(title)\n plt.show()\n\n\ndef SBMs_CLUSTER(nb_graphs, name):\n dataset = generate_semisuperclust_dataset(nb_graphs)\n print(len(dataset))\n with open(name + '.pkl', \"wb\") as f:\n pickle.dump(dataset, f)\n plot_histo_graphs(dataset, name)\n\n\nstart = time.time()\n\n\n\n\nnb_graphs = 10000 # train\n# nb_graphs = 3333 # train\n# nb_graphs = 500 # train\n# nb_graphs = 20 # train\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_train')\n\nprint(\"hello2\")\n\nimport pickle\nimport os\nprint(os.getcwd())\nwith open('SBM_CLUSTER_train.pkl', 'rb') as f:\n data = pickle.load(f)\n#\n# with open('SBM_CLUSTER_train.pkl', 'rb') as f:\n# data = pickle.load(f)\n\n# with open('new_SBM_CLUSTER_train_before_smoothing.pkl', 'wb') as f:\n# pickle.dump(data, f)\n\n# dataset = LoadData(SBM_CLUSTER) # 29s\n# trainset, valset, testset = dataset.train, dataset.val, dataset.test\n\n# with open('SBM_CLUSTER_train.pkl', 'rb') as f:\n# data = 
pickle.load(f)\n\n\nimport networkx as nx\n\n# train = data\n\n#W_list = list(map(lambda d: d['W'].numpy(), data))\nW_lists = list(map(lambda d: d['W'].numpy(), data))\nrand_idx_list = list(map(lambda d: d['rand_idx'], data))\nnode_feat_list = list(map(lambda d: d['node_feat'], data))\nnode_label_list = list(map(lambda d: d['node_label'].numpy(), data))\n\n\nclass ProgressSmoothing:\n def __init__(self, g_nx):\n self.g_nx = g_nx\n\n def _get_weight_list(self, a, m, neighbor_list_dict):\n denominator = 0\n weight_list = [0 for _ in range(m)]\n for h in range(0, m):\n weighting = np.power(a, (m - h))\n # print(len(neighbor_list_dict[h]))\n num_nodes = len(neighbor_list_dict[h])\n weight_list[h] = weighting * num_nodes\n\n # print(weighting, \"@\")\n # print(num_nodes, \"#\")\n denominator += weighting * num_nodes\n # print(type(denominator))\n # print(type(weight_list))\n # print(weight_list/denominator)\n return weight_list / denominator\n\n def nei_dict(self, hop_dict):\n neighbor_list_dict = {} # neighbor_list_dict = {which_hop: [index1, index5, ....]}\n for u, h in hop_dict.items(): # hop_dict = {neighbor_id : which_hop}\n if not h in neighbor_list_dict.keys():\n n_list = [u] # include self node\n neighbor_list_dict[h] = n_list\n else:\n neighbor_list_dict[h].append(u)\n return neighbor_list_dict\n\n def get_neigh_smooth_weight(self, v, a):\n # hop_dict = nx.single_source_shortest_path_length(self.g_nx, v)\n hop_dict = nx.single_source_shortest_path_length(self.g_nx, v, 2)\n neighbor_list_dict = self.nei_dict(hop_dict)\n # print(neighbor_list_dict)\n m = np.max(list(neighbor_list_dict.keys()))\n weight_list = self._get_weight_list(a, m, neighbor_list_dict)\n # print(weight_list)\n nidx_weight_list = []\n for h in range(0, m):\n for u in neighbor_list_dict[h]:\n nidx_weight_list.append((int(u), weight_list[h]))\n return nidx_weight_list\n\n def smooth_all(self, a, labels):\n total_nidx_weight_list = []\n for v in list(g_nx.nodes):\n # print(v)\n nidx_weight_list = self.get_neigh_smooth_weight(v, a)\n # print(nidx_weight_list)\n total_nidx_weight_list.extend(nidx_weight_list)\n smoothed_labels = labels.copy()\n smoothed_labels = smoothed_labels.astype(float)\n for u, w in total_nidx_weight_list:\n smoothed_labels[u] *= float(w)\n return smoothed_labels\n\n\ntrain_label = []\nfor W, labels in zip(W_lists, node_label_list):\n # train_W =[]\n # W = W.numpy()\n # labels = node_label_list.numpy()\n g_nx = nx.from_numpy_matrix(W)\n ps = ProgressSmoothing(g_nx=g_nx)\n # train_W.append(W)\n train_label.append(ps.smooth_all(6, labels))\n\nnode_label = train_label\n\n# new_data = [{'W':W, 'rand_idx': rand_idx, 'node_feat': node_feat, 'node_label': node_label}\n# for W, rand_idx, node_feat, node_label in zip(W_list, rand_idx_list, node_feat_list, node_label)]\n\nfor idx, smoothed_label in enumerate(node_label):\n data[idx]['node_label'] = torch.tensor(smoothed_label)\n\n# ps = ProgressSmoothing(g_nx=g_nx)\n# smoothed_labels = ps.smooth_all(2, labels)\n\n# with open('new_SBM_CLUSTER_train_0402_03_dataset.pkl', 'wb') as f:\n# pickle.dump(data, f)\n#\nwith open('SBM_CLUSTER_train.pkl', 'wb') as f:\n pickle.dump(data, f)\n\nnb_graphs = 1000 # val\n# nb_graphs = 333 # val\n# nb_graphs = 100 # val\n# nb_graphs = 5 # val\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_val')\n\nnb_graphs = 1000 # test\n# nb_graphs = 333 # test\n# nb_graphs = 100 # test\n# nb_graphs = 5 # test\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_test')\n\nprint('Time (sec):', time.time() - start) # 190s\n\n# %% md\n\n# Convert to DGL format and save 
with pickle\n\n# %%\n\nimport os\n\n#os.chdir('/') # go to root folder of the project\nprint(os.getcwd())\n\n# %%\n\n\nimport pickle\n\n# % load_ext\n# autoreload\n# % autoreload\n# 2\n\nfrom data.SBMs import SBMsDatasetDGL\n\nfrom data.data import LoadData\nfrom torch.utils.data import DataLoader\nfrom data.SBMs import SBMsDataset\n\n# %%\n\nDATASET_NAME = 'SBM_CLUSTER'\ndataset = SBMsDatasetDGL(DATASET_NAME) # 3983s\n\n# %%\n\nprint(len(dataset.train))\nprint(len(dataset.val))\nprint(len(dataset.test))\n\nprint(dataset.train[0])\nprint(dataset.val[0])\nprint(dataset.test[0])\n\n# %%\n\nstart = time.time()\n\nwith open('data/SBMs/SBM_CLUSTER_a6.pkl', 'wb') as f:\n pickle.dump([dataset.train, dataset.val, dataset.test], f)\n\nprint('Time (sec):', time.time() - start)\n\n\n\n\n\n\n\n\n# %% md\n\n# Notebook for generating and saving SBM CLUSTER graphs\n\n# %%\n\nimport numpy as np\nimport torch\nimport pickle\nimport time\n\n# % matplotlib\n# inline\nimport matplotlib.pyplot as plt\nimport scipy.sparse\nimport os\nprint(os.getcwd())\n\n# %% md\n\n# Generate SBM CLUSTER graphs\n\n# %%\n\n\ndef schuffle(W, c):\n # relabel the vertices at random\n idx = np.random.permutation(W.shape[0])\n # idx2=np.argsort(idx) # for index ordering wrt classes\n W_new = W[idx, :]\n W_new = W_new[:, idx]\n c_new = c[idx]\n return W_new, c_new, idx\n\n\ndef block_model(c, p, q):\n n = len(c)\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if c[i] == c[j]:\n prob = p\n else:\n prob = q\n if np.random.binomial(1, prob) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q):\n c = []\n for r in range(nb_of_clust):\n if clust_size_max == clust_size_min:\n clust_size_r = clust_size_max\n else:\n clust_size_r = np.random.randint(clust_size_min, clust_size_max, size=1)[0]\n val_r = np.repeat(r, clust_size_r, axis=0)\n c.append(val_r)\n c = np.concatenate(c)\n W = block_model(c, p, q)\n return W, c\n\n\nclass generate_SBM_graph():\n\n def __init__(self, SBM_parameters):\n # parameters\n nb_of_clust = SBM_parameters['nb_clusters']\n clust_size_min = SBM_parameters['size_min']\n clust_size_max = SBM_parameters['size_max']\n p = SBM_parameters['p']\n q = SBM_parameters['q']\n\n # block model\n W, c = unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)\n\n # shuffle\n W, c, idx = schuffle(W, c)\n\n # signal on block model\n u = np.zeros(c.shape[0])\n for r in range(nb_of_clust):\n cluster = np.where(c == r)[0]\n s = cluster[np.random.randint(cluster.shape[0])]\n u[s] = r + 1\n\n # target\n target = c\n\n # convert to pytorch\n W = torch.from_numpy(W)\n W = W.to(torch.int8)\n idx = torch.from_numpy(idx)\n idx = idx.to(torch.int16)\n u = torch.from_numpy(u)\n u = u.to(torch.int16)\n target = torch.from_numpy(target)\n target = target.to(torch.int16)\n\n # attributes\n self.nb_nodes = W.size(0)\n self.W = W\n self.rand_idx = idx\n self.node_feat = u\n self.node_label = target\n\n\n# configuration\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 6\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 35\nSBM_parameters['p'] = 0.55\nSBM_parameters['q'] = 0.25\nprint(SBM_parameters)\n\ndata = generate_SBM_graph(SBM_parameters)\n\nprint(data)\nprint(data.nb_nodes)\nprint(data.W)\nprint(data.rand_idx)\nprint(data.node_feat)\nprint(data.node_label)\n\n# %%\n\n# Plot Adj matrix\n\nW = data.W\nplt.spy(W, precision=0.01, markersize=1)\nplt.show()\n\nidx = np.argsort(data.rand_idx)\nW = data.W\nW2 = W[idx, 
:]\nW2 = W2[:, idx]\nplt.spy(W2, precision=0.01, markersize=1)\nplt.show()\n\n\n# %%\n\n\n# %%\n\n# Generate and save SBM graphs\n\nclass DotDict(dict):\n def __init__(self, **kwds):\n self.update(kwds)\n self.__dict__ = self\n\n\ndef generate_semisuperclust_dataset(nb_graphs):\n dataset = []\n for i in range(nb_graphs):\n if not i % 250:\n print(i)\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset.append(graph)\n return dataset\n\nprint(\"hello\")\ndef plot_histo_graphs(dataset, title):\n # histogram of graph sizes\n graph_sizes = []\n for graph in dataset:\n graph_sizes.append(graph.nb_nodes)\n plt.figure(1)\n plt.hist(graph_sizes, bins=50)\n plt.title(title)\n plt.show()\n\n\ndef SBMs_CLUSTER(nb_graphs, name):\n dataset = generate_semisuperclust_dataset(nb_graphs)\n print(len(dataset))\n with open(name + '.pkl', \"wb\") as f:\n pickle.dump(dataset, f)\n plot_histo_graphs(dataset, name)\n\n\nstart = time.time()\n\n\n\n\nnb_graphs = 10000 # train\n# nb_graphs = 3333 # train\n# nb_graphs = 500 # train\n# nb_graphs = 20 # train\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_train')\n\nprint(\"hello2\")\n\nimport pickle\nimport os\nprint(os.getcwd())\nwith open('SBM_CLUSTER_train.pkl', 'rb') as f:\n data = pickle.load(f)\n#\n# with open('SBM_CLUSTER_train.pkl', 'rb') as f:\n# data = pickle.load(f)\n\n# with open('new_SBM_CLUSTER_train_before_smoothing.pkl', 'wb') as f:\n# pickle.dump(data, f)\n\n# dataset = LoadData(SBM_CLUSTER) # 29s\n# trainset, valset, testset = dataset.train, dataset.val, dataset.test\n\n# with open('SBM_CLUSTER_train.pkl', 'rb') as f:\n# data = pickle.load(f)\n\n\nimport networkx as nx\n\n# train = data\n\n#W_list = list(map(lambda d: d['W'].numpy(), data))\nW_lists = list(map(lambda d: d['W'].numpy(), data))\nrand_idx_list = list(map(lambda d: d['rand_idx'], data))\nnode_feat_list = list(map(lambda d: d['node_feat'], data))\nnode_label_list = list(map(lambda d: d['node_label'].numpy(), data))\n\n\nclass ProgressSmoothing:\n def __init__(self, g_nx):\n self.g_nx = g_nx\n\n def _get_weight_list(self, a, m, neighbor_list_dict):\n denominator = 0\n weight_list = [0 for _ in range(m)]\n for h in range(0, m):\n weighting = np.power(a, (m - h))\n # print(len(neighbor_list_dict[h]))\n num_nodes = len(neighbor_list_dict[h])\n weight_list[h] = weighting * num_nodes\n\n # print(weighting, \"@\")\n # print(num_nodes, \"#\")\n denominator += weighting * num_nodes\n # print(type(denominator))\n # print(type(weight_list))\n # print(weight_list/denominator)\n return weight_list / denominator\n\n def nei_dict(self, hop_dict):\n neighbor_list_dict = {} # neighbor_list_dict = {which_hop: [index1, index5, ....]}\n for u, h in hop_dict.items(): # hop_dict = {neighbor_id : which_hop}\n if not h in neighbor_list_dict.keys():\n n_list = [u] # include self node\n neighbor_list_dict[h] = n_list\n else:\n neighbor_list_dict[h].append(u)\n return neighbor_list_dict\n\n def get_neigh_smooth_weight(self, v, a):\n # hop_dict = nx.single_source_shortest_path_length(self.g_nx, v)\n hop_dict = nx.single_source_shortest_path_length(self.g_nx, v, 2)\n neighbor_list_dict = self.nei_dict(hop_dict)\n # print(neighbor_list_dict)\n m = np.max(list(neighbor_list_dict.keys()))\n weight_list = self._get_weight_list(a, m, neighbor_list_dict)\n # print(weight_list)\n nidx_weight_list = []\n for h in range(0, m):\n for u in 
neighbor_list_dict[h]:\n nidx_weight_list.append((int(u), weight_list[h]))\n return nidx_weight_list\n\n def smooth_all(self, a, labels):\n total_nidx_weight_list = []\n for v in list(g_nx.nodes):\n # print(v)\n nidx_weight_list = self.get_neigh_smooth_weight(v, a)\n # print(nidx_weight_list)\n total_nidx_weight_list.extend(nidx_weight_list)\n smoothed_labels = labels.copy()\n smoothed_labels = smoothed_labels.astype(float)\n for u, w in total_nidx_weight_list:\n smoothed_labels[u] *= float(w)\n return smoothed_labels\n\n\ntrain_label = []\nfor W, labels in zip(W_lists, node_label_list):\n # train_W =[]\n # W = W.numpy()\n # labels = node_label_list.numpy()\n g_nx = nx.from_numpy_matrix(W)\n ps = ProgressSmoothing(g_nx=g_nx)\n # train_W.append(W)\n train_label.append(ps.smooth_all(8, labels))\n\nnode_label = train_label\n\n# new_data = [{'W':W, 'rand_idx': rand_idx, 'node_feat': node_feat, 'node_label': node_label}\n# for W, rand_idx, node_feat, node_label in zip(W_list, rand_idx_list, node_feat_list, node_label)]\n\nfor idx, smoothed_label in enumerate(node_label):\n data[idx]['node_label'] = torch.tensor(smoothed_label)\n\n# ps = ProgressSmoothing(g_nx=g_nx)\n# smoothed_labels = ps.smooth_all(2, labels)\n\n# with open('new_SBM_CLUSTER_train_0402_03_dataset.pkl', 'wb') as f:\n# pickle.dump(data, f)\n#\nwith open('SBM_CLUSTER_train.pkl', 'wb') as f:\n pickle.dump(data, f)\n\nnb_graphs = 1000 # val\n# nb_graphs = 333 # val\n# nb_graphs = 100 # val\n# nb_graphs = 5 # val\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_val')\n\nnb_graphs = 1000 # test\n# nb_graphs = 333 # test\n# nb_graphs = 100 # test\n# nb_graphs = 5 # test\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_test')\n\nprint('Time (sec):', time.time() - start) # 190s\n\n# %% md\n\n# Convert to DGL format and save with pickle\n\n# %%\n\nimport os\n\n#os.chdir('/') # go to root folder of the project\nprint(os.getcwd())\n\n# %%\n\n\nimport pickle\n\n# % load_ext\n# autoreload\n# % autoreload\n# 2\n\nfrom data.SBMs import SBMsDatasetDGL\n\nfrom data.data import LoadData\nfrom torch.utils.data import DataLoader\nfrom data.SBMs import SBMsDataset\n\n# %%\n\nDATASET_NAME = 'SBM_CLUSTER'\ndataset = SBMsDatasetDGL(DATASET_NAME) # 3983s\n\n# %%\n\nprint(len(dataset.train))\nprint(len(dataset.val))\nprint(len(dataset.test))\n\nprint(dataset.train[0])\nprint(dataset.val[0])\nprint(dataset.test[0])\n\n# %%\n\nstart = time.time()\n\nwith open('data/SBMs/SBM_CLUSTER_a8.pkl', 'wb') as f:\n pickle.dump([dataset.train, dataset.val, dataset.test], f)\n\nprint('Time (sec):', time.time() - start)\n\n\n\n\n\n\n\n\n\n\n\n\n# %% md\n\n# Notebook for generating and saving SBM CLUSTER graphs\n\n# %%\n\nimport numpy as np\nimport torch\nimport pickle\nimport time\n\n# % matplotlib\n# inline\nimport matplotlib.pyplot as plt\nimport scipy.sparse\nimport os\nprint(os.getcwd())\n\n# %% md\n\n# Generate SBM CLUSTER graphs\n\n# %%\n\n\ndef schuffle(W, c):\n # relabel the vertices at random\n idx = np.random.permutation(W.shape[0])\n # idx2=np.argsort(idx) # for index ordering wrt classes\n W_new = W[idx, :]\n W_new = W_new[:, idx]\n c_new = c[idx]\n return W_new, c_new, idx\n\n\ndef block_model(c, p, q):\n n = len(c)\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if c[i] == c[j]:\n prob = p\n else:\n prob = q\n if np.random.binomial(1, prob) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q):\n c = []\n for r in range(nb_of_clust):\n if clust_size_max == clust_size_min:\n 
clust_size_r = clust_size_max\n else:\n clust_size_r = np.random.randint(clust_size_min, clust_size_max, size=1)[0]\n val_r = np.repeat(r, clust_size_r, axis=0)\n c.append(val_r)\n c = np.concatenate(c)\n W = block_model(c, p, q)\n return W, c\n\n\nclass generate_SBM_graph():\n\n def __init__(self, SBM_parameters):\n # parameters\n nb_of_clust = SBM_parameters['nb_clusters']\n clust_size_min = SBM_parameters['size_min']\n clust_size_max = SBM_parameters['size_max']\n p = SBM_parameters['p']\n q = SBM_parameters['q']\n\n # block model\n W, c = unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)\n\n # shuffle\n W, c, idx = schuffle(W, c)\n\n # signal on block model\n u = np.zeros(c.shape[0])\n for r in range(nb_of_clust):\n cluster = np.where(c == r)[0]\n s = cluster[np.random.randint(cluster.shape[0])]\n u[s] = r + 1\n\n # target\n target = c\n\n # convert to pytorch\n W = torch.from_numpy(W)\n W = W.to(torch.int8)\n idx = torch.from_numpy(idx)\n idx = idx.to(torch.int16)\n u = torch.from_numpy(u)\n u = u.to(torch.int16)\n target = torch.from_numpy(target)\n target = target.to(torch.int16)\n\n # attributes\n self.nb_nodes = W.size(0)\n self.W = W\n self.rand_idx = idx\n self.node_feat = u\n self.node_label = target\n\n\n# configuration\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 6\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 35\nSBM_parameters['p'] = 0.55\nSBM_parameters['q'] = 0.25\nprint(SBM_parameters)\n\ndata = generate_SBM_graph(SBM_parameters)\n\nprint(data)\nprint(data.nb_nodes)\nprint(data.W)\nprint(data.rand_idx)\nprint(data.node_feat)\nprint(data.node_label)\n\n# %%\n\n# Plot Adj matrix\n\nW = data.W\nplt.spy(W, precision=0.01, markersize=1)\nplt.show()\n\nidx = np.argsort(data.rand_idx)\nW = data.W\nW2 = W[idx, :]\nW2 = W2[:, idx]\nplt.spy(W2, precision=0.01, markersize=1)\nplt.show()\n\n\n# %%\n\n\n# %%\n\n# Generate and save SBM graphs\n\nclass DotDict(dict):\n def __init__(self, **kwds):\n self.update(kwds)\n self.__dict__ = self\n\n\ndef generate_semisuperclust_dataset(nb_graphs):\n dataset = []\n for i in range(nb_graphs):\n if not i % 250:\n print(i)\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset.append(graph)\n return dataset\n\nprint(\"hello\")\ndef plot_histo_graphs(dataset, title):\n # histogram of graph sizes\n graph_sizes = []\n for graph in dataset:\n graph_sizes.append(graph.nb_nodes)\n plt.figure(1)\n plt.hist(graph_sizes, bins=50)\n plt.title(title)\n plt.show()\n\n\ndef SBMs_CLUSTER(nb_graphs, name):\n dataset = generate_semisuperclust_dataset(nb_graphs)\n print(len(dataset))\n with open(name + '.pkl', \"wb\") as f:\n pickle.dump(dataset, f)\n plot_histo_graphs(dataset, name)\n\n\nstart = time.time()\n\n\n\n\nnb_graphs = 10000 # train\n# nb_graphs = 3333 # train\n# nb_graphs = 500 # train\n# nb_graphs = 20 # train\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_train')\n\nprint(\"hello2\")\n\nimport pickle\nimport os\nprint(os.getcwd())\nwith open('SBM_CLUSTER_train.pkl', 'rb') as f:\n data = pickle.load(f)\n#\n# with open('SBM_CLUSTER_train.pkl', 'rb') as f:\n# data = pickle.load(f)\n\n# with open('new_SBM_CLUSTER_train_before_smoothing.pkl', 'wb') as f:\n# pickle.dump(data, f)\n\n# dataset = LoadData(SBM_CLUSTER) # 29s\n# trainset, valset, testset = dataset.train, dataset.val, dataset.test\n\n# with open('SBM_CLUSTER_train.pkl', 'rb') as 
f:\n# data = pickle.load(f)\n\n\nimport networkx as nx\n\n# train = data\n\n#W_list = list(map(lambda d: d['W'].numpy(), data))\nW_lists = list(map(lambda d: d['W'].numpy(), data))\nrand_idx_list = list(map(lambda d: d['rand_idx'], data))\nnode_feat_list = list(map(lambda d: d['node_feat'], data))\nnode_label_list = list(map(lambda d: d['node_label'].numpy(), data))\n\n\nclass ProgressSmoothing:\n def __init__(self, g_nx):\n self.g_nx = g_nx\n\n def _get_weight_list(self, a, m, neighbor_list_dict):\n denominator = 0\n weight_list = [0 for _ in range(m)]\n for h in range(0, m):\n weighting = np.power(a, (m - h))\n # print(len(neighbor_list_dict[h]))\n num_nodes = len(neighbor_list_dict[h])\n weight_list[h] = weighting * num_nodes\n\n # print(weighting, \"@\")\n # print(num_nodes, \"#\")\n denominator += weighting * num_nodes\n # print(type(denominator))\n # print(type(weight_list))\n # print(weight_list/denominator)\n return weight_list / denominator\n\n def nei_dict(self, hop_dict):\n neighbor_list_dict = {} # neighbor_list_dict = {which_hop: [index1, index5, ....]}\n for u, h in hop_dict.items(): # hop_dict = {neighbor_id : which_hop}\n if not h in neighbor_list_dict.keys():\n n_list = [u] # include self node\n neighbor_list_dict[h] = n_list\n else:\n neighbor_list_dict[h].append(u)\n return neighbor_list_dict\n\n def get_neigh_smooth_weight(self, v, a):\n # hop_dict = nx.single_source_shortest_path_length(self.g_nx, v)\n hop_dict = nx.single_source_shortest_path_length(self.g_nx, v, 2)\n neighbor_list_dict = self.nei_dict(hop_dict)\n # print(neighbor_list_dict)\n m = np.max(list(neighbor_list_dict.keys()))\n weight_list = self._get_weight_list(a, m, neighbor_list_dict)\n # print(weight_list)\n nidx_weight_list = []\n for h in range(0, m):\n for u in neighbor_list_dict[h]:\n nidx_weight_list.append((int(u), weight_list[h]))\n return nidx_weight_list\n\n def smooth_all(self, a, labels):\n total_nidx_weight_list = []\n for v in list(g_nx.nodes):\n # print(v)\n nidx_weight_list = self.get_neigh_smooth_weight(v, a)\n # print(nidx_weight_list)\n total_nidx_weight_list.extend(nidx_weight_list)\n smoothed_labels = labels.copy()\n smoothed_labels = smoothed_labels.astype(float)\n for u, w in total_nidx_weight_list:\n smoothed_labels[u] *= float(w)\n return smoothed_labels\n\n\ntrain_label = []\nfor W, labels in zip(W_lists, node_label_list):\n # train_W =[]\n # W = W.numpy()\n # labels = node_label_list.numpy()\n g_nx = nx.from_numpy_matrix(W)\n ps = ProgressSmoothing(g_nx=g_nx)\n # train_W.append(W)\n train_label.append(ps.smooth_all(1, labels))\n\nnode_label = train_label\n\n# new_data = [{'W':W, 'rand_idx': rand_idx, 'node_feat': node_feat, 'node_label': node_label}\n# for W, rand_idx, node_feat, node_label in zip(W_list, rand_idx_list, node_feat_list, node_label)]\n\nfor idx, smoothed_label in enumerate(node_label):\n data[idx]['node_label'] = torch.tensor(smoothed_label)\n\n# ps = ProgressSmoothing(g_nx=g_nx)\n# smoothed_labels = ps.smooth_all(2, labels)\n\n# with open('new_SBM_CLUSTER_train_0402_03_dataset.pkl', 'wb') as f:\n# pickle.dump(data, f)\n#\nwith open('SBM_CLUSTER_train.pkl', 'wb') as f:\n pickle.dump(data, f)\n\nnb_graphs = 1000 # val\n# nb_graphs = 333 # val\n# nb_graphs = 100 # val\n# nb_graphs = 5 # val\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_val')\n\nnb_graphs = 1000 # test\n# nb_graphs = 333 # test\n# nb_graphs = 100 # test\n# nb_graphs = 5 # test\nSBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_test')\n\nprint('Time (sec):', time.time() - start) # 190s\n\n# %% md\n\n# Convert to DGL 
format and save with pickle\n\n# %%\n\nimport os\n\n#os.chdir('/') # go to root folder of the project\nprint(os.getcwd())\n\n# %%\n\n\nimport pickle\n\n# % load_ext\n# autoreload\n# % autoreload\n# 2\n\nfrom data.SBMs import SBMsDatasetDGL\n\nfrom data.data import LoadData\nfrom torch.utils.data import DataLoader\nfrom data.SBMs import SBMsDataset\n\n# %%\n\nDATASET_NAME = 'SBM_CLUSTER'\ndataset = SBMsDatasetDGL(DATASET_NAME) # 3983s\n\n# %%\n\nprint(len(dataset.train))\nprint(len(dataset.val))\nprint(len(dataset.test))\n\nprint(dataset.train[0])\nprint(dataset.val[0])\nprint(dataset.test[0])\n\n# %%\n\nstart = time.time()\n\nwith open('data/SBMs/SBM_CLUSTER_a1.pkl', 'wb') as f:\n pickle.dump([dataset.train, dataset.val, dataset.test], f)\n\nprint('Time (sec):', time.time() - start)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# %% md\n\n# Notebook for generating and saving SBM PATTERN graphs\n\n# %%\n\nimport numpy as np\nimport torch\nimport pickle\nimport time\nimport networkx as nx\n# %matplotlib inline\nimport matplotlib.pyplot as plt\nimport scipy.sparse\n\n\n# %% md\n\n# Generate SBM PATTERN graphs\n\n# %%\n\n\ndef schuffle(W, c):\n # relabel the vertices at random\n idx = np.random.permutation(W.shape[0])\n # idx2=np.argsort(idx) # for index ordering wrt classes\n W_new = W[idx, :]\n W_new = W_new[:, idx]\n c_new = c[idx]\n return W_new, c_new, idx\n\n\ndef block_model(c, p, q):\n n = len(c)\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if c[i] == c[j]:\n prob = p\n else:\n prob = q\n if np.random.binomial(1, prob) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q):\n c = []\n for r in range(nb_of_clust):\n if clust_size_max == clust_size_min:\n clust_size_r = clust_size_max\n else:\n clust_size_r = np.random.randint(clust_size_min, clust_size_max, size=1)[0]\n val_r = np.repeat(r, clust_size_r, axis=0)\n c.append(val_r)\n c = np.concatenate(c)\n W = block_model(c, p, q)\n return W, c\n\n\ndef random_pattern(n, p):\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if np.random.binomial(1, p) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef add_pattern(W0, W, c, nb_of_clust, q):\n n = W.shape[0]\n n0 = W0.shape[0]\n V = (np.random.rand(n0, n) < q).astype(float)\n W_up = np.concatenate((W, V.T), axis=1)\n W_low = np.concatenate((V, W0), axis=1)\n W_new = np.concatenate((W_up, W_low), axis=0)\n c0 = np.full(n0, nb_of_clust)\n c_new = np.concatenate((c, c0), axis=0)\n return W_new, c_new\n\n\nclass generate_SBM_graph():\n\n def __init__(self, SBM_parameters):\n # parameters\n nb_of_clust = SBM_parameters['nb_clusters']\n clust_size_min = SBM_parameters['size_min']\n clust_size_max = SBM_parameters['size_max']\n p = SBM_parameters['p']\n q = SBM_parameters['q']\n p_pattern = SBM_parameters['p_pattern']\n q_pattern = SBM_parameters['q_pattern']\n vocab_size = SBM_parameters['vocab_size']\n W0 = SBM_parameters['W0']\n u0 = SBM_parameters['u0']\n\n # block model\n W, c = unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)\n\n # signal on block model\n u = np.random.randint(vocab_size, size=W.shape[0])\n\n # add the subgraph to be detected\n W, c = add_pattern(W0, W, c, nb_of_clust, q_pattern)\n u = np.concatenate((u, u0), axis=0)\n\n # shuffle\n W, c, idx = schuffle(W, c)\n u = u[idx]\n\n # target\n target = (c == nb_of_clust).astype(float)\n\n # convert to pytorch\n W = 
torch.from_numpy(W)\n W = W.to(torch.int8)\n idx = torch.from_numpy(idx)\n idx = idx.to(torch.int16)\n u = torch.from_numpy(u)\n u = u.to(torch.int16)\n target = torch.from_numpy(target)\n target = target.to(torch.int16)\n\n # attributes\n self.nb_nodes = W.size(0)\n self.W = W\n self.rand_idx = idx\n self.node_feat = u\n self.node_label = target\n\n\n# configuration\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 10\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 15 # 25\nSBM_parameters['p'] = 0.5 # 0.5\nSBM_parameters['q'] = 0.25 # 0.1\nSBM_parameters['p_pattern'] = 0.5 # 0.5\nSBM_parameters['q_pattern'] = 0.25 # 0.1\nSBM_parameters['vocab_size'] = 3\nSBM_parameters['size_subgraph'] = 10\nSBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'], SBM_parameters['p_pattern'])\nSBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'], size=SBM_parameters['size_subgraph'])\n\nprint(SBM_parameters)\n\ndata = generate_SBM_graph(SBM_parameters)\n\nprint(data)\n# print(data.nb_nodes)\n# print(data.W)\n# print(data.rand_idx)\n# print(data.node_feat)\n# print(data.node_label)\n\n\n# %%\n\n# Plot Adj matrix\n\nW = data.W\nplt.spy(W, precision=0.01, markersize=1)\nplt.show()\n\nidx = np.argsort(data.rand_idx)\nW = data.W\nW2 = W[idx, :]\nW2 = W2[:, idx]\nplt.spy(W2, precision=0.01, markersize=1)\nplt.show()\n\ntarget = data.node_label\ntarget = target[idx]\nprint(target)\n\n\n# %%\n\n\n# %%\n\n# Generate and save SBM graphs\n\nclass DotDict(dict):\n def __init__(self, **kwds):\n self.update(kwds)\n self.__dict__ = self\n\n\ndef plot_histo_graphs(dataset, title):\n # histogram of graph sizes\n graph_sizes = []\n for graph in dataset:\n graph_sizes.append(graph.nb_nodes)\n plt.figure(1)\n plt.hist(graph_sizes, bins=50)\n plt.title(title)\n plt.show()\n\n\nstart = time.time()\n\n# configuration for 100 patterns 100/20\nnb_pattern_instances = 100 # nb of patterns\nnb_train_graphs_per_pattern_instance = 100 # train per pattern\nnb_test_graphs_per_pattern_instance = 20 # test, val per pattern\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 5\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 35\nSBM_parameters['p'] = 0.5\nSBM_parameters['q'] = 0.2\nSBM_parameters['p_pattern'] = 0.5\nSBM_parameters['q_pattern'] = 0.5\nSBM_parameters['vocab_size'] = 3\nSBM_parameters['size_subgraph'] = 20\nprint(SBM_parameters)\n\ndataset_train = []\ndataset_val = []\ndataset_test = []\nfor idx in range(nb_pattern_instances):\n\n print('pattern:', idx)\n\n SBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'], SBM_parameters['p'])\n SBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'], size=SBM_parameters['size_subgraph'])\n\n for _ in range(nb_train_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_train.append(graph)\n\n for _ in range(nb_test_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_val.append(graph)\n\n for _ in range(nb_test_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n 
graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_test.append(graph)\n\nprint(len(dataset_train), len(dataset_val), len(dataset_test))\n\nplot_histo_graphs(dataset_train, 'train')\nplot_histo_graphs(dataset_val, 'val')\nplot_histo_graphs(dataset_test, 'test')\n\nwith open('SBM_PATTERN_train.pkl', \"wb\") as f:\n pickle.dump(dataset_train, f)\n\nwith open('SBM_PATTERN_train.pkl', 'rb') as f:\n data = pickle.load(f)\n\n # W_list = list(map(lambda d: d['W'].numpy(), data))\nW_lists = list(map(lambda d: d['W'].numpy(), data))\n#rand_idx_list = list(map(lambda d: d['rand_idx'], data))\n#node_feat_list = list(map(lambda d: d['node_feat'], data))\nnode_label_list = list(map(lambda d: d['node_label'].numpy(), data))\n\n\nclass ProgressSmoothing:\n def __init__(self, g_nx):\n self.g_nx = g_nx\n\n def _get_weight_list(self, a, m, neighbor_list_dict):\n denominator = 0\n weight_list = [0 for _ in range(m)]\n for h in range(0, m):\n weighting = np.power(a, (m - h))\n # print(len(neighbor_list_dict[h]))\n num_nodes = len(neighbor_list_dict[h])\n weight_list[h] = weighting * num_nodes\n\n # print(weighting, \"@\")\n # print(num_nodes, \"#\")\n denominator += weighting * num_nodes\n # print(type(denominator))\n # print(type(weight_list))\n # print(weight_list/denominator)\n return weight_list / denominator\n\n def nei_dict(self, hop_dict):\n neighbor_list_dict = {} # neighbor_list_dict = {which_hop: [index1, index5, ....]}\n for u, h in hop_dict.items(): # hop_dict = {neighbor_id : which_hop}\n if not h in neighbor_list_dict.keys():\n n_list = [u] # include self node\n neighbor_list_dict[h] = n_list\n else:\n neighbor_list_dict[h].append(u)\n return neighbor_list_dict\n\n def get_neigh_smooth_weight(self, v, a):\n # hop_dict = nx.single_source_shortest_path_length(self.g_nx, v)\n hop_dict = nx.single_source_shortest_path_length(self.g_nx, v, 2)\n neighbor_list_dict = self.nei_dict(hop_dict)\n # print(neighbor_list_dict)\n m = np.max(list(neighbor_list_dict.keys()))\n weight_list = self._get_weight_list(a, m, neighbor_list_dict)\n # print(weight_list)\n nidx_weight_list = []\n for h in range(0, m):\n for u in neighbor_list_dict[h]:\n nidx_weight_list.append((int(u), weight_list[h]))\n return nidx_weight_list\n\n def smooth_all(self, a, labels):\n total_nidx_weight_list = []\n for v in list(g_nx.nodes):\n # print(v)\n nidx_weight_list = self.get_neigh_smooth_weight(v, a)\n # print(nidx_weight_list)\n total_nidx_weight_list.extend(nidx_weight_list)\n smoothed_labels = labels.copy()\n smoothed_labels = smoothed_labels.astype(float)\n for u, w in total_nidx_weight_list:\n smoothed_labels[u] *= float(w)\n return smoothed_labels\n\n\ntrain_label = []\nfor W, labels in zip(W_lists, node_label_list):\n # train_W =[]\n # W = W.numpy()\n # labels = node_label_list.numpy()\n g_nx = nx.from_numpy_matrix(W)\n ps = ProgressSmoothing(g_nx=g_nx)\n # train_W.append(W)\n train_label.append(ps.smooth_all(2, labels))\n\nnode_label = train_label\n\n# new_data = [{'W':W, 'rand_idx': rand_idx, 'node_feat': node_feat, 'node_label': node_label}\n# for W, rand_idx, node_feat, node_label in zip(W_list, rand_idx_list, node_feat_list, node_label)]\n\nfor idx, smoothed_label in enumerate(node_label):\n data[idx]['node_label'] = torch.tensor(smoothed_label)\n\n# ps = ProgressSmoothing(g_nx=g_nx)\n# smoothed_labels = ps.smooth_all(2, labels)\n\n# with open('new_SBM_CLUSTER_train_0402_03_dataset.pkl', 'wb') as f:\n# pickle.dump(data, f)\n#\n# with open('smoothed_SBM_CLUSTER_0406', 'wb') as f:\n# 
pickle.dump(data, f)\n\n\ndataset_train = data\n\n\n\nwith open('SBM_PATTERN_train.pkl', \"wb\") as f:\n pickle.dump(dataset_train, f)\n\n\nwith open('SBM_PATTERN_val.pkl', \"wb\") as f:\n pickle.dump(dataset_val, f)\nwith open('SBM_PATTERN_test.pkl', \"wb\") as f:\n pickle.dump(dataset_test, f)\n\nprint('Time (sec):', time.time() - start) # 163s\n\n# %% md\n\n# Convert to DGL format and save with pickle\n\n# %%\n\nimport os\n\n#os.chdir('/') # go to root folder of the project\nprint(os.getcwd())\n\n# %%\n\n\nimport pickle\n\n# % load_ext\n# autoreload\n# % autoreload\n# 2\n\nfrom data.SBMs import SBMsDatasetDGL\n\nfrom data.data import LoadData\nfrom torch.utils.data import DataLoader\nfrom data.SBMs import SBMsDataset\n\n# %%\n\nDATASET_NAME = 'SBM_PATTERN'\ndataset = SBMsDatasetDGL(DATASET_NAME) # 4424s = 73min\n\n# %%\n\nprint(len(dataset.train))\nprint(len(dataset.val))\nprint(len(dataset.test))\n\nprint(dataset.train[0])\nprint(dataset.val[0])\nprint(dataset.test[0])\n\n# %%\n\nstart = time.time()\n\nwith open('../data/SBMs/SBM_PATTERN_a2.pkl', 'wb') as f:\n pickle.dump([dataset.train, dataset.val, dataset.test], f)\n\nprint('Time (sec):', time.time() - start) # 21s\n\n\n\n\n\n\n\n\n\n# %% md\n\n# Notebook for generating and saving SBM PATTERN graphs\n\n# %%\n\nimport numpy as np\nimport torch\nimport pickle\nimport time\nimport networkx as nx\n# %matplotlib inline\nimport matplotlib.pyplot as plt\nimport scipy.sparse\n\n\n# %% md\n\n# Generate SBM PATTERN graphs\n\n# %%\n\n\ndef schuffle(W, c):\n # relabel the vertices at random\n idx = np.random.permutation(W.shape[0])\n # idx2=np.argsort(idx) # for index ordering wrt classes\n W_new = W[idx, :]\n W_new = W_new[:, idx]\n c_new = c[idx]\n return W_new, c_new, idx\n\n\ndef block_model(c, p, q):\n n = len(c)\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if c[i] == c[j]:\n prob = p\n else:\n prob = q\n if np.random.binomial(1, prob) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q):\n c = []\n for r in range(nb_of_clust):\n if clust_size_max == clust_size_min:\n clust_size_r = clust_size_max\n else:\n clust_size_r = np.random.randint(clust_size_min, clust_size_max, size=1)[0]\n val_r = np.repeat(r, clust_size_r, axis=0)\n c.append(val_r)\n c = np.concatenate(c)\n W = block_model(c, p, q)\n return W, c\n\n\ndef random_pattern(n, p):\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if np.random.binomial(1, p) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef add_pattern(W0, W, c, nb_of_clust, q):\n n = W.shape[0]\n n0 = W0.shape[0]\n V = (np.random.rand(n0, n) < q).astype(float)\n W_up = np.concatenate((W, V.T), axis=1)\n W_low = np.concatenate((V, W0), axis=1)\n W_new = np.concatenate((W_up, W_low), axis=0)\n c0 = np.full(n0, nb_of_clust)\n c_new = np.concatenate((c, c0), axis=0)\n return W_new, c_new\n\n\nclass generate_SBM_graph():\n\n def __init__(self, SBM_parameters):\n # parameters\n nb_of_clust = SBM_parameters['nb_clusters']\n clust_size_min = SBM_parameters['size_min']\n clust_size_max = SBM_parameters['size_max']\n p = SBM_parameters['p']\n q = SBM_parameters['q']\n p_pattern = SBM_parameters['p_pattern']\n q_pattern = SBM_parameters['q_pattern']\n vocab_size = SBM_parameters['vocab_size']\n W0 = SBM_parameters['W0']\n u0 = SBM_parameters['u0']\n\n # block model\n W, c = unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)\n\n # signal on block model\n u = 
np.random.randint(vocab_size, size=W.shape[0])\n\n # add the subgraph to be detected\n W, c = add_pattern(W0, W, c, nb_of_clust, q_pattern)\n u = np.concatenate((u, u0), axis=0)\n\n # shuffle\n W, c, idx = schuffle(W, c)\n u = u[idx]\n\n # target\n target = (c == nb_of_clust).astype(float)\n\n # convert to pytorch\n W = torch.from_numpy(W)\n W = W.to(torch.int8)\n idx = torch.from_numpy(idx)\n idx = idx.to(torch.int16)\n u = torch.from_numpy(u)\n u = u.to(torch.int16)\n target = torch.from_numpy(target)\n target = target.to(torch.int16)\n\n # attributes\n self.nb_nodes = W.size(0)\n self.W = W\n self.rand_idx = idx\n self.node_feat = u\n self.node_label = target\n\n\n# configuration\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 10\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 15 # 25\nSBM_parameters['p'] = 0.5 # 0.5\nSBM_parameters['q'] = 0.25 # 0.1\nSBM_parameters['p_pattern'] = 0.5 # 0.5\nSBM_parameters['q_pattern'] = 0.25 # 0.1\nSBM_parameters['vocab_size'] = 3\nSBM_parameters['size_subgraph'] = 10\nSBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'], SBM_parameters['p_pattern'])\nSBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'], size=SBM_parameters['size_subgraph'])\n\nprint(SBM_parameters)\n\ndata = generate_SBM_graph(SBM_parameters)\n\nprint(data)\n# print(data.nb_nodes)\n# print(data.W)\n# print(data.rand_idx)\n# print(data.node_feat)\n# print(data.node_label)\n\n\n# %%\n\n# Plot Adj matrix\n\nW = data.W\nplt.spy(W, precision=0.01, markersize=1)\nplt.show()\n\nidx = np.argsort(data.rand_idx)\nW = data.W\nW2 = W[idx, :]\nW2 = W2[:, idx]\nplt.spy(W2, precision=0.01, markersize=1)\nplt.show()\n\ntarget = data.node_label\ntarget = target[idx]\nprint(target)\n\n\n# %%\n\n\n# %%\n\n# Generate and save SBM graphs\n\nclass DotDict(dict):\n def __init__(self, **kwds):\n self.update(kwds)\n self.__dict__ = self\n\n\ndef plot_histo_graphs(dataset, title):\n # histogram of graph sizes\n graph_sizes = []\n for graph in dataset:\n graph_sizes.append(graph.nb_nodes)\n plt.figure(1)\n plt.hist(graph_sizes, bins=50)\n plt.title(title)\n plt.show()\n\n\nstart = time.time()\n\n# configuration for 100 patterns 100/20\nnb_pattern_instances = 100 # nb of patterns\nnb_train_graphs_per_pattern_instance = 100 # train per pattern\nnb_test_graphs_per_pattern_instance = 20 # test, val per pattern\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 5\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 35\nSBM_parameters['p'] = 0.5\nSBM_parameters['q'] = 0.2\nSBM_parameters['p_pattern'] = 0.5\nSBM_parameters['q_pattern'] = 0.5\nSBM_parameters['vocab_size'] = 3\nSBM_parameters['size_subgraph'] = 20\nprint(SBM_parameters)\n\ndataset_train = []\ndataset_val = []\ndataset_test = []\nfor idx in range(nb_pattern_instances):\n\n print('pattern:', idx)\n\n SBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'], SBM_parameters['p'])\n SBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'], size=SBM_parameters['size_subgraph'])\n\n for _ in range(nb_train_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_train.append(graph)\n\n for _ in range(nb_test_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = 
data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_val.append(graph)\n\n for _ in range(nb_test_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_test.append(graph)\n\nprint(len(dataset_train), len(dataset_val), len(dataset_test))\n\nplot_histo_graphs(dataset_train, 'train')\nplot_histo_graphs(dataset_val, 'val')\nplot_histo_graphs(dataset_test, 'test')\n\nwith open('SBM_PATTERN_train.pkl', \"wb\") as f:\n pickle.dump(dataset_train, f)\n\nwith open('SBM_PATTERN_train.pkl', 'rb') as f:\n data = pickle.load(f)\n\n # W_list = list(map(lambda d: d['W'].numpy(), data))\nW_lists = list(map(lambda d: d['W'].numpy(), data))\n#rand_idx_list = list(map(lambda d: d['rand_idx'], data))\n#node_feat_list = list(map(lambda d: d['node_feat'], data))\nnode_label_list = list(map(lambda d: d['node_label'].numpy(), data))\n\n\nclass ProgressSmoothing:\n def __init__(self, g_nx):\n self.g_nx = g_nx\n\n def _get_weight_list(self, a, m, neighbor_list_dict):\n denominator = 0\n weight_list = [0 for _ in range(m)]\n for h in range(0, m):\n weighting = np.power(a, (m - h))\n # print(len(neighbor_list_dict[h]))\n num_nodes = len(neighbor_list_dict[h])\n weight_list[h] = weighting * num_nodes\n\n # print(weighting, \"@\")\n # print(num_nodes, \"#\")\n denominator += weighting * num_nodes\n # print(type(denominator))\n # print(type(weight_list))\n # print(weight_list/denominator)\n return weight_list / denominator\n\n def nei_dict(self, hop_dict):\n neighbor_list_dict = {} # neighbor_list_dict = {which_hop: [index1, index5, ....]}\n for u, h in hop_dict.items(): # hop_dict = {neighbor_id : which_hop}\n if not h in neighbor_list_dict.keys():\n n_list = [u] # include self node\n neighbor_list_dict[h] = n_list\n else:\n neighbor_list_dict[h].append(u)\n return neighbor_list_dict\n\n def get_neigh_smooth_weight(self, v, a):\n # hop_dict = nx.single_source_shortest_path_length(self.g_nx, v)\n hop_dict = nx.single_source_shortest_path_length(self.g_nx, v, 2)\n neighbor_list_dict = self.nei_dict(hop_dict)\n # print(neighbor_list_dict)\n m = np.max(list(neighbor_list_dict.keys()))\n weight_list = self._get_weight_list(a, m, neighbor_list_dict)\n # print(weight_list)\n nidx_weight_list = []\n for h in range(0, m):\n for u in neighbor_list_dict[h]:\n nidx_weight_list.append((int(u), weight_list[h]))\n return nidx_weight_list\n\n def smooth_all(self, a, labels):\n total_nidx_weight_list = []\n for v in list(g_nx.nodes):\n # print(v)\n nidx_weight_list = self.get_neigh_smooth_weight(v, a)\n # print(nidx_weight_list)\n total_nidx_weight_list.extend(nidx_weight_list)\n smoothed_labels = labels.copy()\n smoothed_labels = smoothed_labels.astype(float)\n for u, w in total_nidx_weight_list:\n smoothed_labels[u] *= float(w)\n return smoothed_labels\n\n\ntrain_label = []\nfor W, labels in zip(W_lists, node_label_list):\n # train_W =[]\n # W = W.numpy()\n # labels = node_label_list.numpy()\n g_nx = nx.from_numpy_matrix(W)\n ps = ProgressSmoothing(g_nx=g_nx)\n # train_W.append(W)\n train_label.append(ps.smooth_all(3, labels))\n\nnode_label = train_label\n\n# new_data = [{'W':W, 'rand_idx': rand_idx, 'node_feat': node_feat, 'node_label': node_label}\n# for W, rand_idx, node_feat, node_label in zip(W_list, rand_idx_list, node_feat_list, node_label)]\n\nfor idx, smoothed_label in 
enumerate(node_label):\n data[idx]['node_label'] = torch.tensor(smoothed_label)\n\n# ps = ProgressSmoothing(g_nx=g_nx)\n# smoothed_labels = ps.smooth_all(2, labels)\n\n# with open('new_SBM_CLUSTER_train_0402_03_dataset.pkl', 'wb') as f:\n# pickle.dump(data, f)\n#\n# with open('smoothed_SBM_CLUSTER_0406', 'wb') as f:\n# pickle.dump(data, f)\n\n\ndataset_train = data\n\n\n\nwith open('SBM_PATTERN_train.pkl', \"wb\") as f:\n pickle.dump(dataset_train, f)\n\n\nwith open('SBM_PATTERN_val.pkl', \"wb\") as f:\n pickle.dump(dataset_val, f)\nwith open('SBM_PATTERN_test.pkl', \"wb\") as f:\n pickle.dump(dataset_test, f)\n\nprint('Time (sec):', time.time() - start) # 163s\n\n# %% md\n\n# Convert to DGL format and save with pickle\n\n# %%\n\nimport os\n\n#os.chdir('/') # go to root folder of the project\nprint(os.getcwd())\n\n# %%\n\n\nimport pickle\n\n# % load_ext\n# autoreload\n# % autoreload\n# 2\n\nfrom data.SBMs import SBMsDatasetDGL\n\nfrom data.data import LoadData\nfrom torch.utils.data import DataLoader\nfrom data.SBMs import SBMsDataset\n\n# %%\n\nDATASET_NAME = 'SBM_PATTERN'\ndataset = SBMsDatasetDGL(DATASET_NAME) # 4424s = 73min\n\n# %%\n\nprint(len(dataset.train))\nprint(len(dataset.val))\nprint(len(dataset.test))\n\nprint(dataset.train[0])\nprint(dataset.val[0])\nprint(dataset.test[0])\n\n# %%\n\nstart = time.time()\n\nwith open('data/SBMs/SBM_PATTERN_a3.pkl', 'wb') as f:\n pickle.dump([dataset.train, dataset.val, dataset.test], f)\n\nprint('Time (sec):', time.time() - start) # 21s\n\n\n\n\n\n\n\n\n\n\n# %% md\n\n# Notebook for generating and saving SBM PATTERN graphs\n\n# %%\n\nimport numpy as np\nimport torch\nimport pickle\nimport time\nimport networkx as nx\n# %matplotlib inline\nimport matplotlib.pyplot as plt\nimport scipy.sparse\n\n\n# %% md\n\n# Generate SBM PATTERN graphs\n\n# %%\n\n\ndef schuffle(W, c):\n # relabel the vertices at random\n idx = np.random.permutation(W.shape[0])\n # idx2=np.argsort(idx) # for index ordering wrt classes\n W_new = W[idx, :]\n W_new = W_new[:, idx]\n c_new = c[idx]\n return W_new, c_new, idx\n\n\ndef block_model(c, p, q):\n n = len(c)\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if c[i] == c[j]:\n prob = p\n else:\n prob = q\n if np.random.binomial(1, prob) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q):\n c = []\n for r in range(nb_of_clust):\n if clust_size_max == clust_size_min:\n clust_size_r = clust_size_max\n else:\n clust_size_r = np.random.randint(clust_size_min, clust_size_max, size=1)[0]\n val_r = np.repeat(r, clust_size_r, axis=0)\n c.append(val_r)\n c = np.concatenate(c)\n W = block_model(c, p, q)\n return W, c\n\n\ndef random_pattern(n, p):\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if np.random.binomial(1, p) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef add_pattern(W0, W, c, nb_of_clust, q):\n n = W.shape[0]\n n0 = W0.shape[0]\n V = (np.random.rand(n0, n) < q).astype(float)\n W_up = np.concatenate((W, V.T), axis=1)\n W_low = np.concatenate((V, W0), axis=1)\n W_new = np.concatenate((W_up, W_low), axis=0)\n c0 = np.full(n0, nb_of_clust)\n c_new = np.concatenate((c, c0), axis=0)\n return W_new, c_new\n\n\nclass generate_SBM_graph():\n\n def __init__(self, SBM_parameters):\n # parameters\n nb_of_clust = SBM_parameters['nb_clusters']\n clust_size_min = SBM_parameters['size_min']\n clust_size_max = SBM_parameters['size_max']\n p = SBM_parameters['p']\n q = SBM_parameters['q']\n p_pattern = 
SBM_parameters['p_pattern']\n q_pattern = SBM_parameters['q_pattern']\n vocab_size = SBM_parameters['vocab_size']\n W0 = SBM_parameters['W0']\n u0 = SBM_parameters['u0']\n\n # block model\n W, c = unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)\n\n # signal on block model\n u = np.random.randint(vocab_size, size=W.shape[0])\n\n # add the subgraph to be detected\n W, c = add_pattern(W0, W, c, nb_of_clust, q_pattern)\n u = np.concatenate((u, u0), axis=0)\n\n # shuffle\n W, c, idx = schuffle(W, c)\n u = u[idx]\n\n # target\n target = (c == nb_of_clust).astype(float)\n\n # convert to pytorch\n W = torch.from_numpy(W)\n W = W.to(torch.int8)\n idx = torch.from_numpy(idx)\n idx = idx.to(torch.int16)\n u = torch.from_numpy(u)\n u = u.to(torch.int16)\n target = torch.from_numpy(target)\n target = target.to(torch.int16)\n\n # attributes\n self.nb_nodes = W.size(0)\n self.W = W\n self.rand_idx = idx\n self.node_feat = u\n self.node_label = target\n\n\n# configuration\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 10\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 15 # 25\nSBM_parameters['p'] = 0.5 # 0.5\nSBM_parameters['q'] = 0.25 # 0.1\nSBM_parameters['p_pattern'] = 0.5 # 0.5\nSBM_parameters['q_pattern'] = 0.25 # 0.1\nSBM_parameters['vocab_size'] = 3\nSBM_parameters['size_subgraph'] = 10\nSBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'], SBM_parameters['p_pattern'])\nSBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'], size=SBM_parameters['size_subgraph'])\n\nprint(SBM_parameters)\n\ndata = generate_SBM_graph(SBM_parameters)\n\nprint(data)\n# print(data.nb_nodes)\n# print(data.W)\n# print(data.rand_idx)\n# print(data.node_feat)\n# print(data.node_label)\n\n\n# %%\n\n# Plot Adj matrix\n\nW = data.W\nplt.spy(W, precision=0.01, markersize=1)\nplt.show()\n\nidx = np.argsort(data.rand_idx)\nW = data.W\nW2 = W[idx, :]\nW2 = W2[:, idx]\nplt.spy(W2, precision=0.01, markersize=1)\nplt.show()\n\ntarget = data.node_label\ntarget = target[idx]\nprint(target)\n\n\n# %%\n\n\n# %%\n\n# Generate and save SBM graphs\n\nclass DotDict(dict):\n def __init__(self, **kwds):\n self.update(kwds)\n self.__dict__ = self\n\n\ndef plot_histo_graphs(dataset, title):\n # histogram of graph sizes\n graph_sizes = []\n for graph in dataset:\n graph_sizes.append(graph.nb_nodes)\n plt.figure(1)\n plt.hist(graph_sizes, bins=50)\n plt.title(title)\n plt.show()\n\n\nstart = time.time()\n\n# configuration for 100 patterns 100/20\nnb_pattern_instances = 100 # nb of patterns\nnb_train_graphs_per_pattern_instance = 100 # train per pattern\nnb_test_graphs_per_pattern_instance = 20 # test, val per pattern\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 5\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 35\nSBM_parameters['p'] = 0.5\nSBM_parameters['q'] = 0.2\nSBM_parameters['p_pattern'] = 0.5\nSBM_parameters['q_pattern'] = 0.5\nSBM_parameters['vocab_size'] = 3\nSBM_parameters['size_subgraph'] = 20\nprint(SBM_parameters)\n\ndataset_train = []\ndataset_val = []\ndataset_test = []\nfor idx in range(nb_pattern_instances):\n\n print('pattern:', idx)\n\n SBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'], SBM_parameters['p'])\n SBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'], size=SBM_parameters['size_subgraph'])\n\n for _ in range(nb_train_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = 
data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_train.append(graph)\n\n for _ in range(nb_test_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_val.append(graph)\n\n for _ in range(nb_test_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_test.append(graph)\n\nprint(len(dataset_train), len(dataset_val), len(dataset_test))\n\nplot_histo_graphs(dataset_train, 'train')\nplot_histo_graphs(dataset_val, 'val')\nplot_histo_graphs(dataset_test, 'test')\n\nwith open('SBM_PATTERN_train.pkl', \"wb\") as f:\n pickle.dump(dataset_train, f)\n\nwith open('SBM_PATTERN_train.pkl', 'rb') as f:\n data = pickle.load(f)\n\n # W_list = list(map(lambda d: d['W'].numpy(), data))\nW_lists = list(map(lambda d: d['W'].numpy(), data))\n#rand_idx_list = list(map(lambda d: d['rand_idx'], data))\n#node_feat_list = list(map(lambda d: d['node_feat'], data))\nnode_label_list = list(map(lambda d: d['node_label'].numpy(), data))\n\n\nclass ProgressSmoothing:\n def __init__(self, g_nx):\n self.g_nx = g_nx\n\n def _get_weight_list(self, a, m, neighbor_list_dict):\n denominator = 0\n weight_list = [0 for _ in range(m)]\n for h in range(0, m):\n weighting = np.power(a, (m - h))\n # print(len(neighbor_list_dict[h]))\n num_nodes = len(neighbor_list_dict[h])\n weight_list[h] = weighting * num_nodes\n\n # print(weighting, \"@\")\n # print(num_nodes, \"#\")\n denominator += weighting * num_nodes\n # print(type(denominator))\n # print(type(weight_list))\n # print(weight_list/denominator)\n return weight_list / denominator\n\n def nei_dict(self, hop_dict):\n neighbor_list_dict = {} # neighbor_list_dict = {which_hop: [index1, index5, ....]}\n for u, h in hop_dict.items(): # hop_dict = {neighbor_id : which_hop}\n if not h in neighbor_list_dict.keys():\n n_list = [u] # include self node\n neighbor_list_dict[h] = n_list\n else:\n neighbor_list_dict[h].append(u)\n return neighbor_list_dict\n\n def get_neigh_smooth_weight(self, v, a):\n # hop_dict = nx.single_source_shortest_path_length(self.g_nx, v)\n hop_dict = nx.single_source_shortest_path_length(self.g_nx, v, 2)\n neighbor_list_dict = self.nei_dict(hop_dict)\n # print(neighbor_list_dict)\n m = np.max(list(neighbor_list_dict.keys()))\n weight_list = self._get_weight_list(a, m, neighbor_list_dict)\n # print(weight_list)\n nidx_weight_list = []\n for h in range(0, m):\n for u in neighbor_list_dict[h]:\n nidx_weight_list.append((int(u), weight_list[h]))\n return nidx_weight_list\n\n def smooth_all(self, a, labels):\n total_nidx_weight_list = []\n for v in list(g_nx.nodes):\n # print(v)\n nidx_weight_list = self.get_neigh_smooth_weight(v, a)\n # print(nidx_weight_list)\n total_nidx_weight_list.extend(nidx_weight_list)\n smoothed_labels = labels.copy()\n smoothed_labels = smoothed_labels.astype(float)\n for u, w in total_nidx_weight_list:\n smoothed_labels[u] *= float(w)\n return smoothed_labels\n\n\ntrain_label = []\nfor W, labels in zip(W_lists, node_label_list):\n # train_W =[]\n # W = W.numpy()\n # labels = node_label_list.numpy()\n g_nx = nx.from_numpy_matrix(W)\n ps = ProgressSmoothing(g_nx=g_nx)\n # train_W.append(W)\n 
train_label.append(ps.smooth_all(4, labels))\n\nnode_label = train_label\n\n# new_data = [{'W':W, 'rand_idx': rand_idx, 'node_feat': node_feat, 'node_label': node_label}\n# for W, rand_idx, node_feat, node_label in zip(W_list, rand_idx_list, node_feat_list, node_label)]\n\nfor idx, smoothed_label in enumerate(node_label):\n data[idx]['node_label'] = torch.tensor(smoothed_label)\n\n# ps = ProgressSmoothing(g_nx=g_nx)\n# smoothed_labels = ps.smooth_all(2, labels)\n\n# with open('new_SBM_CLUSTER_train_0402_03_dataset.pkl', 'wb') as f:\n# pickle.dump(data, f)\n#\n# with open('smoothed_SBM_CLUSTER_0406', 'wb') as f:\n# pickle.dump(data, f)\n\n\ndataset_train = data\n\n\n\nwith open('SBM_PATTERN_train.pkl', \"wb\") as f:\n pickle.dump(dataset_train, f)\n\n\nwith open('SBM_PATTERN_val.pkl', \"wb\") as f:\n pickle.dump(dataset_val, f)\nwith open('SBM_PATTERN_test.pkl', \"wb\") as f:\n pickle.dump(dataset_test, f)\n\nprint('Time (sec):', time.time() - start) # 163s\n\n# %% md\n\n# Convert to DGL format and save with pickle\n\n# %%\n\nimport os\n\n#os.chdir('/') # go to root folder of the project\nprint(os.getcwd())\n\n# %%\n\n\nimport pickle\n\n# % load_ext\n# autoreload\n# % autoreload\n# 2\n\nfrom data.SBMs import SBMsDatasetDGL\n\nfrom data.data import LoadData\nfrom torch.utils.data import DataLoader\nfrom data.SBMs import SBMsDataset\n\n# %%\n\nDATASET_NAME = 'SBM_PATTERN'\ndataset = SBMsDatasetDGL(DATASET_NAME) # 4424s = 73min\n\n# %%\n\nprint(len(dataset.train))\nprint(len(dataset.val))\nprint(len(dataset.test))\n\nprint(dataset.train[0])\nprint(dataset.val[0])\nprint(dataset.test[0])\n\n# %%\n\nstart = time.time()\n\nwith open('data/SBMs/SBM_PATTERN_a4.pkl', 'wb') as f:\n pickle.dump([dataset.train, dataset.val, dataset.test], f)\n\nprint('Time (sec):', time.time() - start) # 21s\n\n\n\n\n\n\n\n\n\n\n# %% md\n\n# Notebook for generating and saving SBM PATTERN graphs\n\n# %%\n\nimport numpy as np\nimport torch\nimport pickle\nimport time\nimport networkx as nx\n# %matplotlib inline\nimport matplotlib.pyplot as plt\nimport scipy.sparse\n\n\n# %% md\n\n# Generate SBM PATTERN graphs\n\n# %%\n\n\ndef schuffle(W, c):\n # relabel the vertices at random\n idx = np.random.permutation(W.shape[0])\n # idx2=np.argsort(idx) # for index ordering wrt classes\n W_new = W[idx, :]\n W_new = W_new[:, idx]\n c_new = c[idx]\n return W_new, c_new, idx\n\n\ndef block_model(c, p, q):\n n = len(c)\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if c[i] == c[j]:\n prob = p\n else:\n prob = q\n if np.random.binomial(1, prob) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q):\n c = []\n for r in range(nb_of_clust):\n if clust_size_max == clust_size_min:\n clust_size_r = clust_size_max\n else:\n clust_size_r = np.random.randint(clust_size_min, clust_size_max, size=1)[0]\n val_r = np.repeat(r, clust_size_r, axis=0)\n c.append(val_r)\n c = np.concatenate(c)\n W = block_model(c, p, q)\n return W, c\n\n\ndef random_pattern(n, p):\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if np.random.binomial(1, p) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef add_pattern(W0, W, c, nb_of_clust, q):\n n = W.shape[0]\n n0 = W0.shape[0]\n V = (np.random.rand(n0, n) < q).astype(float)\n W_up = np.concatenate((W, V.T), axis=1)\n W_low = np.concatenate((V, W0), axis=1)\n W_new = np.concatenate((W_up, W_low), axis=0)\n c0 = np.full(n0, nb_of_clust)\n c_new = np.concatenate((c, c0), axis=0)\n return W_new, 
c_new\n\n\nclass generate_SBM_graph():\n\n def __init__(self, SBM_parameters):\n # parameters\n nb_of_clust = SBM_parameters['nb_clusters']\n clust_size_min = SBM_parameters['size_min']\n clust_size_max = SBM_parameters['size_max']\n p = SBM_parameters['p']\n q = SBM_parameters['q']\n p_pattern = SBM_parameters['p_pattern']\n q_pattern = SBM_parameters['q_pattern']\n vocab_size = SBM_parameters['vocab_size']\n W0 = SBM_parameters['W0']\n u0 = SBM_parameters['u0']\n\n # block model\n W, c = unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)\n\n # signal on block model\n u = np.random.randint(vocab_size, size=W.shape[0])\n\n # add the subgraph to be detected\n W, c = add_pattern(W0, W, c, nb_of_clust, q_pattern)\n u = np.concatenate((u, u0), axis=0)\n\n # shuffle\n W, c, idx = schuffle(W, c)\n u = u[idx]\n\n # target\n target = (c == nb_of_clust).astype(float)\n\n # convert to pytorch\n W = torch.from_numpy(W)\n W = W.to(torch.int8)\n idx = torch.from_numpy(idx)\n idx = idx.to(torch.int16)\n u = torch.from_numpy(u)\n u = u.to(torch.int16)\n target = torch.from_numpy(target)\n target = target.to(torch.int16)\n\n # attributes\n self.nb_nodes = W.size(0)\n self.W = W\n self.rand_idx = idx\n self.node_feat = u\n self.node_label = target\n\n\n# configuration\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 10\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 15 # 25\nSBM_parameters['p'] = 0.5 # 0.5\nSBM_parameters['q'] = 0.25 # 0.1\nSBM_parameters['p_pattern'] = 0.5 # 0.5\nSBM_parameters['q_pattern'] = 0.25 # 0.1\nSBM_parameters['vocab_size'] = 3\nSBM_parameters['size_subgraph'] = 10\nSBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'], SBM_parameters['p_pattern'])\nSBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'], size=SBM_parameters['size_subgraph'])\n\nprint(SBM_parameters)\n\ndata = generate_SBM_graph(SBM_parameters)\n\nprint(data)\n# print(data.nb_nodes)\n# print(data.W)\n# print(data.rand_idx)\n# print(data.node_feat)\n# print(data.node_label)\n\n\n# %%\n\n# Plot Adj matrix\n\nW = data.W\nplt.spy(W, precision=0.01, markersize=1)\nplt.show()\n\nidx = np.argsort(data.rand_idx)\nW = data.W\nW2 = W[idx, :]\nW2 = W2[:, idx]\nplt.spy(W2, precision=0.01, markersize=1)\nplt.show()\n\ntarget = data.node_label\ntarget = target[idx]\nprint(target)\n\n\n# %%\n\n\n# %%\n\n# Generate and save SBM graphs\n\nclass DotDict(dict):\n def __init__(self, **kwds):\n self.update(kwds)\n self.__dict__ = self\n\n\ndef plot_histo_graphs(dataset, title):\n # histogram of graph sizes\n graph_sizes = []\n for graph in dataset:\n graph_sizes.append(graph.nb_nodes)\n plt.figure(1)\n plt.hist(graph_sizes, bins=50)\n plt.title(title)\n plt.show()\n\n\nstart = time.time()\n\n# configuration for 100 patterns 100/20\nnb_pattern_instances = 100 # nb of patterns\nnb_train_graphs_per_pattern_instance = 100 # train per pattern\nnb_test_graphs_per_pattern_instance = 20 # test, val per pattern\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 5\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 35\nSBM_parameters['p'] = 0.5\nSBM_parameters['q'] = 0.2\nSBM_parameters['p_pattern'] = 0.5\nSBM_parameters['q_pattern'] = 0.5\nSBM_parameters['vocab_size'] = 3\nSBM_parameters['size_subgraph'] = 20\nprint(SBM_parameters)\n\ndataset_train = []\ndataset_val = []\ndataset_test = []\nfor idx in range(nb_pattern_instances):\n\n print('pattern:', idx)\n\n SBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'], SBM_parameters['p'])\n 
SBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'], size=SBM_parameters['size_subgraph'])\n\n for _ in range(nb_train_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_train.append(graph)\n\n for _ in range(nb_test_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_val.append(graph)\n\n for _ in range(nb_test_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_test.append(graph)\n\nprint(len(dataset_train), len(dataset_val), len(dataset_test))\n\nplot_histo_graphs(dataset_train, 'train')\nplot_histo_graphs(dataset_val, 'val')\nplot_histo_graphs(dataset_test, 'test')\n\nwith open('SBM_PATTERN_train.pkl', \"wb\") as f:\n pickle.dump(dataset_train, f)\n\nwith open('SBM_PATTERN_train.pkl', 'rb') as f:\n data = pickle.load(f)\n\n # W_list = list(map(lambda d: d['W'].numpy(), data))\nW_lists = list(map(lambda d: d['W'].numpy(), data))\n#rand_idx_list = list(map(lambda d: d['rand_idx'], data))\n#node_feat_list = list(map(lambda d: d['node_feat'], data))\nnode_label_list = list(map(lambda d: d['node_label'].numpy(), data))\n\n\nclass ProgressSmoothing:\n def __init__(self, g_nx):\n self.g_nx = g_nx\n\n def _get_weight_list(self, a, m, neighbor_list_dict):\n denominator = 0\n weight_list = [0 for _ in range(m)]\n for h in range(0, m):\n weighting = np.power(a, (m - h))\n # print(len(neighbor_list_dict[h]))\n num_nodes = len(neighbor_list_dict[h])\n weight_list[h] = weighting * num_nodes\n\n # print(weighting, \"@\")\n # print(num_nodes, \"#\")\n denominator += weighting * num_nodes\n # print(type(denominator))\n # print(type(weight_list))\n # print(weight_list/denominator)\n return weight_list / denominator\n\n def nei_dict(self, hop_dict):\n neighbor_list_dict = {} # neighbor_list_dict = {which_hop: [index1, index5, ....]}\n for u, h in hop_dict.items(): # hop_dict = {neighbor_id : which_hop}\n if not h in neighbor_list_dict.keys():\n n_list = [u] # include self node\n neighbor_list_dict[h] = n_list\n else:\n neighbor_list_dict[h].append(u)\n return neighbor_list_dict\n\n def get_neigh_smooth_weight(self, v, a):\n # hop_dict = nx.single_source_shortest_path_length(self.g_nx, v)\n hop_dict = nx.single_source_shortest_path_length(self.g_nx, v, 2)\n neighbor_list_dict = self.nei_dict(hop_dict)\n # print(neighbor_list_dict)\n m = np.max(list(neighbor_list_dict.keys()))\n weight_list = self._get_weight_list(a, m, neighbor_list_dict)\n # print(weight_list)\n nidx_weight_list = []\n for h in range(0, m):\n for u in neighbor_list_dict[h]:\n nidx_weight_list.append((int(u), weight_list[h]))\n return nidx_weight_list\n\n def smooth_all(self, a, labels):\n total_nidx_weight_list = []\n for v in list(g_nx.nodes):\n # print(v)\n nidx_weight_list = self.get_neigh_smooth_weight(v, a)\n # print(nidx_weight_list)\n total_nidx_weight_list.extend(nidx_weight_list)\n smoothed_labels = labels.copy()\n smoothed_labels = smoothed_labels.astype(float)\n for u, w in 
total_nidx_weight_list:\n smoothed_labels[u] *= float(w)\n return smoothed_labels\n\n\ntrain_label = []\nfor W, labels in zip(W_lists, node_label_list):\n # train_W =[]\n # W = W.numpy()\n # labels = node_label_list.numpy()\n g_nx = nx.from_numpy_matrix(W)\n ps = ProgressSmoothing(g_nx=g_nx)\n # train_W.append(W)\n train_label.append(ps.smooth_all(6, labels))\n\nnode_label = train_label\n\n# new_data = [{'W':W, 'rand_idx': rand_idx, 'node_feat': node_feat, 'node_label': node_label}\n# for W, rand_idx, node_feat, node_label in zip(W_list, rand_idx_list, node_feat_list, node_label)]\n\nfor idx, smoothed_label in enumerate(node_label):\n data[idx]['node_label'] = torch.tensor(smoothed_label)\n\n# ps = ProgressSmoothing(g_nx=g_nx)\n# smoothed_labels = ps.smooth_all(2, labels)\n\n# with open('new_SBM_CLUSTER_train_0402_03_dataset.pkl', 'wb') as f:\n# pickle.dump(data, f)\n#\n# with open('smoothed_SBM_CLUSTER_0406', 'wb') as f:\n# pickle.dump(data, f)\n\n\ndataset_train = data\n\n\n\nwith open('SBM_PATTERN_train.pkl', \"wb\") as f:\n pickle.dump(dataset_train, f)\n\n\nwith open('SBM_PATTERN_val.pkl', \"wb\") as f:\n pickle.dump(dataset_val, f)\nwith open('SBM_PATTERN_test.pkl', \"wb\") as f:\n pickle.dump(dataset_test, f)\n\nprint('Time (sec):', time.time() - start) # 163s\n\n# %% md\n\n# Convert to DGL format and save with pickle\n\n# %%\n\nimport os\n\n#os.chdir('/') # go to root folder of the project\nprint(os.getcwd())\n\n# %%\n\n\nimport pickle\n\n# % load_ext\n# autoreload\n# % autoreload\n# 2\n\nfrom data.SBMs import SBMsDatasetDGL\n\nfrom data.data import LoadData\nfrom torch.utils.data import DataLoader\nfrom data.SBMs import SBMsDataset\n\n# %%\n\nDATASET_NAME = 'SBM_PATTERN'\ndataset = SBMsDatasetDGL(DATASET_NAME) # 4424s = 73min\n\n# %%\n\nprint(len(dataset.train))\nprint(len(dataset.val))\nprint(len(dataset.test))\n\nprint(dataset.train[0])\nprint(dataset.val[0])\nprint(dataset.test[0])\n\n# %%\n\nstart = time.time()\n\nwith open('data/SBMs/SBM_PATTERN_a6.pkl', 'wb') as f:\n pickle.dump([dataset.train, dataset.val, dataset.test], f)\n\nprint('Time (sec):', time.time() - start) # 21s\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# %% md\n\n# Notebook for generating and saving SBM PATTERN graphs\n\n# %%\n\nimport numpy as np\nimport torch\nimport pickle\nimport time\nimport networkx as nx\n# %matplotlib inline\nimport matplotlib.pyplot as plt\nimport scipy.sparse\n\n\n# %% md\n\n# Generate SBM PATTERN graphs\n\n# %%\n\n\ndef schuffle(W, c):\n # relabel the vertices at random\n idx = np.random.permutation(W.shape[0])\n # idx2=np.argsort(idx) # for index ordering wrt classes\n W_new = W[idx, :]\n W_new = W_new[:, idx]\n c_new = c[idx]\n return W_new, c_new, idx\n\n\ndef block_model(c, p, q):\n n = len(c)\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if c[i] == c[j]:\n prob = p\n else:\n prob = q\n if np.random.binomial(1, prob) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q):\n c = []\n for r in range(nb_of_clust):\n if clust_size_max == clust_size_min:\n clust_size_r = clust_size_max\n else:\n clust_size_r = np.random.randint(clust_size_min, clust_size_max, size=1)[0]\n val_r = np.repeat(r, clust_size_r, axis=0)\n c.append(val_r)\n c = np.concatenate(c)\n W = block_model(c, p, q)\n return W, c\n\n\ndef random_pattern(n, p):\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if np.random.binomial(1, p) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef add_pattern(W0, W, 
c, nb_of_clust, q):\n n = W.shape[0]\n n0 = W0.shape[0]\n V = (np.random.rand(n0, n) < q).astype(float)\n W_up = np.concatenate((W, V.T), axis=1)\n W_low = np.concatenate((V, W0), axis=1)\n W_new = np.concatenate((W_up, W_low), axis=0)\n c0 = np.full(n0, nb_of_clust)\n c_new = np.concatenate((c, c0), axis=0)\n return W_new, c_new\n\n\nclass generate_SBM_graph():\n\n def __init__(self, SBM_parameters):\n # parameters\n nb_of_clust = SBM_parameters['nb_clusters']\n clust_size_min = SBM_parameters['size_min']\n clust_size_max = SBM_parameters['size_max']\n p = SBM_parameters['p']\n q = SBM_parameters['q']\n p_pattern = SBM_parameters['p_pattern']\n q_pattern = SBM_parameters['q_pattern']\n vocab_size = SBM_parameters['vocab_size']\n W0 = SBM_parameters['W0']\n u0 = SBM_parameters['u0']\n\n # block model\n W, c = unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)\n\n # signal on block model\n u = np.random.randint(vocab_size, size=W.shape[0])\n\n # add the subgraph to be detected\n W, c = add_pattern(W0, W, c, nb_of_clust, q_pattern)\n u = np.concatenate((u, u0), axis=0)\n\n # shuffle\n W, c, idx = schuffle(W, c)\n u = u[idx]\n\n # target\n target = (c == nb_of_clust).astype(float)\n\n # convert to pytorch\n W = torch.from_numpy(W)\n W = W.to(torch.int8)\n idx = torch.from_numpy(idx)\n idx = idx.to(torch.int16)\n u = torch.from_numpy(u)\n u = u.to(torch.int16)\n target = torch.from_numpy(target)\n target = target.to(torch.int16)\n\n # attributes\n self.nb_nodes = W.size(0)\n self.W = W\n self.rand_idx = idx\n self.node_feat = u\n self.node_label = target\n\n\n# configuration\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 10\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 15 # 25\nSBM_parameters['p'] = 0.5 # 0.5\nSBM_parameters['q'] = 0.25 # 0.1\nSBM_parameters['p_pattern'] = 0.5 # 0.5\nSBM_parameters['q_pattern'] = 0.25 # 0.1\nSBM_parameters['vocab_size'] = 3\nSBM_parameters['size_subgraph'] = 10\nSBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'], SBM_parameters['p_pattern'])\nSBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'], size=SBM_parameters['size_subgraph'])\n\nprint(SBM_parameters)\n\ndata = generate_SBM_graph(SBM_parameters)\n\nprint(data)\n# print(data.nb_nodes)\n# print(data.W)\n# print(data.rand_idx)\n# print(data.node_feat)\n# print(data.node_label)\n\n\n# %%\n\n# Plot Adj matrix\n\nW = data.W\nplt.spy(W, precision=0.01, markersize=1)\nplt.show()\n\nidx = np.argsort(data.rand_idx)\nW = data.W\nW2 = W[idx, :]\nW2 = W2[:, idx]\nplt.spy(W2, precision=0.01, markersize=1)\nplt.show()\n\ntarget = data.node_label\ntarget = target[idx]\nprint(target)\n\n\n# %%\n\n\n# %%\n\n# Generate and save SBM graphs\n\nclass DotDict(dict):\n def __init__(self, **kwds):\n self.update(kwds)\n self.__dict__ = self\n\n\ndef plot_histo_graphs(dataset, title):\n # histogram of graph sizes\n graph_sizes = []\n for graph in dataset:\n graph_sizes.append(graph.nb_nodes)\n plt.figure(1)\n plt.hist(graph_sizes, bins=50)\n plt.title(title)\n plt.show()\n\n\nstart = time.time()\n\n# configuration for 100 patterns 100/20\nnb_pattern_instances = 100 # nb of patterns\nnb_train_graphs_per_pattern_instance = 100 # train per pattern\nnb_test_graphs_per_pattern_instance = 20 # test, val per pattern\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 5\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 35\nSBM_parameters['p'] = 0.5\nSBM_parameters['q'] = 0.2\nSBM_parameters['p_pattern'] = 0.5\nSBM_parameters['q_pattern'] = 
0.5\nSBM_parameters['vocab_size'] = 3\nSBM_parameters['size_subgraph'] = 20\nprint(SBM_parameters)\n\ndataset_train = []\ndataset_val = []\ndataset_test = []\nfor idx in range(nb_pattern_instances):\n\n print('pattern:', idx)\n\n SBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'], SBM_parameters['p'])\n SBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'], size=SBM_parameters['size_subgraph'])\n\n for _ in range(nb_train_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_train.append(graph)\n\n for _ in range(nb_test_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_val.append(graph)\n\n for _ in range(nb_test_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_test.append(graph)\n\nprint(len(dataset_train), len(dataset_val), len(dataset_test))\n\nplot_histo_graphs(dataset_train, 'train')\nplot_histo_graphs(dataset_val, 'val')\nplot_histo_graphs(dataset_test, 'test')\n\nwith open('SBM_PATTERN_train.pkl', \"wb\") as f:\n pickle.dump(dataset_train, f)\n\nwith open('SBM_PATTERN_train.pkl', 'rb') as f:\n data = pickle.load(f)\n\n # W_list = list(map(lambda d: d['W'].numpy(), data))\nW_lists = list(map(lambda d: d['W'].numpy(), data))\n#rand_idx_list = list(map(lambda d: d['rand_idx'], data))\n#node_feat_list = list(map(lambda d: d['node_feat'], data))\nnode_label_list = list(map(lambda d: d['node_label'].numpy(), data))\n\n\nclass ProgressSmoothing:\n def __init__(self, g_nx):\n self.g_nx = g_nx\n\n def _get_weight_list(self, a, m, neighbor_list_dict):\n denominator = 0\n weight_list = [0 for _ in range(m)]\n for h in range(0, m):\n weighting = np.power(a, (m - h))\n # print(len(neighbor_list_dict[h]))\n num_nodes = len(neighbor_list_dict[h])\n weight_list[h] = weighting * num_nodes\n\n # print(weighting, \"@\")\n # print(num_nodes, \"#\")\n denominator += weighting * num_nodes\n # print(type(denominator))\n # print(type(weight_list))\n # print(weight_list/denominator)\n return weight_list / denominator\n\n def nei_dict(self, hop_dict):\n neighbor_list_dict = {} # neighbor_list_dict = {which_hop: [index1, index5, ....]}\n for u, h in hop_dict.items(): # hop_dict = {neighbor_id : which_hop}\n if not h in neighbor_list_dict.keys():\n n_list = [u] # include self node\n neighbor_list_dict[h] = n_list\n else:\n neighbor_list_dict[h].append(u)\n return neighbor_list_dict\n\n def get_neigh_smooth_weight(self, v, a):\n # hop_dict = nx.single_source_shortest_path_length(self.g_nx, v)\n hop_dict = nx.single_source_shortest_path_length(self.g_nx, v, 2)\n neighbor_list_dict = self.nei_dict(hop_dict)\n # print(neighbor_list_dict)\n m = np.max(list(neighbor_list_dict.keys()))\n weight_list = self._get_weight_list(a, m, neighbor_list_dict)\n # print(weight_list)\n nidx_weight_list = []\n for h in range(0, m):\n for u in neighbor_list_dict[h]:\n nidx_weight_list.append((int(u), weight_list[h]))\n return nidx_weight_list\n\n def smooth_all(self, a, 
labels):\n total_nidx_weight_list = []\n for v in list(g_nx.nodes):\n # print(v)\n nidx_weight_list = self.get_neigh_smooth_weight(v, a)\n # print(nidx_weight_list)\n total_nidx_weight_list.extend(nidx_weight_list)\n smoothed_labels = labels.copy()\n smoothed_labels = smoothed_labels.astype(float)\n for u, w in total_nidx_weight_list:\n smoothed_labels[u] *= float(w)\n return smoothed_labels\n\n\ntrain_label = []\nfor W, labels in zip(W_lists, node_label_list):\n # train_W =[]\n # W = W.numpy()\n # labels = node_label_list.numpy()\n g_nx = nx.from_numpy_matrix(W)\n ps = ProgressSmoothing(g_nx=g_nx)\n # train_W.append(W)\n train_label.append(ps.smooth_all(8, labels))\n\nnode_label = train_label\n\n# new_data = [{'W':W, 'rand_idx': rand_idx, 'node_feat': node_feat, 'node_label': node_label}\n# for W, rand_idx, node_feat, node_label in zip(W_list, rand_idx_list, node_feat_list, node_label)]\n\nfor idx, smoothed_label in enumerate(node_label):\n data[idx]['node_label'] = torch.tensor(smoothed_label)\n\n# ps = ProgressSmoothing(g_nx=g_nx)\n# smoothed_labels = ps.smooth_all(2, labels)\n\n# with open('new_SBM_CLUSTER_train_0402_03_dataset.pkl', 'wb') as f:\n# pickle.dump(data, f)\n#\n# with open('smoothed_SBM_CLUSTER_0406', 'wb') as f:\n# pickle.dump(data, f)\n\n\ndataset_train = data\n\n\n\nwith open('SBM_PATTERN_train.pkl', \"wb\") as f:\n pickle.dump(dataset_train, f)\n\n\nwith open('SBM_PATTERN_val.pkl', \"wb\") as f:\n pickle.dump(dataset_val, f)\nwith open('SBM_PATTERN_test.pkl', \"wb\") as f:\n pickle.dump(dataset_test, f)\n\nprint('Time (sec):', time.time() - start) # 163s\n\n# %% md\n\n# Convert to DGL format and save with pickle\n\n# %%\n\nimport os\n\n#os.chdir('/') # go to root folder of the project\nprint(os.getcwd())\n\n# %%\n\n\nimport pickle\n\n# % load_ext\n# autoreload\n# % autoreload\n# 2\n\nfrom data.SBMs import SBMsDatasetDGL\n\nfrom data.data import LoadData\nfrom torch.utils.data import DataLoader\nfrom data.SBMs import SBMsDataset\n\n# %%\n\nDATASET_NAME = 'SBM_PATTERN'\ndataset = SBMsDatasetDGL(DATASET_NAME) # 4424s = 73min\n\n# %%\n\nprint(len(dataset.train))\nprint(len(dataset.val))\nprint(len(dataset.test))\n\nprint(dataset.train[0])\nprint(dataset.val[0])\nprint(dataset.test[0])\n\n# %%\n\nstart = time.time()\n\nwith open('data/SBMs/SBM_PATTERN_a8.pkl', 'wb') as f:\n pickle.dump([dataset.train, dataset.val, dataset.test], f)\n\nprint('Time (sec):', time.time() - start) # 21s\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# %% md\n\n# Notebook for generating and saving SBM PATTERN graphs\n\n# %%\n\nimport numpy as np\nimport torch\nimport pickle\nimport time\nimport networkx as nx\n# %matplotlib inline\nimport matplotlib.pyplot as plt\nimport scipy.sparse\n\n\n# %% md\n\n# Generate SBM PATTERN graphs\n\n# %%\n\n\ndef schuffle(W, c):\n # relabel the vertices at random\n idx = np.random.permutation(W.shape[0])\n # idx2=np.argsort(idx) # for index ordering wrt classes\n W_new = W[idx, :]\n W_new = W_new[:, idx]\n c_new = c[idx]\n return W_new, c_new, idx\n\n\ndef block_model(c, p, q):\n n = len(c)\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if c[i] == c[j]:\n prob = p\n else:\n prob = q\n if np.random.binomial(1, prob) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q):\n c = []\n for r in range(nb_of_clust):\n if clust_size_max == clust_size_min:\n clust_size_r = clust_size_max\n else:\n clust_size_r = np.random.randint(clust_size_min, clust_size_max, size=1)[0]\n val_r = 
np.repeat(r, clust_size_r, axis=0)\n c.append(val_r)\n c = np.concatenate(c)\n W = block_model(c, p, q)\n return W, c\n\n\ndef random_pattern(n, p):\n W = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n if np.random.binomial(1, p) == 1:\n W[i, j] = 1\n W[j, i] = 1\n return W\n\n\ndef add_pattern(W0, W, c, nb_of_clust, q):\n n = W.shape[0]\n n0 = W0.shape[0]\n V = (np.random.rand(n0, n) < q).astype(float)\n W_up = np.concatenate((W, V.T), axis=1)\n W_low = np.concatenate((V, W0), axis=1)\n W_new = np.concatenate((W_up, W_low), axis=0)\n c0 = np.full(n0, nb_of_clust)\n c_new = np.concatenate((c, c0), axis=0)\n return W_new, c_new\n\n\nclass generate_SBM_graph():\n\n def __init__(self, SBM_parameters):\n # parameters\n nb_of_clust = SBM_parameters['nb_clusters']\n clust_size_min = SBM_parameters['size_min']\n clust_size_max = SBM_parameters['size_max']\n p = SBM_parameters['p']\n q = SBM_parameters['q']\n p_pattern = SBM_parameters['p_pattern']\n q_pattern = SBM_parameters['q_pattern']\n vocab_size = SBM_parameters['vocab_size']\n W0 = SBM_parameters['W0']\n u0 = SBM_parameters['u0']\n\n # block model\n W, c = unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)\n\n # signal on block model\n u = np.random.randint(vocab_size, size=W.shape[0])\n\n # add the subgraph to be detected\n W, c = add_pattern(W0, W, c, nb_of_clust, q_pattern)\n u = np.concatenate((u, u0), axis=0)\n\n # shuffle\n W, c, idx = schuffle(W, c)\n u = u[idx]\n\n # target\n target = (c == nb_of_clust).astype(float)\n\n # convert to pytorch\n W = torch.from_numpy(W)\n W = W.to(torch.int8)\n idx = torch.from_numpy(idx)\n idx = idx.to(torch.int16)\n u = torch.from_numpy(u)\n u = u.to(torch.int16)\n target = torch.from_numpy(target)\n target = target.to(torch.int16)\n\n # attributes\n self.nb_nodes = W.size(0)\n self.W = W\n self.rand_idx = idx\n self.node_feat = u\n self.node_label = target\n\n\n# configuration\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 10\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 15 # 25\nSBM_parameters['p'] = 0.5 # 0.5\nSBM_parameters['q'] = 0.25 # 0.1\nSBM_parameters['p_pattern'] = 0.5 # 0.5\nSBM_parameters['q_pattern'] = 0.25 # 0.1\nSBM_parameters['vocab_size'] = 3\nSBM_parameters['size_subgraph'] = 10\nSBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'], SBM_parameters['p_pattern'])\nSBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'], size=SBM_parameters['size_subgraph'])\n\nprint(SBM_parameters)\n\ndata = generate_SBM_graph(SBM_parameters)\n\nprint(data)\n# print(data.nb_nodes)\n# print(data.W)\n# print(data.rand_idx)\n# print(data.node_feat)\n# print(data.node_label)\n\n\n# %%\n\n# Plot Adj matrix\n\nW = data.W\nplt.spy(W, precision=0.01, markersize=1)\nplt.show()\n\nidx = np.argsort(data.rand_idx)\nW = data.W\nW2 = W[idx, :]\nW2 = W2[:, idx]\nplt.spy(W2, precision=0.01, markersize=1)\nplt.show()\n\ntarget = data.node_label\ntarget = target[idx]\nprint(target)\n\n\n# %%\n\n\n# %%\n\n# Generate and save SBM graphs\n\nclass DotDict(dict):\n def __init__(self, **kwds):\n self.update(kwds)\n self.__dict__ = self\n\n\ndef plot_histo_graphs(dataset, title):\n # histogram of graph sizes\n graph_sizes = []\n for graph in dataset:\n graph_sizes.append(graph.nb_nodes)\n plt.figure(1)\n plt.hist(graph_sizes, bins=50)\n plt.title(title)\n plt.show()\n\n\nstart = time.time()\n\n# configuration for 100 patterns 100/20\nnb_pattern_instances = 100 # nb of patterns\nnb_train_graphs_per_pattern_instance = 100 # train 
per pattern\nnb_test_graphs_per_pattern_instance = 20 # test, val per pattern\nSBM_parameters = {}\nSBM_parameters['nb_clusters'] = 5\nSBM_parameters['size_min'] = 5\nSBM_parameters['size_max'] = 35\nSBM_parameters['p'] = 0.5\nSBM_parameters['q'] = 0.2\nSBM_parameters['p_pattern'] = 0.5\nSBM_parameters['q_pattern'] = 0.5\nSBM_parameters['vocab_size'] = 3\nSBM_parameters['size_subgraph'] = 20\nprint(SBM_parameters)\n\ndataset_train = []\ndataset_val = []\ndataset_test = []\nfor idx in range(nb_pattern_instances):\n\n print('pattern:', idx)\n\n SBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'], SBM_parameters['p'])\n SBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'], size=SBM_parameters['size_subgraph'])\n\n for _ in range(nb_train_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_train.append(graph)\n\n for _ in range(nb_test_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_val.append(graph)\n\n for _ in range(nb_test_graphs_per_pattern_instance):\n data = generate_SBM_graph(SBM_parameters)\n graph = DotDict()\n graph.nb_nodes = data.nb_nodes\n graph.W = data.W\n graph.rand_idx = data.rand_idx\n graph.node_feat = data.node_feat\n graph.node_label = data.node_label\n dataset_test.append(graph)\n\nprint(len(dataset_train), len(dataset_val), len(dataset_test))\n\nplot_histo_graphs(dataset_train, 'train')\nplot_histo_graphs(dataset_val, 'val')\nplot_histo_graphs(dataset_test, 'test')\n\nwith open('SBM_PATTERN_train.pkl', \"wb\") as f:\n pickle.dump(dataset_train, f)\n\nwith open('SBM_PATTERN_train.pkl', 'rb') as f:\n data = pickle.load(f)\n\n # W_list = list(map(lambda d: d['W'].numpy(), data))\nW_lists = list(map(lambda d: d['W'].numpy(), data))\n#rand_idx_list = list(map(lambda d: d['rand_idx'], data))\n#node_feat_list = list(map(lambda d: d['node_feat'], data))\nnode_label_list = list(map(lambda d: d['node_label'].numpy(), data))\n\n\nclass ProgressSmoothing:\n def __init__(self, g_nx):\n self.g_nx = g_nx\n\n def _get_weight_list(self, a, m, neighbor_list_dict):\n denominator = 0\n weight_list = [0 for _ in range(m)]\n for h in range(0, m):\n weighting = np.power(a, (m - h))\n # print(len(neighbor_list_dict[h]))\n num_nodes = len(neighbor_list_dict[h])\n weight_list[h] = weighting * num_nodes\n\n # print(weighting, \"@\")\n # print(num_nodes, \"#\")\n denominator += weighting * num_nodes\n # print(type(denominator))\n # print(type(weight_list))\n # print(weight_list/denominator)\n return weight_list / denominator\n\n def nei_dict(self, hop_dict):\n neighbor_list_dict = {} # neighbor_list_dict = {which_hop: [index1, index5, ....]}\n for u, h in hop_dict.items(): # hop_dict = {neighbor_id : which_hop}\n if not h in neighbor_list_dict.keys():\n n_list = [u] # include self node\n neighbor_list_dict[h] = n_list\n else:\n neighbor_list_dict[h].append(u)\n return neighbor_list_dict\n\n def get_neigh_smooth_weight(self, v, a):\n # hop_dict = nx.single_source_shortest_path_length(self.g_nx, v)\n hop_dict = nx.single_source_shortest_path_length(self.g_nx, v, 2)\n neighbor_list_dict = self.nei_dict(hop_dict)\n # print(neighbor_list_dict)\n m = 
np.max(list(neighbor_list_dict.keys()))\n weight_list = self._get_weight_list(a, m, neighbor_list_dict)\n # print(weight_list)\n nidx_weight_list = []\n for h in range(0, m):\n for u in neighbor_list_dict[h]:\n nidx_weight_list.append((int(u), weight_list[h]))\n return nidx_weight_list\n\n def smooth_all(self, a, labels):\n total_nidx_weight_list = []\n for v in list(g_nx.nodes):\n # print(v)\n nidx_weight_list = self.get_neigh_smooth_weight(v, a)\n # print(nidx_weight_list)\n total_nidx_weight_list.extend(nidx_weight_list)\n smoothed_labels = labels.copy()\n smoothed_labels = smoothed_labels.astype(float)\n for u, w in total_nidx_weight_list:\n smoothed_labels[u] *= float(w)\n return smoothed_labels\n\n\ntrain_label = []\nfor W, labels in zip(W_lists, node_label_list):\n # train_W =[]\n # W = W.numpy()\n # labels = node_label_list.numpy()\n g_nx = nx.from_numpy_matrix(W)\n ps = ProgressSmoothing(g_nx=g_nx)\n # train_W.append(W)\n train_label.append(ps.smooth_all(1, labels))\n\nnode_label = train_label\n\n# new_data = [{'W':W, 'rand_idx': rand_idx, 'node_feat': node_feat, 'node_label': node_label}\n# for W, rand_idx, node_feat, node_label in zip(W_list, rand_idx_list, node_feat_list, node_label)]\n\nfor idx, smoothed_label in enumerate(node_label):\n data[idx]['node_label'] = torch.tensor(smoothed_label)\n\n# ps = ProgressSmoothing(g_nx=g_nx)\n# smoothed_labels = ps.smooth_all(2, labels)\n\n# with open('new_SBM_CLUSTER_train_0402_03_dataset.pkl', 'wb') as f:\n# pickle.dump(data, f)\n#\n# with open('smoothed_SBM_CLUSTER_0406', 'wb') as f:\n# pickle.dump(data, f)\n\n\ndataset_train = data\n\n\n\nwith open('SBM_PATTERN_train.pkl', \"wb\") as f:\n pickle.dump(dataset_train, f)\n\n\nwith open('SBM_PATTERN_val.pkl', \"wb\") as f:\n pickle.dump(dataset_val, f)\nwith open('SBM_PATTERN_test.pkl', \"wb\") as f:\n pickle.dump(dataset_test, f)\n\nprint('Time (sec):', time.time() - start) # 163s\n\n# %% md\n\n# Convert to DGL format and save with pickle\n\n# %%\n\nimport os\n\n#os.chdir('/') # go to root folder of the project\nprint(os.getcwd())\n\n# %%\n\n\nimport pickle\n\n# % load_ext\n# autoreload\n# % autoreload\n# 2\n\nfrom data.SBMs import SBMsDatasetDGL\n\nfrom data.data import LoadData\nfrom torch.utils.data import DataLoader\nfrom data.SBMs import SBMsDataset\n\n# %%\n\nDATASET_NAME = 'SBM_PATTERN'\ndataset = SBMsDatasetDGL(DATASET_NAME) # 4424s = 73min\n\n# %%\n\nprint(len(dataset.train))\nprint(len(dataset.val))\nprint(len(dataset.test))\n\nprint(dataset.train[0])\nprint(dataset.val[0])\nprint(dataset.test[0])\n\n# %%\n\nstart = time.time()\n\nwith open('data/SBMs/SBM_PATTERN_a1.pkl', 'wb') as f:\n pickle.dump([dataset.train, dataset.val, dataset.test], f)\n\nprint('Time (sec):', time.time() - start) # 21s\n\n"
] | [
[
"numpy.concatenate",
"numpy.full",
"numpy.random.binomial",
"numpy.random.rand",
"numpy.zeros",
"matplotlib.pyplot.spy",
"numpy.random.permutation",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"torch.from_numpy",
"numpy.where",
"matplotlib.pyplot.hist",
"numpy.random.randint",
"torch.tensor",
"numpy.argsort",
"numpy.power",
"numpy.repeat",
"matplotlib.pyplot.show"
]
] |
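A minimal, self-contained sketch (hypothetical helper names, not code from the repository above) of the hop-weighted label smoothing that the ProgressSmoothing class in the preceding record implements: nodes at hop h from a centre node receive weight a**(m - h), scaled by the number of nodes at that hop and normalised over the truncated 2-hop neighbourhood, and the node labels are rescaled by those weights.

import networkx as nx
import numpy as np

def smooth_labels(g, labels, a=6.0, max_hop=2):
    # Hypothetical re-implementation of the smooth_all idea from the record above.
    smoothed = labels.astype(float).copy()
    for v in g.nodes:
        hops = nx.single_source_shortest_path_length(g, v, cutoff=max_hop)
        by_hop = {}
        for u, h in hops.items():          # group neighbours by hop distance
            by_hop.setdefault(h, []).append(u)
        m = max(by_hop)
        if m == 0:                         # isolated node: nothing to weight
            continue
        # weight for hop h is a**(m-h) times the number of nodes at that hop,
        # normalised so the weights over hops 0..m-1 sum to one
        w = np.array([a ** (m - h) * len(by_hop[h]) for h in range(m)], dtype=float)
        w /= w.sum()
        for h in range(m):
            for u in by_hop[h]:
                smoothed[u] *= w[h]
    return smoothed

g = nx.erdos_renyi_graph(20, 0.3, seed=0)
y = np.random.randint(0, 2, size=20)
print(smooth_labels(g, y, a=6.0))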
JsonLieo/RL_Study | [
"9519b999eab505a8396e6c2dab211d05cad0c2b8"
] | [
"RNN/DeepRNN.py"
] | [
"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n@author:LXM\n@file:DeepRNN.py\n@time:2020/10/13\n\"\"\"\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport matplotlib.pyplot as plt\nimport tensorflow.keras as keras\n\n# 定义RNN参数\nHIDDEN_SIZE = 30 # LSTM中隐藏节点的个数。\nNUM_LAYERS = 2 # Deep_LSTM的层数。\nTIMESTEPS = 10 # 循环神经网络的训练序列长度。\nTRAINING_STEPS = 3000 # 训练轮数。\nBATCH_SIZE = 32 # batch大小。\nTRAINING_EXAMPLES = 10000 # 训练数据个数。\nTESTING_EXAMPLES = 1000 # 测试数据个数。\nSAMPLE_GAP = 0.01 # 采样间隔。\n\n# 正弦函数采样\ndef generate_data(seq):\n X = []\n y = []\n # 序列的第i项和后面的TIMESTEPS-1项合在一起作为输入;第i + TIMESTEPS项作为输\n # 出。即用sin函数前面的TIMESTEPS个点的信息,预测第i + TIMESTEPS个点的函数值。\n for i in range(len(seq) - TIMESTEPS):\n X.append([seq[i: i + TIMESTEPS]])\n y.append([seq[i + TIMESTEPS]])\n return np.array(X, dtype=np.float32), np.array(y, dtype=np.float32)\n\n\n\n# 定义网络结果和优化步骤\ndef lstm_model(X, y, is_training):\n\n cell = tf.nn.rnn_cell.MultiRNNCell([\n tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)\n for _ in range(NUM_LAYERS)])\n\n # 使用TensorFlow接口将多层的LSTM结构连接成RNN网络并计算其前向传播结果。dynamic_rnn被keras.layers.RNN代替\n # outputs, _ = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)\n # output = outputs[:, -1, :]\n output= keras.layers.RNN(cell)(X)\n\n # 对LSTM网络的输出再做加一层全链接层并计算损失。注意这里默认的损失为平均\n # 平方差损失函数。\n predictions=tf.layers.Dense(1,activation=None)(output)\n # predictions=keras.layers.Dense(1)(output)\n # predictions =tf.contrib.layers.fully_connected(output, 1, activation_fn=None)\n # predictions = tf.contrib.layers.fully_connected(\n # output, 1, activation_fn=None)\n\n # 只在训练时计算损失函数和优化步骤。测试时直接返回预测结果。\n if not is_training:\n return predictions, None, None\n\n # 计算损失函数。\n loss = tf.losses.mean_squared_error(labels=y, predictions=predictions)\n\n # 创建模型优化器并得到优化步骤。\n # 优化损失函数\n train_op=tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss)\n # train_op = tf.contrib.layers.optimize_loss(\n # loss, tf.train.get_global_step(),\n # optimizer=\"Adagrad\", learning_rate=0.1)\n\n\n return predictions, loss, train_op\n\n#\ndef train(sess, train_X, train_y):\n ds = tf.data.Dataset.from_tensor_slices((train_X, train_y))\n ds = ds.repeat().shuffle(1000).batch(BATCH_SIZE)\n X, y = ds.make_one_shot_iterator().get_next()\n\n # 定义模型,得到预测结果、损失函数,和训练操作。\n with tf.variable_scope(\"model\"):\n predictions, loss, train_op = lstm_model(X, y, True)\n # 训练模型。\n sess.run(tf.global_variables_initializer())\n for i in range(TRAINING_STEPS):\n _, l = sess.run([train_op, loss])\n if i % 1000 == 0:\n print(\"train step: \" + str(i) + \", loss: \" + str(l))\n\n\n# 定义测试步骤\ndef run_eval(sess, test_X, test_y):\n # 将测试数据以数据集的方式提供给计算图。\n ds = tf.data.Dataset.from_tensor_slices((test_X, test_y))\n ds = ds.batch(1)\n X, y = ds.make_one_shot_iterator().get_next()\n\n # 调用模型得到计算结果。这里不需要输入真实的y值。\n with tf.variable_scope(\"model\", reuse=True):\n prediction, _, _ = lstm_model(X, [0.0], False)\n\n # 将预测结果存入一个数组。\n predictions = []\n labels = []\n for i in range(TESTING_EXAMPLES):\n p, l = sess.run([prediction, y])\n predictions.append(p)\n labels.append(l)\n\n # 计算rmse作为评价指标。\n predictions = np.array(predictions).squeeze()\n labels = np.array(labels).squeeze()\n rmse = np.sqrt(((predictions - labels) ** 2).mean(axis=0))\n print(\"Root Mean Square Error is: %f\" % rmse)\n\n # 对预测的sin函数曲线进行绘图。\n plt.figure()\n plt.plot(predictions, label='predictions')\n plt.plot(labels, label='real_sin')\n plt.legend()\n plt.show()\n\n# 执行训练和测试\n# 将训练数据以数据集的方式提供给计算图。\nif __name__ == '__main__':\n # 用正弦函数生成训练和测试数据集合。\n test_start = (TRAINING_EXAMPLES + TIMESTEPS) * SAMPLE_GAP\n 
test_end = test_start + (TESTING_EXAMPLES + TIMESTEPS) * SAMPLE_GAP\n train_X, train_y = generate_data(np.sin(np.linspace(\n 0, test_start, TRAINING_EXAMPLES + TIMESTEPS, dtype=np.float32)))\n test_X, test_y = generate_data(np.sin(np.linspace(\n test_start, test_end, TESTING_EXAMPLES + TIMESTEPS, dtype=np.float32)))\n\n with tf.Session() as sess:\n # 训练模型\n train(sess,train_X,train_y)\n # # 使用训练好的模型对测试数据进行预测。\n # print\n # \"Evaluate model after training.\"\n run_eval(sess, test_X, test_y)\n"
] | [
[
"numpy.array",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.keras.layers.RNN",
"tensorflow.compat.v1.data.Dataset.from_tensor_slices",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.nn.rnn_cell.BasicLSTMCell",
"matplotlib.pyplot.figure",
"tensorflow.compat.v1.losses.mean_squared_error",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.layers.Dense",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] |
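For the DeepRNN.py record above, a small stand-alone sketch (hypothetical names, plain NumPy) of the sliding-window construction that its generate_data function performs: each window of TIMESTEPS consecutive sine samples becomes one input and the sample that follows the window becomes its target.

import numpy as np

TIMESTEPS = 10

def make_windows(seq, timesteps=TIMESTEPS):
    # X[i] holds seq[i : i+timesteps]; y[i] is the next sample seq[i+timesteps].
    X = np.array([seq[i:i + timesteps] for i in range(len(seq) - timesteps)],
                 dtype=np.float32)
    y = np.array(seq[timesteps:], dtype=np.float32)
    # match the (batch, 1, timesteps) / (batch, 1) shapes produced by generate_data
    return X[:, np.newaxis, :], y[:, np.newaxis]

seq = np.sin(np.linspace(0.0, 100.0, 10000, dtype=np.float32))
X, y = make_windows(seq)
print(X.shape, y.shape)  # (9990, 1, 10) (9990, 1)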
DD-DeCaF/upload | [
"6f938ae3012eecac292f7a531780f9f0c9873903"
] | [
"src/upload/upload.py"
] | [
"# Copyright 2018 Novo Nordisk Foundation Center for Biosustainability, DTU.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport pandas as pd\nfrom datetime import datetime\nfrom potion_client.exceptions import ItemNotFound\nfrom goodtables import Inspector\nfrom dateutil.parser import parse as parse_date\nimport json\nfrom os.path import abspath, join, exists\nfrom requests import HTTPError\nfrom copy import deepcopy\n\nfrom upload.constants import measurement_test, compound_skip\nfrom upload.checks import genotype_not_gnomic\nfrom upload import _isnan\n\n\nlogger = logging.getLogger(__name__)\n\ndef place_holder_compound_synonym_mapper(synonym):\n return synonym\n\n\ndef get_schema(schema_name):\n default_schemas = {'strains': 'strains_schema.json',\n 'media': 'media_schema.json',\n 'sample_information': 'sample_information_schema.json',\n 'physiology': 'physiology_schema.json',\n 'screen': 'screen_schema.json',\n 'fluxes': 'fluxes_schema.json',\n 'protein_abundances': 'protein_abundances_schema.json'}\n schema_name = default_schemas[schema_name]\n schema_dir = abspath(join(\"data\", \"schemas\"))\n schema = join(schema_dir, schema_name)\n if not exists(schema):\n raise FileNotFoundError('missing schema %s' % schema)\n return schema\n\n\nclass DataFrameInspector(object):\n \"\"\" class for inspecting a table and reading it to a DataFrame\n\n\n :param file_name: name of the csv file to read\n :param schema_name: name of the json file specifying the scheme, possibly one of the schema in this package\n without path\n :param custom_checks: list of additional custom check functions to apply\n \"\"\"\n\n def __init__(self, file_name, schema_name, custom_checks=None):\n self.schema = get_schema(schema_name)\n self.file_name = file_name\n self.custom_checks = custom_checks if custom_checks else []\n\n def inspect(self):\n \"\"\" inspect the data frame and return an error report \"\"\"\n inspector = Inspector(custom_checks=self.custom_checks, order_fields=True)\n report = inspector.inspect(self.file_name, preset='table', schema=self.schema)\n if not report['valid']:\n raise ValueError(json.dumps(report, indent=4))\n\n def __call__(self):\n \"\"\" inspect and read to DataFrame \"\"\"\n self.inspect()\n return pd.read_csv(self.file_name)\n\n\ndef inspected_data_frame(file_name, schema_name, custom_checks=None):\n \"\"\"inspect and read a csv file\n\n :param file_name: name of the csv file to read\n :param schema_name: name of the json file specifying the scheme, possibly one of the schema in this package\n without path\n :param custom_checks: list of additional custom check functions to apply\n :return DataFrame: the inspected data frame\n \"\"\"\n return DataFrameInspector(file_name=file_name, schema_name=schema_name,\n custom_checks=custom_checks)()\n\n\nclass AbstractDataUploader(object):\n \"\"\" abstract class for uploading data to iloop \"\"\"\n\n def __init__(self, project):\n self.project = project\n\n def upload(self, iloop):\n raise NotImplementedError\n\n\nclass 
MediaUploader(AbstractDataUploader):\n \"\"\"upload media definitions\n\n inspect file using 'media_schema.json'. Upload if no existing medium with the exact same recipe. Key for the\n medium is generated using current date.\n\n :param project: project object\n :param file_name: name of the csv file to read\n \"\"\"\n\n def __init__(self, project, file_name, custom_checks, synonym_mapper=place_holder_compound_synonym_mapper):\n super(MediaUploader, self).__init__(project)\n self.df = inspected_data_frame(file_name, 'media', custom_checks=custom_checks)\n self.iloop_args = []\n self.synonym_mapper = synonym_mapper\n self.prepare_upload()\n\n def prepare_upload(self):\n # directly naming the column 'compound' triggers a curious error when slicing\n self.df['chebi_name'] = pd.Series(\n [self.synonym_mapper(synonym) for synonym in\n self.df['compound_name']],\n index=self.df.index)\n\n self.df = self.df[self.df.chebi_name != compound_skip]\n grouped_media = self.df.groupby(['medium'])\n for medium_name, medium in grouped_media:\n ingredients_df = medium[['chebi_name', 'concentration']]\n ingredients_df.columns = ['compound', 'concentration']\n ingredients = list(ingredients_df.T.to_dict().values())\n if len(medium.pH.unique()) > 1:\n raise ValueError('expected only on pH per medium')\n ph = float(medium.iloc[0].pH)\n now = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n self.iloop_args.append((\n medium_name,\n ingredients,\n {\n 'name': medium_name,\n 'identifier': '{}_{}'.format(medium_name, now),\n 'ph': ph\n })\n )\n\n def upload(self, iloop):\n for medium_name, ingredients, item in self.iloop_args:\n for k, v in item.items():\n if isinstance(v, str):\n item[k] = v.strip()\n media_object = iloop.Medium.create(**item, organization=self.project.organization)\n media_object.update_contents(ingredients)\n\n\nclass StrainsUploader(AbstractDataUploader):\n \"\"\"upload strain definitions\n\n inspect file using 'strains_schema.json' then sort the input data frame to make sure that parents are created\n before their children to avoid broken links.\n\n :param project: project object\n :param file_name: name of the csv file to read\n \"\"\"\n\n def __init__(self, project, file_name):\n super(StrainsUploader, self).__init__(project)\n self.df = inspected_data_frame(file_name, 'strains', custom_checks=[genotype_not_gnomic])\n self.iloop_args = []\n self.prepare_upload()\n\n def prepare_upload(self):\n def depth(df, i, key, key_parent):\n if _isnan(df.loc[i][key_parent]):\n return 0\n else:\n try:\n return depth(df, df[df[key] == df.loc[i][key_parent]].index[0], key, key_parent) + 1\n except IndexError:\n return 0 # parent assumed to already be defined\n\n self.df['depth_pool'] = [depth(self.df, i, 'pool', 'parent_pool') for i in self.df.index]\n self.df['depth_strain'] = [depth(self.df, i, 'strain', 'parent_strain') for i in self.df.index]\n self.df = self.df.sort_values(by=['depth_pool', 'depth_strain'])\n for strain in self.df.itertuples():\n genotype_pool = '' if str(strain.genotype_pool) == 'nan' else strain.genotype_pool\n genotype_strain = '' if str(strain.genotype_strain) == 'nan' else strain.genotype_strain\n self.iloop_args.append({\n 'pool_alias': strain.pool,\n 'pool_type': strain.pool_type,\n 'parent_pool_alias': strain.parent_pool,\n 'genotype_pool': genotype_pool,\n 'strain_alias': strain.strain,\n 'parent_strain_alias': strain.parent_strain,\n 'genotype': genotype_strain,\n 'is_reference': bool(strain.reference),\n 'organism': strain.organism,\n 'project': self.project\n })\n\n def 
upload(self, iloop):\n for item in self.iloop_args:\n item = {k: v.strip() for k, v in item.items() if isinstance(v, str)}\n try:\n iloop.Strain.one(where={'alias': item['strain_alias'], 'project': self.project})\n except ItemNotFound:\n try:\n pool_object = iloop.Pool.one(where={'alias': item['pool_alias'], 'project': self.project})\n except ItemNotFound:\n parent_pool_object = None\n if 'parent_pool_alias' in item and not _isnan(item['parent_pool_alias']):\n try:\n parent_pool_object = iloop.Pool.one(where={'alias': item['parent_pool_alias'],\n 'project': self.project})\n except ItemNotFound:\n raise ItemNotFound('missing pool %s' % item['parent_strain_alias'])\n iloop.Pool.create(alias=item['pool_alias'],\n project=self.project,\n parent_pool=parent_pool_object,\n genotype=item['genotype_pool'],\n type=item['pool_type'])\n pool_object = iloop.Pool.one(where={'alias': item['pool_alias'], 'project': self.project})\n parent_object = None\n if 'parent_strain_alias' in item and not _isnan(item['parent_strain_alias']):\n try:\n parent_object = iloop.Strain.one(where={'alias': item['parent_strain_alias'],\n 'project': self.project})\n except ItemNotFound:\n raise ItemNotFound('missing strain %s' % item['parent_strain_alias'])\n iloop.Strain.create(alias=item['strain_alias'],\n pool=pool_object,\n project=self.project,\n parent_strain=parent_object,\n is_reference=bool(item.get('is_reference', False)),\n organism=item['organism'],\n genotype=item['genotype'])\n\n\nclass ExperimentUploader(AbstractDataUploader):\n \"\"\"uploader for experiment data\n \"\"\"\n\n def __init__(self, project, type, sample_name, overwrite=True,\n synonym_mapper=place_holder_compound_synonym_mapper):\n super(ExperimentUploader, self).__init__(project)\n self.overwrite = overwrite\n self.synonym_mapper = synonym_mapper\n self.type = type\n self.sample_name = sample_name\n self.experiment_keys = []\n self.assay_cols = ['unit', 'parameter', 'numerator_chebi', 'denominator_chebi']\n self.samples_df = None\n self.df = None\n\n def extra_transformations(self):\n self.df['numerator_chebi'] = self.df['numerator_compound_name'].apply(self.synonym_mapper)\n self.df['denominator_chebi'] = self.df['denominator_compound_name'].apply(self.synonym_mapper)\n self.df['test_id'] = self.df[self.assay_cols].apply(lambda x: '_'.join(str(i) for i in x), axis=1)\n if self.df[['sample_id', 'test_id']].duplicated().any():\n raise ValueError('found duplicated rows, should not have happened')\n\n def upload(self, iloop):\n pass\n\n def upload_experiment_info(self, iloop):\n conditions_keys = list(set(self.samples_df.columns.values).difference(set(self.experiment_keys)))\n grouped_experiment = self.samples_df.groupby('experiment')\n for exp_id, experiment in grouped_experiment:\n exp_info = experiment[self.experiment_keys].drop_duplicates()\n exp_info = next(exp_info.itertuples())\n try:\n existing = iloop.Experiment.one(where={'identifier': exp_id, 'project': self.project})\n timestamp = existing.date.strftime('%Y-%m-%d')\n if str(timestamp) != exp_info.date:\n if not self.overwrite:\n raise HTTPError('existing mismatching experiment %s' % exp_id)\n else:\n logger.info('archiving existing experiment {}'.format(exp_id))\n existing.archive()\n raise ItemNotFound\n except ItemNotFound:\n logger.info('creating new experiment {}'.format(exp_id))\n sample_info = experiment[conditions_keys].set_index(self.sample_name)\n conditions = _cast_non_str_to_float(experiment[self.experiment_keys].iloc[0].to_dict())\n conditions = {key: value for key, value in 
conditions.items() if not _isnan(value)}\n iloop.Experiment.create(project=self.project,\n type=self.type,\n identifier=exp_id,\n date=parse_date(exp_info.date),\n description=exp_info.description,\n attributes={'conditions': conditions,\n 'operation': sample_info.to_dict()['operation'],\n 'temperature': float(exp_info.temperature)})\n\n\nclass FermentationUploader(ExperimentUploader):\n \"\"\"uploader for experiment and sample descriptions and associated physiology data\n\n require two files, one that tabulates the information about an experiment and the samples associated with that\n experiment, and one for the physiology data. Validate with 'sample_information_schema.json' and\n 'physiology_schema.json' respectively. Upload first the experiment details (optionally overwrite any existing\n experiment with the same name first). Then upload the samples with associated physiology data.\n\n :param project: project object\n :param samples_file_name: name of the csv file to read\n :param physiology_file_name: name of the csv file to read\n \"\"\"\n\n def __init__(self, project, samples_file_name, physiology_file_name, custom_checks, overwrite=True,\n synonym_mapper=place_holder_compound_synonym_mapper):\n super(FermentationUploader, self).__init__(project, type='fermentation', sample_name='reactor',\n overwrite=overwrite, synonym_mapper=synonym_mapper)\n self.assay_cols.extend(['phase_start', 'phase_end'])\n self.experiment_keys = ['experiment', 'description', 'date', 'do', 'gas', 'gasflow', 'ph_set', 'ph_correction',\n 'stirrer', 'temperature']\n self.samples_df = inspected_data_frame(samples_file_name, 'sample_information', custom_checks=custom_checks)\n self.samples_df['sample_id'] = self.samples_df[['experiment', 'reactor']].apply(lambda x: '_'.join(x), axis=1)\n sample_ids = self.samples_df['sample_id'].copy()\n sample_ids.sort_values(inplace=True)\n physiology_validator = DataFrameInspector(physiology_file_name, 'physiology', custom_checks=custom_checks)\n with open(physiology_validator.schema) as json_schema:\n physiology_schema = json.load(json_schema)\n for sample_id in sample_ids:\n physiology_schema['fields'].append({\n 'name': sample_id,\n 'title': 'measurements for {}'.format(sample_id),\n 'type': 'number'\n })\n physiology_validator.schema = json.dumps(physiology_schema)\n self.physiology_df = physiology_validator()\n sample_cols = ['sample_id', 'experiment', 'reactor', 'operation',\n 'feed_medium', 'batch_medium', 'strain']\n self.df = (pd.melt(self.physiology_df,\n id_vars=['phase_start', 'phase_end', 'quantity', 'parameter',\n 'denominator_compound_name', 'numerator_compound_name', 'unit'],\n var_name='sample_id')\n .merge(self.samples_df[sample_cols], on='sample_id'))\n self.extra_transformations()\n\n def upload(self, iloop):\n self.upload_experiment_info(iloop)\n self.upload_physiology(iloop)\n\n def upload_physiology(self, iloop):\n for exp_id, experiment in self.df.groupby(['experiment']):\n scalars = []\n sample_dict = {}\n experiment_object = iloop.Experiment.one(where={'identifier': exp_id, 'project': self.project})\n sample_info = experiment[['feed_medium', 'batch_medium', 'reactor', 'strain']].drop_duplicates()\n for sample in sample_info.itertuples():\n sample_dict[sample.reactor] = {\n 'name': sample.reactor,\n 'strain': iloop.Strain.one(where={'alias': sample.strain, 'project': self.project}),\n 'medium': iloop.Medium.one(where={'name': sample.batch_medium}),\n 'feed_medium': iloop.Medium.one(where={'name': sample.feed_medium})\n }\n for phase_num, phase in 
experiment.groupby(['phase_start', 'phase_end']):\n phase_object = get_create_phase(iloop, float(phase.phase_start.iloc[0]),\n float(phase.phase_end.iloc[0]), experiment_object)\n for test_id, assay in phase.groupby('test_id'):\n row = assay.iloc[0].copy()\n test = measurement_test(row.unit, row.parameter, row.numerator_chebi, row.denominator_chebi,\n row.quantity)\n a_scalar = {\n 'measurements': {reactor.reactor: [float(reactor.value)] for reactor in assay.itertuples()},\n 'test': deepcopy(test),\n 'phase': phase_object\n }\n scalars.append(a_scalar)\n experiment_object.add_samples({'samples': sample_dict, 'scalars': scalars})\n\n\nclass ScreenUploader(ExperimentUploader):\n \"\"\"uploader for screening data\n \"\"\"\n\n def __init__(self, project, file_name, custom_checks, overwrite=True,\n synonym_mapper=place_holder_compound_synonym_mapper):\n super(ScreenUploader, self).__init__(project, type='screening', sample_name='well',\n overwrite=overwrite, synonym_mapper=synonym_mapper)\n self.experiment_keys = ['project', 'experiment', 'description', 'date', 'temperature']\n self.df = inspected_data_frame(file_name, 'screen', custom_checks=custom_checks)\n self.df['project'] = self.project.code\n self.df['barcode'] = self.df[['project', 'experiment', 'plate_name']].apply(lambda x: '_'.join(x), axis=1)\n self.df['well'] = self.df[['row', 'column']].apply(lambda x: ''.join(str(y) for y in x), axis=1)\n self.df['sample_id'] = self.df[['barcode', 'well']].apply(lambda x: '_'.join(x), axis=1)\n self.samples_df = self.df\n self.df.dropna(0, subset=['value'], inplace=True)\n self.extra_transformations()\n\n def upload(self, iloop):\n self.upload_experiment_info(iloop)\n self.upload_plates(iloop)\n self.upload_screen(iloop)\n\n def upload_plates(self, iloop):\n for exp_id, experiment in self.df.groupby(['experiment']):\n experiment_object = iloop.Experiment.one(where={'identifier': exp_id, 'project': self.project})\n plates_df = self.df[['experiment', 'barcode', 'well', 'medium', 'strain', 'plate_model']].drop_duplicates()\n for barcode, plate in plates_df.groupby(['barcode']):\n plate_info = plate[['well', 'medium', 'strain']].set_index('well')\n contents = {}\n for well in plate_info.itertuples():\n contents[well.Index] = {\n 'strain': iloop.Strain.one(where={'alias': well.strain, 'project': self.project}),\n 'medium': iloop.Medium.one(where={'name': well.medium})\n }\n try:\n plate = iloop.Plate.one(where={'barcode': barcode, 'project': self.project})\n plate.update_contents(contents)\n except ItemNotFound:\n iloop.Plate.create(barcode=barcode, experiment=experiment_object, contents=contents,\n type=plate.plate_model[0], project=self.project)\n\n def upload_screen(self, iloop):\n for exp_id, experiment in self.df.groupby(['experiment']):\n experiment_object = iloop.Experiment.one(where={'identifier': exp_id, 'project': self.project})\n sample_dict = {}\n scalars = []\n\n for barcode, plate in experiment.groupby(['barcode']):\n sample_info = plate[['sample_id', 'well']].drop_duplicates()\n plate_object = iloop.Plate.one(where={'barcode': barcode, 'project': self.project})\n for sample in sample_info.itertuples():\n sample_dict[sample.sample_id] = {\n 'plate': plate_object,\n 'position': sample.well,\n }\n\n for test_id, assay in experiment.groupby('test_id'):\n row = assay.iloc[0].copy()\n test = measurement_test(row.unit, row.parameter, row.numerator_chebi, row.denominator_chebi,\n row.quantity)\n\n a_scalar = {\n 'measurements': {sample.sample_id: [float(sample.value)] for sample in 
assay.itertuples()},\n 'test': deepcopy(test),\n }\n scalars.append(a_scalar)\n experiment_object.add_samples({'samples': sample_dict, 'scalars': scalars})\n\n\nclass XrefMeasurementUploader(ExperimentUploader):\n \"\"\"uploader for data associated with an entity define in an external database, e.g. a sequence or a reaction\n \"\"\"\n\n def __init__(self, project, file_name, custom_checks, subject_type, overwrite=True):\n super(XrefMeasurementUploader, self).__init__(project, type='fermentation', sample_name='sample_name',\n overwrite=overwrite)\n self.experiment_keys = ['project', 'experiment', 'description', 'date', 'temperature']\n inspection_key = dict(protein='protein_abundances', reaction='fluxes')[subject_type]\n self.df = inspected_data_frame(file_name, inspection_key, custom_checks=custom_checks)\n self.df['project'] = self.project.code\n self.samples_df = self.df\n self.subject_type = subject_type\n self.df.dropna(0, subset=['value'], inplace=True)\n\n def upload(self, iloop):\n self.upload_experiment_info(iloop)\n self.upload_sample_info(iloop)\n self.upload_measurements(iloop)\n\n def upload_sample_info(self, iloop):\n sample_info = self.df[['experiment', 'medium', 'sample_name', 'strain']].drop_duplicates()\n for sample in sample_info.itertuples():\n experiment = iloop.Experiment.one(where={'identifier': sample.experiment, 'project': self.project})\n try:\n return iloop.Sample.one(where={'name': sample.sample_name, 'experiment': experiment})\n except ItemNotFound:\n logger.info('creating new sample {}'.format(sample.sample_name))\n medium = iloop.Medium.one(where={'name': sample.medium})\n strain = iloop.Strain.one(where={'alias': sample.strain, 'project': self.project})\n iloop.Sample.create(experiment=experiment,\n project=self.project,\n name=sample.sample_name,\n medium=medium,\n strain=strain)\n\n def upload_measurements(self, iloop):\n accessions_df = self.df['xref_id'].str.split(':', expand=True)\n accessions_df.columns = ['db_name', 'accession']\n self.df = self.df.join(accessions_df)\n measurement_grouping = self.df.groupby(['sample_name', 'phase_start', 'phase_end'])\n unique_df = measurement_grouping[['mode', 'db_name']].nunique()\n if (unique_df['mode'] != 1).any() or (unique_df['db_name'] != 1).any():\n raise ValueError('multiple mode/db_names in upload not supported')\n for grouping, df in measurement_grouping:\n sample_name, phase_start, phase_end = grouping\n experiment_object = iloop.Experiment.one(where={'identifier': df['experiment'].iat[0],\n 'project': self.project})\n sample_object = iloop.Sample.one(where={'name': sample_name, 'experiment': experiment_object})\n phase_object = get_create_phase(iloop, float(phase_start), float(phase_end),\n sample_object.experiment)\n sample_object.add_xref_measurements(phase=phase_object, type=self.subject_type,\n values=df['value'].tolist(),\n accessions=df['accession'].tolist(),\n db_name=df['db_name'].iat[0],\n mode=df['mode'].iat[0])\n\n\ndef _cast_non_str_to_float(dictionary):\n for key in dictionary:\n if not isinstance(dictionary[key], str):\n dictionary[key] = float(dictionary[key])\n return dictionary\n\n\ndef get_create_phase(iloop, start, end, experiment):\n try:\n phase_object = iloop.ExperimentPhase.one(where={'start': start, 'end': end,\n 'experiment': experiment})\n except ItemNotFound:\n phase_object = iloop.ExperimentPhase.create(experiment=experiment,\n start=start,\n end=end,\n title='{}__{}'.format(start, end))\n return phase_object\n"
] | [
[
"pandas.melt",
"pandas.read_csv"
]
] |
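For the upload.py record above, a minimal sketch (toy DataFrame with hypothetical values) of the depth-based ordering used by StrainsUploader.prepare_upload: each row's depth is the length of its parent chain, and sorting by depth ensures parent strains are created before their children.

import pandas as pd

def depth(df, alias, key="strain", parent_key="parent_strain"):
    # Walk the parent chain until a row with no (known) parent is reached.
    parent = df.loc[df[key] == alias, parent_key].iloc[0]
    if pd.isna(parent) or parent not in set(df[key]):
        return 0
    return depth(df, parent, key, parent_key) + 1

df = pd.DataFrame({
    "strain": ["C", "A", "B"],
    "parent_strain": ["B", None, "A"],
})
df["depth_strain"] = [depth(df, s) for s in df["strain"]]
print(df.sort_values("depth_strain"))  # A (depth 0), then B (1), then C (2)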
kavehshamsi/scadec | [
"3cc1e0eba5db12be5b16aea7b7fd4909faf42714"
] | [
"python/sca_attack.py"
] | [
"import time\n#from pyftdi.ftdi import Ftdi\n#ft = Ftdi()\n#url = 'ftdi://ftdi:2232:FT2RTNYW/2'\n#ft.open_bitbang_from_url(url, 0b00001111)\n#print(ft.is_connected)\n#print(ft.bitbang_enabled)\n#print(ft.device_version)\n#print(ft.fifo_sizes)\n#print(ft.get_identifiers(url))\n#print(ft.has_mpsse)\n#print(ft.has_wide_port)\n#print(ft.ic_name)\n#print('{:08b}'.format(ft.read_pins()))\n#time.sleep(5)\n#exit(1)\n\nfrom pyftdi.gpio import GpioController\nimport sys\nsys.path.insert(1, '/home/kaveh/Development/eclipse/neos/script/')\nfrom circuit import *\nimport vparse\nimport numpy as np\nimport os\nimport pyserial\n\nSCA_CLK = 0\nSCA_DATA = 1\nSCA_RESET = 2\n\nBITS = [1, 2, 4, 16, 32, 64, 128, 256]\n\nNUM_INS = 51\nWAIT = 0.01\n\nclass Controller:\n def __init__(self):\n self.gp = ''\n self.state = 0\n \n self.gp = GpioController()\n self.gp.open_from_url('ftdi://ftdi:2232:FT2RTNYW/2')\n print(self.gp.is_connected)\n print('{:08b}'.format(self.gp.pins))\n print('{:08b}'.format(self.gp.direction))\n self.gp.set_direction(0b11111111, 0b11111111)\n print('after config')\n print('{:08b}'.format(self.gp.pins))\n print('{:08b}'.format(self.gp.direction))\n \n self.state |= (BITS[SCA_CLK] | BITS[SCA_DATA] | BITS[SCA_RESET])\n self.commit_state() \n return\n\n def close(self):\n self.gp.set_direction(0b11111111, 0b00000000)\n return\n \n def __del__(self):\n self.gp.set_direction(0b11111111, 0b00000000)\n return\n \n def commit_state(self):\n self.gp.write(self.state) \n return\n\n def set_pin_val(self, pin_num, val):\n bit_mask = 1 << pin_num\n print(bin(self.state))\n if val:\n self.state |= bit_mask\n else:\n self.state &= (bit_mask ^ 0xFF)\n self.commit_state()\n return\n \n def bitbang_send(self, byte):\n self.set_pin_val(SCA_CLK, 1)\n for i in range(0, 8):\n self.set_pin_val(SCA_DATA, (byte >> i))\n time.sleep(WAIT)\n self.set_pin_val(SCA_CLK, 0)\n print('state: ', bin(self.state))\n time.sleep(WAIT)\n self.set_pin_val(SCA_CLK, 1)\n \n self.set_pin_val(SCA_CLK, 1)\n time.sleep(WAIT)\n self.set_pin_val(SCA_DATA, 1)\n return\n\n def reset_receiver(self):\n self.set_pin_val(SCA_RESET, 1)\n time.sleep(WAIT)\n self.set_pin_val(SCA_RESET, 0)\n time.sleep(WAIT)\n self.set_pin_val(SCA_RESET, 1)\n time.sleep(WAIT)\n return\n\n def send_to_device(self, bit_index, invals):\n assert len(invals) == NUM_INS \n \n self.reset_receiver()\n \n num_bytes = int(NUM_INS / 8)\n bytes = []\n for nb in range(0, num_bytes):\n byte2send = 0\n for i in range(0, 8):\n bit = int(invals[nb*8 + i] != 0)\n byte2send += (bit << i)\n print(bin(byte2send)[2:].zfill(8))\n bytes.append(byte2send)\n \n for nb in bytes:\n self.bitbang_send(nb)\n \n\nclass ScaUnlock():\n def __init__(self, enc_cir_filename, sim_cir_filename):\n self.enc_cir = Circuit(enc_cir_filename)\n self.sim_cir = Circuit(sim_cir_filename)\n \n \nimport pyvisa\nimport dill\nimport matplotlib.pyplot as plt\n\nclass Oscope():\n def __init__(self): \n self.rm = pyvisa.ResourceManager()\n self.devices = self.rm.list_resources()\n print(self.devices)\n self.devnum = int(input(\"which device to pick? 
\"))\n self.device_addr = self.devices[self.devnum]\n #device_addr = 'USB0::10893::6039::CN57266229::0::INSTR'\n print(\"trying to connect to \", self.device_addr)\n self.scope = self.rm.open_resource(self.device_addr) # Connect to the scope using the VISA address (see below)\n #print(scope)\n print(self.scope.query('*IDN?'))\n\n return\n \n def __del__(self):\n self.scope.close()\n return\n \n def collect_trace(self):\n # Allow plots to appear inline with the IPython notebook\n # scope.read_termination = '\\n' # For ascii transfers the read is terminated with a newline character\n self.scope.write(':WAVeform:SOURce CHANnel1')\n self.scope.write(':WAVeform:FORMat ASCII')\n self.scope.write(':WAVeform:POINts 10000')\n wfm_ascii = self.scope.query(':WAVeform:DATA?') # Get the first 1000 points of waveform data\n #print(wfm_ascii)\n\n with open('dill.txt', 'wb') as dill_file:\n dill.dump(wfm_ascii, dill_file)\n \n #print(wfm_ascii)\n x = []\n y = []\n\n for st in wfm_ascii.split(','):\n if '#' not in st:\n y.append(float(st))\n \n return y\n \n def plot_trace(self, y):\n x = range(0, len(y))\n plt.plot(x, y)\n plt.show()\n return\n\n \n \nif len(sys.argv) != 3:\n print('usage: sca_unlock.py <enc_cir> <sim_cir>')\n exit(1) \n \ncnt = Controller() \nunl = ScaUnlock(sys.argv[1], sys.argv[2])\nosc = Oscope()\n#unl.enc_cir.write_bench()\nNUM_INS = unl.enc_cir.num_ins_and_keys()\nprint('NUM_INS:', NUM_INS)\n\n\nwhile True:\n instr = input('enter character to roll ')\n if instr == 'q':\n cnt.close()\n exit(1)\n invals = np.random.randint(2, size=NUM_INS)\n bit_index = np.random.randint(0, NUM_INS - 1)\n print('bit index: {}'.format(bit_index))\n print('invals: ', end='')\n for b in invals:\n print(b, end='')\n print()\n \n cnt.send_to_device(bit_index, invals)\n time.sleep(1)\n #trace = osc.collect_trace()\n #osc.plot_trace(trace)\n \n# TEST MODE\n#while True:\n# instr = input(\"enter command: \")\n# cnt.bitbang_send(ord(instr[0]))\n# \n# if instr == 'r':\n# cnt.reset_receiver()\n# else:\n# bit_index = int(input(\"enter input index: \"))\n# input_bits = ''\n# while len(input_bits) < NUM_INS:\n# input_bits += input('enter input bits ({0}/{1}): '.format(len(input_bits), NUM_INS)) \n# input_bits = list(input_bits[0:NUM_INS - 1])\n# \n# num_bytes = int(NUM_INS / 8)\n# bytes = []\n# for nb in range(0, num_bytes):\n# byte2send = 0\n# for i in range(0, 8):\n# bit = int(input_bits[nb*8 + i] == '1')\n# byte2send += (bit << i)\n# print(bin(byte2send))\n# bytes.append(byte2send)\n\n# for byte in bytes:\n# cnt.bitbang_send(byte)\n \n\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.random.randint"
]
] |
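For the sca_attack.py record above, a short sketch (helper name is hypothetical) of the LSB-first bit packing that Controller.send_to_device applies before bit-banging: every group of eight input bits is folded into one byte, least-significant bit first.

def pack_bits(bits):
    # Fold each group of 8 bits into one byte, placing bits[nb*8 + i] at bit position i.
    out = []
    for nb in range(len(bits) // 8):
        byte = 0
        for i in range(8):
            byte |= (int(bits[nb * 8 + i]) & 1) << i
        out.append(byte)
    return out

print([format(b, "08b") for b in pack_bits([1, 0, 1, 1, 0, 0, 0, 0,
                                            0, 1, 0, 0, 0, 0, 0, 1])])
# ['00001101', '10000010']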
annagruendler/haystack | [
"49886f88f02c413d07828772b82a4d79b21c893c"
] | [
"haystack/reader/farm.py"
] | [
"import logging\nimport multiprocessing\nfrom pathlib import Path\nfrom typing import List, Optional, Union, Dict, Any\nfrom collections import defaultdict\nfrom time import perf_counter\n\nimport numpy as np\nfrom farm.data_handler.data_silo import DataSilo\nfrom farm.data_handler.processor import SquadProcessor\nfrom farm.data_handler.dataloader import NamedDataLoader\nfrom farm.data_handler.inputs import QAInput, Question\nfrom farm.infer import QAInferencer\nfrom farm.modeling.optimization import initialize_optimizer\nfrom farm.modeling.predictions import QAPred, QACandidate\nfrom farm.modeling.adaptive_model import BaseAdaptiveModel, AdaptiveModel\nfrom farm.train import Trainer\nfrom farm.eval import Evaluator\nfrom farm.utils import set_all_seeds, initialize_device_settings\nfrom scipy.special import expit\nimport shutil\n\nfrom haystack import Document\nfrom haystack.document_store.base import BaseDocumentStore\nfrom haystack.reader.base import BaseReader\n\nlogger = logging.getLogger(__name__)\n\n\nclass FARMReader(BaseReader):\n \"\"\"\n Transformer based model for extractive Question Answering using the FARM framework (https://github.com/deepset-ai/FARM).\n While the underlying model can vary (BERT, Roberta, DistilBERT, ...), the interface remains the same.\n\n | With a FARMReader, you can:\n\n - directly get predictions via predict()\n - fine-tune the model on QA data via train()\n \"\"\"\n\n def __init__(\n self,\n model_name_or_path: Union[str, Path],\n model_version: Optional[str] = None,\n context_window_size: int = 150,\n batch_size: int = 50,\n use_gpu: bool = True,\n no_ans_boost: float = 0.0,\n return_no_answer: bool = False,\n top_k: int = 10,\n top_k_per_candidate: int = 3,\n top_k_per_sample: int = 1,\n num_processes: Optional[int] = None,\n max_seq_len: int = 256,\n doc_stride: int = 128,\n progress_bar: bool = True,\n duplicate_filtering: int = 0\n ):\n\n \"\"\"\n :param model_name_or_path: Directory of a saved model or the name of a public model e.g. 'bert-base-cased',\n 'deepset/bert-base-cased-squad2', 'deepset/bert-base-cased-squad2', 'distilbert-base-uncased-distilled-squad'.\n See https://huggingface.co/models for full list of available models.\n :param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.\n :param context_window_size: The size, in characters, of the window around the answer span that is used when\n displaying the context around the answer.\n :param batch_size: Number of samples the model receives in one batch for inference.\n Memory consumption is much lower in inference mode. 
Recommendation: Increase the batch size\n to a value so only a single batch is used.\n :param use_gpu: Whether to use GPU (if available)\n :param no_ans_boost: How much the no_answer logit is boosted/increased.\n If set to 0 (default), the no_answer logit is not changed.\n If a negative number, there is a lower chance of \"no_answer\" being predicted.\n If a positive number, there is an increased chance of \"no_answer\"\n :param return_no_answer: Whether to include no_answer predictions in the results.\n :param top_k: The maximum number of answers to return\n :param top_k_per_candidate: How many answers to extract for each candidate doc that is coming from the retriever (might be a long text).\n Note that this is not the number of \"final answers\" you will receive\n (see `top_k` in FARMReader.predict() or Finder.get_answers() for that)\n and that FARM includes no_answer in the sorted list of predictions.\n :param top_k_per_sample: How many answers to extract from each small text passage that the model can process at once\n (one \"candidate doc\" is usually split into many smaller \"passages\").\n You usually want a very small value here, as it slows down inference\n and you don't gain much of quality by having multiple answers from one passage.\n Note that this is not the number of \"final answers\" you will receive\n (see `top_k` in FARMReader.predict() or Finder.get_answers() for that)\n and that FARM includes no_answer in the sorted list of predictions.\n :param num_processes: The number of processes for `multiprocessing.Pool`. Set to value of 0 to disable\n multiprocessing. Set to None to let Inferencer determine optimum number. If you\n want to debug the Language Model, you might need to disable multiprocessing!\n :param max_seq_len: Max sequence length of one input text for the model\n :param doc_stride: Length of striding window for splitting long texts (used if ``len(text) > max_seq_len``)\n :param progress_bar: Whether to show a tqdm progress bar or not.\n Can be helpful to disable in production deployments to keep the logs clean.\n :param duplicate_filtering: Answers are filtered based on their position. Both start and end position of the answers are considered.\n The higher the value, answers that are more apart are filtered out. 0 corresponds to exact duplicates. 
-1 turns off duplicate removal.\n \"\"\"\n\n # save init parameters to enable export of component config as YAML\n self.set_config(\n model_name_or_path=model_name_or_path, model_version=model_version, context_window_size=context_window_size,\n batch_size=batch_size, use_gpu=use_gpu, no_ans_boost=no_ans_boost, return_no_answer=return_no_answer,\n top_k=top_k, top_k_per_candidate=top_k_per_candidate, top_k_per_sample=top_k_per_sample,\n num_processes=num_processes, max_seq_len=max_seq_len, doc_stride=doc_stride, progress_bar=progress_bar,\n duplicate_filtering=duplicate_filtering\n )\n\n self.return_no_answers = return_no_answer\n self.top_k = top_k\n self.top_k_per_candidate = top_k_per_candidate\n self.inferencer = QAInferencer.load(model_name_or_path, batch_size=batch_size, gpu=use_gpu,\n task_type=\"question_answering\", max_seq_len=max_seq_len,\n doc_stride=doc_stride, num_processes=num_processes, revision=model_version,\n disable_tqdm=not progress_bar,\n strict=False)\n self.inferencer.model.prediction_heads[0].context_window_size = context_window_size\n self.inferencer.model.prediction_heads[0].no_ans_boost = no_ans_boost\n self.inferencer.model.prediction_heads[0].n_best = top_k_per_candidate + 1 # including possible no_answer\n try:\n self.inferencer.model.prediction_heads[0].n_best_per_sample = top_k_per_sample\n except:\n logger.warning(\"Could not set `top_k_per_sample` in FARM. Please update FARM version.\")\n try:\n self.inferencer.model.prediction_heads[0].duplicate_filtering = duplicate_filtering\n except:\n logger.warning(\"Could not set `duplicate_filtering` in FARM. Please update FARM version.\")\n self.max_seq_len = max_seq_len\n self.use_gpu = use_gpu\n self.progress_bar = progress_bar\n\n def train(\n self,\n data_dir: str,\n train_filename: str,\n dev_filename: Optional[str] = None,\n test_filename: Optional[str] = None,\n use_gpu: Optional[bool] = None,\n batch_size: int = 10,\n n_epochs: int = 2,\n learning_rate: float = 1e-5,\n max_seq_len: Optional[int] = None,\n warmup_proportion: float = 0.2,\n dev_split: float = 0,\n evaluate_every: int = 300,\n save_dir: Optional[str] = None,\n num_processes: Optional[int] = None,\n use_amp: str = None,\n ):\n \"\"\"\n Fine-tune a model on a QA dataset. Options:\n\n - Take a plain language model (e.g. `bert-base-cased`) and train it for QA (e.g. on SQuAD data)\n - Take a QA model (e.g. `deepset/bert-base-cased-squad2`) and fine-tune it for your domain (e.g. using your labels collected via the haystack annotation tool)\n\n :param data_dir: Path to directory containing your training data in SQuAD style\n :param train_filename: Filename of training data\n :param dev_filename: Filename of dev / eval data\n :param test_filename: Filename of test data\n :param dev_split: Instead of specifying a dev_filename, you can also specify a ratio (e.g. 0.1) here\n that gets split off from training data for eval.\n :param use_gpu: Whether to use GPU (if available)\n :param batch_size: Number of samples the model receives in one batch for training\n :param n_epochs: Number of iterations on the whole training data set\n :param learning_rate: Learning rate of the optimizer\n :param max_seq_len: Maximum text length (in tokens). Everything longer gets cut down.\n :param warmup_proportion: Proportion of training steps until maximum learning rate is reached.\n Until that point LR is increasing linearly. 
After that it's decreasing again linearly.\n Options for different schedules are available in FARM.\n :param evaluate_every: Evaluate the model every X steps on the hold-out eval dataset\n :param save_dir: Path to store the final model\n :param num_processes: The number of processes for `multiprocessing.Pool` during preprocessing.\n Set to value of 1 to disable multiprocessing. When set to 1, you cannot split away a dev set from train set.\n Set to None to use all CPU cores minus one.\n :param use_amp: Optimization level of NVIDIA's automatic mixed precision (AMP). The higher the level, the faster the model.\n Available options:\n None (Don't use AMP)\n \"O0\" (Normal FP32 training)\n \"O1\" (Mixed Precision => Recommended)\n \"O2\" (Almost FP16)\n \"O3\" (Pure FP16).\n See details on: https://nvidia.github.io/apex/amp.html\n :return: None\n \"\"\"\n\n if dev_filename:\n dev_split = 0\n\n if num_processes is None:\n num_processes = multiprocessing.cpu_count() - 1 or 1\n\n set_all_seeds(seed=42)\n\n # For these variables, by default, we use the value set when initializing the FARMReader.\n # These can also be manually set when train() is called if you want a different value at train vs inference\n if use_gpu is None:\n use_gpu = self.use_gpu\n if max_seq_len is None:\n max_seq_len = self.max_seq_len\n\n device, n_gpu = initialize_device_settings(use_cuda=use_gpu,use_amp=use_amp)\n\n if not save_dir:\n save_dir = f\"../../saved_models/{self.inferencer.model.language_model.name}\"\n\n # 1. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset\n label_list = [\"start_token\", \"end_token\"]\n metric = \"squad\"\n processor = SquadProcessor(\n tokenizer=self.inferencer.processor.tokenizer,\n max_seq_len=max_seq_len,\n label_list=label_list,\n metric=metric,\n train_filename=train_filename,\n dev_filename=dev_filename,\n dev_split=dev_split,\n test_filename=test_filename,\n data_dir=Path(data_dir),\n )\n\n # 2. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them\n # and calculates a few descriptive statistics of our datasets\n data_silo = DataSilo(processor=processor, batch_size=batch_size, distributed=False, max_processes=num_processes)\n\n # Quick-fix until this is fixed upstream in FARM:\n # We must avoid applying DataParallel twice (once when loading the inferencer,\n # once when calling initalize_optimizer)\n self.inferencer.model.save(\"tmp_model\")\n model = BaseAdaptiveModel.load(load_dir=\"tmp_model\", device=device, strict=True)\n shutil.rmtree('tmp_model')\n\n # 3. Create an optimizer and pass the already initialized model\n model, optimizer, lr_schedule = initialize_optimizer(\n model=model,\n # model=self.inferencer.model,\n learning_rate=learning_rate,\n schedule_opts={\"name\": \"LinearWarmup\", \"warmup_proportion\": warmup_proportion},\n n_batches=len(data_silo.loaders[\"train\"]),\n n_epochs=n_epochs,\n device=device,\n use_amp=use_amp,\n )\n # 4. Feed everything to the Trainer, which keeps care of growing our model and evaluates it from time to time\n trainer = Trainer(\n model=model,\n optimizer=optimizer,\n data_silo=data_silo,\n epochs=n_epochs,\n n_gpu=n_gpu,\n lr_schedule=lr_schedule,\n evaluate_every=evaluate_every,\n device=device,\n use_amp=use_amp,\n disable_tqdm=not self.progress_bar\n )\n\n\n # 5. 
Let it grow!\n self.inferencer.model = trainer.train()\n self.save(Path(save_dir))\n\n def update_parameters(\n self,\n context_window_size: Optional[int] = None,\n no_ans_boost: Optional[float] = None,\n return_no_answer: Optional[bool] = None,\n max_seq_len: Optional[int] = None,\n doc_stride: Optional[int] = None,\n ):\n \"\"\"\n Hot update parameters of a loaded Reader. It may not to be safe when processing concurrent requests.\n \"\"\"\n if no_ans_boost is not None:\n self.inferencer.model.prediction_heads[0].no_ans_boost = no_ans_boost\n if return_no_answer is not None:\n self.return_no_answers = return_no_answer\n if doc_stride is not None:\n self.inferencer.processor.doc_stride = doc_stride\n if context_window_size is not None:\n self.inferencer.model.prediction_heads[0].context_window_size = context_window_size\n if max_seq_len is not None:\n self.inferencer.processor.max_seq_len = max_seq_len\n self.max_seq_len = max_seq_len\n\n def save(self, directory: Path):\n \"\"\"\n Saves the Reader model so that it can be reused at a later point in time.\n\n :param directory: Directory where the Reader model should be saved\n \"\"\"\n logger.info(f\"Saving reader model to {directory}\")\n self.inferencer.model.save(directory)\n self.inferencer.processor.save(directory)\n\n def predict_batch(self, query_doc_list: List[dict], top_k: int = None, batch_size: int = None):\n \"\"\"\n Use loaded QA model to find answers for a list of queries in each query's supplied list of Document.\n\n Returns list of dictionaries containing answers sorted by (desc.) probability\n\n :param query_doc_list: List of dictionaries containing queries with their retrieved documents\n :param top_k: The maximum number of answers to return for each query\n :param batch_size: Number of samples the model receives in one batch for inference\n :return: List of dictionaries containing query and answers\n \"\"\"\n\n if top_k is None:\n top_k = self.top_k\n # convert input to FARM format\n inputs = []\n number_of_docs = []\n labels = []\n\n # build input objects for inference_from_objects\n for query_with_docs in query_doc_list:\n documents = query_with_docs[\"docs\"]\n query = query_with_docs[\"question\"]\n labels.append(query)\n number_of_docs.append(len(documents))\n\n for doc in documents:\n cur = QAInput(doc_text=doc.text,\n questions=Question(text=query.question,\n uid=doc.id))\n inputs.append(cur)\n\n self.inferencer.batch_size = batch_size\n # make predictions on all document-query pairs\n predictions = self.inferencer.inference_from_objects(\n objects=inputs, return_json=False, multiprocessing_chunksize=10\n )\n\n # group predictions together\n grouped_predictions = []\n left_idx = 0\n right_idx = 0\n for number in number_of_docs:\n right_idx = left_idx + number\n grouped_predictions.append(predictions[left_idx:right_idx])\n left_idx = right_idx\n\n result = []\n for idx, group in enumerate(grouped_predictions):\n answers, max_no_ans_gap = self._extract_answers_of_predictions(group, top_k)\n query = group[0].question\n cur_label = labels[idx]\n result.append({\n \"query\": query,\n \"no_ans_gap\": max_no_ans_gap,\n \"answers\": answers,\n \"label\": cur_label\n })\n\n return result\n\n def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None):\n \"\"\"\n Use loaded QA model to find answers for a query in the supplied list of Document.\n\n Returns dictionaries containing answers sorted by (desc.) 
probability.\n Example:\n ```python\n |{\n | 'query': 'Who is the father of Arya Stark?',\n | 'answers':[\n | {'answer': 'Eddard,',\n | 'context': \" She travels with her father, Eddard, to King's Landing when he is \",\n | 'offset_answer_start': 147,\n | 'offset_answer_end': 154,\n | 'probability': 0.9787139466668613,\n | 'score': None,\n | 'document_id': '1337'\n | },...\n | ]\n |}\n ```\n\n :param query: Query string\n :param documents: List of Document in which to search for the answer\n :param top_k: The maximum number of answers to return\n :return: Dict containing query and answers\n \"\"\"\n if top_k is None:\n top_k = self.top_k\n # convert input to FARM format\n inputs = []\n for doc in documents:\n cur = QAInput(doc_text=doc.text,\n questions=Question(text=query,\n uid=doc.id))\n inputs.append(cur)\n\n # get answers from QA model\n # TODO: Need fix in FARM's `to_dict` function of `QAInput` class\n predictions = self.inferencer.inference_from_objects(\n objects=inputs, return_json=False, multiprocessing_chunksize=1\n )\n # assemble answers from all the different documents & format them.\n answers, max_no_ans_gap = self._extract_answers_of_predictions(predictions, top_k)\n result = {\"query\": query,\n \"no_ans_gap\": max_no_ans_gap,\n \"answers\": answers}\n\n return result\n\n def eval_on_file(self, data_dir: str, test_filename: str, device: str):\n \"\"\"\n Performs evaluation on a SQuAD-formatted file.\n Returns a dict containing the following metrics:\n - \"EM\": exact match score\n - \"f1\": F1-Score\n - \"top_n_accuracy\": Proportion of predicted answers that overlap with correct answer\n\n :param data_dir: The directory in which the test set can be found\n :type data_dir: Path or str\n :param test_filename: The name of the file containing the test data in SQuAD format.\n :type test_filename: str\n :param device: The device on which the tensors should be processed. 
Choose from \"cpu\" and \"cuda\".\n :type device: str\n \"\"\"\n eval_processor = SquadProcessor(\n tokenizer=self.inferencer.processor.tokenizer,\n max_seq_len=self.inferencer.processor.max_seq_len,\n label_list=self.inferencer.processor.tasks[\"question_answering\"][\"label_list\"],\n metric=self.inferencer.processor.tasks[\"question_answering\"][\"metric\"],\n train_filename=None,\n dev_filename=None,\n dev_split=0,\n test_filename=test_filename,\n data_dir=Path(data_dir),\n )\n\n data_silo = DataSilo(processor=eval_processor, batch_size=self.inferencer.batch_size, distributed=False)\n data_loader = data_silo.get_data_loader(\"test\")\n\n evaluator = Evaluator(data_loader=data_loader, tasks=eval_processor.tasks, device=device)\n\n eval_results = evaluator.eval(self.inferencer.model)\n results = {\n \"EM\": eval_results[0][\"EM\"],\n \"f1\": eval_results[0][\"f1\"],\n \"top_n_accuracy\": eval_results[0][\"top_n_accuracy\"]\n }\n return results\n\n def eval(\n self,\n document_store: BaseDocumentStore,\n device: str,\n label_index: str = \"label\",\n doc_index: str = \"eval_document\",\n label_origin: str = \"gold_label\",\n calibrate_conf_scores: bool = False\n ):\n \"\"\"\n Performs evaluation on evaluation documents in the DocumentStore.\n Returns a dict containing the following metrics:\n - \"EM\": Proportion of exact matches of predicted answers with their corresponding correct answers\n - \"f1\": Average overlap between predicted answers and their corresponding correct answers\n - \"top_n_accuracy\": Proportion of predicted answers that overlap with correct answer\n\n :param document_store: DocumentStore containing the evaluation documents\n :param device: The device on which the tensors should be processed. Choose from \"cpu\" and \"cuda\".\n :param label_index: Index/Table name where labeled questions are stored\n :param doc_index: Index/Table name where documents that are used for evaluation are stored\n :param label_origin: Field name where the gold labels are stored\n :param calibrate_conf_scores: Whether to calibrate the temperature for temperature scaling of the confidence scores\n \"\"\"\n\n if self.top_k_per_candidate != 4:\n logger.info(f\"Performing Evaluation using top_k_per_candidate = {self.top_k_per_candidate} \\n\"\n f\"and consequently, QuestionAnsweringPredictionHead.n_best = {self.top_k_per_candidate + 1}. 
\\n\"\n f\"This deviates from FARM's default where QuestionAnsweringPredictionHead.n_best = 5\")\n\n # extract all questions for evaluation\n filters = {\"origin\": [label_origin]}\n\n labels = document_store.get_all_labels(index=label_index, filters=filters)\n\n # Aggregate all answer labels per question\n aggregated_per_doc = defaultdict(list)\n for label in labels:\n if not label.document_id:\n logger.error(f\"Label does not contain a document_id\")\n continue\n aggregated_per_doc[label.document_id].append(label)\n\n # Create squad style dicts\n d: Dict[str, Any] = {}\n all_doc_ids = [x.id for x in document_store.get_all_documents(doc_index)]\n for doc_id in all_doc_ids:\n doc = document_store.get_document_by_id(doc_id, index=doc_index)\n if not doc:\n logger.error(f\"Document with the ID '{doc_id}' is not present in the document store.\")\n continue\n d[str(doc_id)] = {\n \"context\": doc.text\n }\n # get all questions / answers\n aggregated_per_question: Dict[tuple, Any] = defaultdict(list)\n id_question_tuple = (label.id, label.question)\n if doc_id in aggregated_per_doc:\n for label in aggregated_per_doc[doc_id]:\n # add to existing answers\n if id_question_tuple in aggregated_per_question.keys():\n if label.offset_start_in_doc == 0 and label.answer == \"\":\n continue\n else:\n # Hack to fix problem where duplicate questions are merged by doc_store processing creating a QA example with 8 annotations > 6 annotation max\n if len(aggregated_per_question[id_question_tuple][\"answers\"]) >= 6:\n logger.warning(f\"Answers in this sample are being dropped because it has more than 6 answers. (doc_id: {doc_id}, question: {label.question}, label_id: {label.id})\")\n continue\n aggregated_per_question[id_question_tuple][\"answers\"].append({\n \"text\": label.answer,\n \"answer_start\": label.offset_start_in_doc})\n aggregated_per_question[id_question_tuple][\"is_impossible\"] = False\n # create new one\n else:\n # We don't need to create an answer dict if is_impossible / no_answer\n if label.offset_start_in_doc == 0 and label.answer == \"\":\n aggregated_per_question[id_question_tuple] = {\n \"id\": str(hash(str(doc_id) + label.question)),\n \"question\": label.question,\n \"answers\": [],\n \"is_impossible\": True\n }\n else:\n aggregated_per_question[id_question_tuple] = {\n \"id\": str(hash(str(doc_id)+label.question)),\n \"question\": label.question,\n \"answers\": [{\n \"text\": label.answer,\n \"answer_start\": label.offset_start_in_doc}],\n \"is_impossible\": False\n }\n\n # Get rid of the question key again (after we aggregated we don't need it anymore)\n d[str(doc_id)][\"qas\"] = [v for v in aggregated_per_question.values()]\n\n # Convert input format for FARM\n farm_input = [v for v in d.values()]\n n_queries = len([y for x in farm_input for y in x[\"qas\"]])\n\n # Create DataLoader that can be passed to the Evaluator\n tic = perf_counter()\n indices = range(len(farm_input))\n dataset, tensor_names, problematic_ids = self.inferencer.processor.dataset_from_dicts(farm_input, indices=indices)\n data_loader = NamedDataLoader(dataset=dataset, batch_size=self.inferencer.batch_size, tensor_names=tensor_names)\n\n evaluator = Evaluator(data_loader=data_loader, tasks=self.inferencer.processor.tasks, device=device)\n\n eval_results = evaluator.eval(self.inferencer.model, calibrate_conf_scores=calibrate_conf_scores)\n toc = perf_counter()\n reader_time = toc - tic\n results = {\n \"EM\": eval_results[0][\"EM\"] * 100,\n \"f1\": eval_results[0][\"f1\"] * 100,\n \"top_n_accuracy\": 
eval_results[0][\"top_n_accuracy\"] * 100,\n \"top_n\": self.inferencer.model.prediction_heads[0].n_best,\n \"reader_time\": reader_time,\n \"seconds_per_query\": reader_time / n_queries\n }\n return results\n\n def _extract_answers_of_predictions(self, predictions: List[QAPred], top_k: Optional[int] = None):\n # Assemble answers from all the different documents and format them.\n # For the 'no answer' option, we collect all no_ans_gaps and decide how likely\n # a no answer is based on all no_ans_gaps values across all documents\n answers = []\n no_ans_gaps = []\n best_score_answer = 0\n\n for pred in predictions:\n answers_per_document = []\n no_ans_gaps.append(pred.no_answer_gap)\n for ans in pred.prediction:\n # skip 'no answers' here\n if self._check_no_answer(ans):\n pass\n else:\n cur = {\n \"answer\": ans.answer,\n \"score\": ans.score,\n # just a pseudo prob for now\n \"probability\": ans.confidence,\n \"context\": ans.context_window,\n \"offset_start\": ans.offset_answer_start - ans.offset_context_window_start,\n \"offset_end\": ans.offset_answer_end - ans.offset_context_window_start,\n \"offset_start_in_doc\": ans.offset_answer_start,\n \"offset_end_in_doc\": ans.offset_answer_end,\n \"document_id\": pred.id\n }\n answers_per_document.append(cur)\n\n if ans.score > best_score_answer:\n best_score_answer = ans.score\n\n # Only take n best candidates. Answers coming back from FARM are sorted with decreasing relevance\n answers += answers_per_document[:self.top_k_per_candidate]\n\n # calculate the score for predicting 'no answer', relative to our best positive answer score\n no_ans_prediction, max_no_ans_gap = self._calc_no_answer(no_ans_gaps, best_score_answer)\n if self.return_no_answers:\n answers.append(no_ans_prediction)\n\n # sort answers by score and select top-k\n answers = sorted(answers, key=lambda k: k[\"score\"], reverse=True)\n answers = answers[:top_k]\n\n return answers, max_no_ans_gap\n\n def calibrate_confidence_scores(\n self,\n document_store: BaseDocumentStore,\n device: str,\n label_index: str = \"label\",\n doc_index: str = \"eval_document\",\n label_origin: str = \"gold_label\"\n ):\n \"\"\"\n Calibrates confidence scores on evaluation documents in the DocumentStore.\n\n :param document_store: DocumentStore containing the evaluation documents\n :param device: The device on which the tensors should be processed. Choose from \"cpu\" and \"cuda\".\n :param label_index: Index/Table name where labeled questions are stored\n :param doc_index: Index/Table name where documents that are used for evaluation are stored\n :param label_origin: Field name where the gold labels are stored\n \"\"\"\n self.eval(document_store=document_store,\n device=device,\n label_index=label_index,\n doc_index=doc_index,\n label_origin=label_origin,\n calibrate_conf_scores=True)\n\n @staticmethod\n def _get_pseudo_prob(score: float):\n return float(expit(np.asarray(score) / 8))\n\n @staticmethod\n def _check_no_answer(c: QACandidate):\n # check for correct value in \"answer\"\n if c.offset_answer_start == 0 and c.offset_answer_end == 0:\n if c.answer != \"no_answer\":\n logger.error(\"Invalid 'no_answer': Got a prediction for position 0, but answer string is not 'no_answer'\")\n if c.answer == \"no_answer\":\n return True\n else:\n return False\n\n def predict_on_texts(self, question: str, texts: List[str], top_k: Optional[int] = None):\n \"\"\"\n Use loaded QA model to find answers for a question in the supplied list of Document.\n Returns dictionaries containing answers sorted by (desc.) 
probability.\n Example:\n ```python\n |{\n | 'question': 'Who is the father of Arya Stark?',\n | 'answers':[\n | {'answer': 'Eddard,',\n | 'context': \" She travels with her father, Eddard, to King's Landing when he is \",\n | 'offset_answer_start': 147,\n | 'offset_answer_end': 154,\n | 'probability': 0.9787139466668613,\n | 'score': None,\n | 'document_id': '1337'\n | },...\n | ]\n |}\n ```\n\n :param question: Question string\n :param documents: List of documents as string type\n :param top_k: The maximum number of answers to return\n :return: Dict containing question and answers\n \"\"\"\n documents = []\n for text in texts:\n documents.append(\n Document(\n text=text\n )\n )\n predictions = self.predict(question, documents, top_k)\n return predictions\n\n @classmethod\n def convert_to_onnx(\n cls,\n model_name: str,\n output_path: Path,\n convert_to_float16: bool = False,\n quantize: bool = False,\n task_type: str = \"question_answering\",\n opset_version: int = 11\n ):\n \"\"\"\n Convert a PyTorch BERT model to ONNX format and write to ./onnx-export dir. The converted ONNX model\n can be loaded with in the `FARMReader` using the export path as `model_name_or_path` param.\n\n Usage:\n\n `from haystack.reader.farm import FARMReader\n from pathlib import Path\n onnx_model_path = Path(\"roberta-onnx-model\")\n FARMReader.convert_to_onnx(model_name=\"deepset/bert-base-cased-squad2\", output_path=onnx_model_path)\n reader = FARMReader(onnx_model_path)`\n\n :param model_name: transformers model name\n :param output_path: Path to output the converted model\n :param convert_to_float16: Many models use float32 precision by default. With the half precision of float16,\n inference is faster on Nvidia GPUs with Tensor core like T4 or V100. On older GPUs,\n float32 could still be be more performant.\n :param quantize: convert floating point number to integers\n :param task_type: Type of task for the model. Available options: \"question_answering\" or \"embeddings\".\n :param opset_version: ONNX opset version\n \"\"\"\n AdaptiveModel.convert_to_onnx(\n model_name=model_name,\n output_path=output_path,\n task_type=task_type,\n convert_to_float16=convert_to_float16,\n quantize=quantize,\n opset_version=opset_version\n )\n\n"
] | [
[
"numpy.asarray"
]
] |
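Note: the `predict_batch` method in the FARMReader source above flattens every (query, document) pair into one inference call and then re-groups the flat prediction list using the per-query document counts before ranking answers. A minimal sketch of that regroup-and-rank pattern, assuming plain dicts with an `answers` list and a `score` field as stand-ins for FARM's QAPred/QACandidate objects (all names and values here are illustrative, not haystack's API):

```python
# Sketch of the regroup-then-rank pattern used in predict_batch above.
# Plain dicts stand in for FARM prediction objects; values are illustrative only.

def group_predictions(flat_predictions, number_of_docs):
    """Slice a flat list of per-document predictions back into one group per query."""
    grouped, left = [], 0
    for n in number_of_docs:
        grouped.append(flat_predictions[left:left + n])
        left += n
    return grouped

def top_k_answers(prediction_group, top_k=3):
    """Merge answers from all documents of one query and keep the best-scoring ones."""
    answers = [ans for pred in prediction_group for ans in pred["answers"]]
    return sorted(answers, key=lambda a: a["score"], reverse=True)[:top_k]

if __name__ == "__main__":
    # Two queries: the first was matched against 2 documents, the second against 1.
    flat = [
        {"answers": [{"answer": "Eddard", "score": 0.97}]},
        {"answers": [{"answer": "Ned Stark", "score": 0.71}]},
        {"answers": [{"answer": "Winterfell", "score": 0.88}]},
    ]
    for group in group_predictions(flat, number_of_docs=[2, 1]):
        print(top_k_answers(group, top_k=2))
```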
ni30kp/HR_Employee_Wellbeing | [
"fa2ef37a365bc0671ae6fbbc354755341f7bed5f"
] | [
"main.py"
] | [
"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nimport pandas as pd\nimport numpy as np\n\n\nbusiness_travel=3\ndept=3\nedu_field=3\ngender=3\njob_role=3\nmartital=3\nover_time=3\n\n\ndef get_entry_data():\n age = int(el1.get())\n\n if el2.get() == \"Non Travel\":\n business_travel=0\n elif el2.get() == \"Travel Frequently\":\n business_travel=1\n elif el2.get() == \"Travel Rarely\":\n business_travel=2\n\n daily_rate = int(el3.get())\n\n if el4.get() == \"Human Resources\":\n dept=0\n elif el4.get() == \"Research & Development\":\n dept=1\n elif el4.get() == \"Sales\":\n dept=2\n\n dist_home = int(el5.get())\n\n education = int(el6.get())\n\n if el7.get() == \"Human Resources\":\n edu_field=0\n elif el7.get() == \"Life Sciences\":\n edu_field=1\n elif el7.get() == \"Marketing\":\n edu_field=2\n elif el7.get() == \"Medical\":\n \tedu_field=3\n elif el7.get() == \"Other\":\n \tedu_field=4\n\n Environment_satification = int(el8.get())\n\n if el9.get() == \"Female\":\n gender=0\n elif el9.get() == \"Male\":\n gender=1 \n\n Hourly_rate = int(el10.get())\n\n job_invol = int(el11.get())\n\n job_level = int(el12.get())\n\n if el13.get() == \"Health Care Representative\":\n job_role=0\n elif el13.get() == \"Human Resources\":\n job_role=1\n elif el13.get() == \"Laboratory Technician\":\n job_role=2\n elif el13.get() == \"Manager\":\n \tjob_role=3\n elif el13.get() == \"Manufacturing Director\":\n \tjob_role=4\n elif el13.get() == \"Research Director\":\n \tjob_role=5\n elif el13.get() == \"Research Scientist\":\n \tjob_role=6\n elif el13.get() == \"Sales Executive\":\n \tjob_role=7\n\n job_satisifaction = int(el14.get())\n\n if el15.get() == \"Single\":\n martital=0\n elif el15.get() == \"Divorced\":\n martital=1\n elif el15.get() == \"Married\":\n martital=2\n\n monthly_income = int(el16.get())\n\n monthly_rate = int(el17.get())\n\n num_company = int(el18.get())\n\n if el19.get() == \"No\":\n over_time=0\n elif el19.get() == \"Yes\":\n over_time=1\n\n salary_hike = int(el20.get())\n\n performance_rate = int(el21.get()) \n\n relationship_satisfaction = int(el22.get())\n\n total_work_years = int(el23.get())\n\n training_time = int(el24.get())\n\n work_life_bal = int(el25.get()) \n\n year_at_company = int(el26.get())\n\n year_in_role = int(el27.get())\n\n last_promotion = int(el28.get())\n\n current_manager = int(el29.get())\n\n y_pred = []\n\n y_pred.append(age)\n y_pred.append(business_travel)\n y_pred.append(daily_rate)\n y_pred.append(dept)\n y_pred.append(dist_home)\n y_pred.append(education)\n y_pred.append(edu_field)\n y_pred.append(Environment_satification)\n y_pred.append(gender)\n y_pred.append(Hourly_rate)\n y_pred.append(job_invol)\n y_pred.append(job_level)\n y_pred.append(job_role)\n y_pred.append(job_satisifaction)\n y_pred.append(martital)\n y_pred.append(monthly_income)\n y_pred.append(monthly_rate)\n y_pred.append(num_company)\n y_pred.append(over_time)\n y_pred.append(salary_hike)\n y_pred.append(performance_rate)\n y_pred.append(relationship_satisfaction)\n y_pred.append(total_work_years)\n y_pred.append(training_time)\n y_pred.append(work_life_bal)\n y_pred.append(year_at_company)\n y_pred.append(year_in_role)\n y_pred.append(last_promotion)\n y_pred.append(current_manager)\n\n y_pred = np.reshape(y_pred,(1,-1))\n\n print(y_pred)\n\n from pandas import DataFrame\n import matplotlib.pyplot as plt\n import seaborn as sns\n\n from sklearn import preprocessing\n import math\n from sklearn.model_selection import train_test_split\n from sklearn import 
metrics \n\n data = pd.read_csv('WA_Fn-UseC_-HR-Employee-Attrition.csv')\n data = data.drop(columns=['StandardHours','EmployeeCount','Over18','EmployeeNumber','StockOptionLevel'])\n\n le = preprocessing.LabelEncoder()\n categorial_variables = ['Attrition','BusinessTravel','Department','EducationField','Gender','JobRole','MaritalStatus','OverTime']\n for i in categorial_variables:\n data[i] = le.fit_transform(data[i])\n data.head(5)\n data.to_csv('LabelEncoded_CleanData.csv')\n\n target = data['Attrition']\n train = data.drop('Attrition',axis = 1)\n\n #def train_test_error(y_train,y_test):\n #\ttest_error = ((y_test==Y_test).sum())/len(Y_test)*10\n #\ttest_accuracy = test_error\n\n X_train, X_test, Y_train, Y_test = train_test_split(train, target, test_size=0.33, random_state=42)\n \n from sklearn.linear_model import LogisticRegression\n log_reg = LogisticRegression()\n log_reg.fit(X_train,Y_train)\n test_predict = log_reg.predict(X_test)\n test_accuracy = log_reg.score(X_test,Y_test)\n #train_test_error(train_predict , test_predict)\n\n txt = log_reg.predict(y_pred)\n\n if txt == 0:\n \tmessagebox.showinfo(\"ATTRITION\", \"EMPLOYEE WILL STAY\")\n else:\n messagebox.showinfo(\"ATTRITION\", \"CHANCE OF EMPLOYEE WILL LEAVE\")\n\ngui = Tk()\n\ngui.title('HR ANALYSIS OF EMPLOYEE ATTRITION GUI')\ngui.geometry('600x700')\ngui.configure(background=\"light blue\")\n\nclass TableDropDown(ttk.Combobox):\n def __init__(self, parent):\n self.current_table = tk.StringVar() # create variable for table\n ttk.Combobox.__init__(self, parent)# init widget\n self.config(textvariable = self.current_table, state = \"readonly\", values = [\"Customers\", \"Pets\", \"Invoices\", \"Prices\"])\n self.current(0) # index of values for current table\n self.place(x = 50, y = 50, anchor = \"w\") # place drop down box \n\nl1=Label(gui, text='Enter Age')\nl1.grid(row=0, column=0,padx=10,pady=2)\nel1 = Entry(gui)\nel1.grid(row=0, column=1)\n\nl2=Label(gui, text='Business travel:')\nl2.grid(row=2, column=0,padx=10,pady=2)\nel2 = ttk.Combobox(gui, width=\"18\", values=(\"Non Travel\",\"Travel Frequently\",\"Travel Rarely\"))\nel2.grid(row=2, column=1)\n\nl3=Label(gui, text='Enter Daily Rate')\nl3.grid(row=3, column=0,padx=10,pady=2)\nel3 = Entry(gui)\nel3.grid(row=3, column=1)\n\nl4=Label(gui, text='Enter Department')\nl4.grid(row=4, column=0,padx=10,pady=2)\nel4 = ttk.Combobox(gui, width=\"18\", values=(\"Human Resources\",\"Research & Development\",\"Sales\"))\nel4.grid(row=4, column=1)\n\nl5=Label(gui, text='Enter Distance From Home')\nl5.grid(row=5, column=0,padx=10,pady=2)\nel5 = Entry(gui)\nel5.grid(row=5, column=1)\n\nl6=Label(gui, text='Enter Education(Rating out of 5)')\nl6.grid(row=6, column=0,padx=10,pady=2)\nel6 = Entry(gui)\nel6.grid(row=6, column=1)\n\nl7=Label(gui, text='Enter Education Field')\nl7.grid(row=7, column=0,padx=10,pady=2)\nel7 = ttk.Combobox(gui, width=\"18\", values=(\"Human Resources\",\"Life Sciences\",\"Marketing\",\"Medical\",\"Other\"))\nel7.grid(row=7, column=1)\n\nl8=Label(gui, text='Enter Environment Satisfaction(Rating out of 5)')\nl8.grid(row=8, column=0,padx=10,pady=2)\nel8 = Entry(gui)\nel8.grid(row=8, column=1)\n\nl9=Label(gui, text='Enter Gender')\nl9.grid(row=9, column=0,padx=10,pady=2)\nel9 = ttk.Combobox(gui, width=\"18\", values=(\"Female\",\"Male\"))\nel9.grid(row=9, column=1)\n\nl10=Label(gui, text='Enter Hourly Rate')\nl10.grid(row=10, column=0,padx=10,pady=2)\nel10 = Entry(gui)\nel10.grid(row=10, column=1)\n\nl11=Label(gui, text='Enter Job Involvement(Rating out of 
5)')\nl11.grid(row=11, column=0,padx=10,pady=2)\nel11 = Entry(gui)\nel11.grid(row=11, column=1)\n\nl12=Label(gui, text='Enter Job Level(Rating out of 5)')\nl12.grid(row=12, column=0,padx=10,pady=2)\nel12 = Entry(gui)\nel12.grid(row=12, column=1)\n\nl13=Label(gui, text='Enter Job Role')\nl13.grid(row=13, column=0,padx=10,pady=2)\nel13 = ttk.Combobox(gui, width=\"18\", values=(\"Health Care Representative\",\"Human Resources\",\"Laboratory Technician\",\"Manager\",\"Manufacturing Director\",\"Research Director\",\"Research Scientist\",\"Sales Executive\"))\nel13.grid(row=13, column=1)\n\nl14=Label(gui, text='Enter Job Satisfaction(Out of 5)')\nl14.grid(row=14, column=0,padx=10,pady=2)\nel14 = Entry(gui)\nel14.grid(row=14, column=1)\n\nl15=Label(gui, text='Enter Marital Status')\nl15.grid(row=15, column=0,padx=10,pady=2)\nel15 = ttk.Combobox(gui, width=\"18\", values=(\"Single\",\"Divorced\",\"Married\"))\nel15.grid(row=15, column=1)\n\nl16=Label(gui, text='Enter Monthly Income')\nl16.grid(row=16, column=0,padx=10,pady=2)\nel16 = Entry(gui)\nel16.grid(row=16, column=1)\n\nl17=Label(gui, text='Enter Monthly Rate')\nl17.grid(row=17, column=0,padx=10,pady=2)\nel17 = Entry(gui)\nel17.grid(row=17, column=1)\n\nl18=Label(gui, text='Enter Num Companies Worked')\nl18.grid(row=18, column=0,padx=10,pady=2)\nel18 = Entry(gui)\nel18.grid(row=18, column=1)\n\nl19=Label(gui, text='Enter OverTime')\nl19.grid(row=19, column=0,padx=10,pady=2)\nel19 = ttk.Combobox(gui, width=\"18\", values=(\"No\",\"Yes\"))\nel19.grid(row=19, column=1)\n\nl20=Label(gui, text='Enter Percent Salary Hike(Out of 100)')\nl20.grid(row=20, column=0,padx=10,pady=2)\nel20 = Entry(gui)\nel20.grid(row=20, column=1)\n\nl21=Label(gui, text='Enter Performance Rating(Out of 5)')\nl21.grid(row=21, column=0,padx=10,pady=2)\nel21 = Entry(gui)\nel21.grid(row=21, column=1)\n\nl22=Label(gui, text='Enter Relationship Satisfaction(Out of 5)')\nl22.grid(row=22, column=0,padx=10,pady=2)\nel22 = Entry(gui)\nel22.grid(row=22, column=1)\n\nl23=Label(gui, text='Enter Total Working Years')\nl23.grid(row=23, column=0,padx=10,pady=2)\nel23 = Entry(gui)\nel23.grid(row=23, column=1)\n\nl24=Label(gui, text='Enter Training Times Last Year')\nl24.grid(row=24, column=0,padx=10,pady=2)\nel24 = Entry(gui)\nel24.grid(row=24, column=1)\n\nl25=Label(gui, text='Enter Work Life Balance(Out of 5)')\nl25.grid(row=25, column=0,padx=10,pady=2)\nel25 = Entry(gui)\nel25.grid(row=25, column=1)\n\nl26=Label(gui, text='Enter Years At Company')\nl26.grid(row=26, column=0,padx=10,pady=2)\nel26 = Entry(gui)\nel26.grid(row=26, column=1)\n\nl27=Label(gui, text='Enter Years In Current Role')\nl27.grid(row=27, column=0,padx=10,pady=2)\nel27 = Entry(gui)\nel27.grid(row=27, column=1)\n\nl28=Label(gui, text='Enter Years Since Last Promotion')\nl28.grid(row=28, column=0,padx=10,pady=2)\nel28 = Entry(gui)\nel28.grid(row=28, column=1)\n\nl29=Label(gui, text='Enter Years With Current Manager')\nl29.grid(row=29, column=0,padx=10,pady=2)\nel29 = Entry(gui)\nel29.grid(row=29, column=1)\n\naction_button = Button(gui)\naction_button.configure(text='Submit',fg=\"black\",bg=\"light green\", command=get_entry_data)\naction_button.grid(row=30, column=1,padx=10,pady=2)\n\ngui.mainloop()\n"
] | [
[
"sklearn.preprocessing.LabelEncoder",
"numpy.reshape",
"sklearn.linear_model.LogisticRegression",
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
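Note: `get_entry_data()` in the Tkinter script above maps each combobox selection to an integer code through long `if`/`elif` chains before assembling the feature vector for the classifier. A small sketch of the same mapping expressed as dictionary lookups; the categories and codes are copied from those chains, while `CATEGORY_CODES` and `encode` are hypothetical helpers, not part of the original script:

```python
# Sketch: dictionary-based stand-in for the if/elif label encoding in get_entry_data().
# Integer codes copy the chains in the original callback.

CATEGORY_CODES = {
    "BusinessTravel": {"Non Travel": 0, "Travel Frequently": 1, "Travel Rarely": 2},
    "Department": {"Human Resources": 0, "Research & Development": 1, "Sales": 2},
    "Gender": {"Female": 0, "Male": 1},
    "OverTime": {"No": 0, "Yes": 1},
}

def encode(feature, value):
    """Return the integer code for a combobox selection, or raise if it is unknown."""
    try:
        return CATEGORY_CODES[feature][value]
    except KeyError:
        raise ValueError(f"Unknown value {value!r} for feature {feature!r}")

if __name__ == "__main__":
    print(encode("BusinessTravel", "Travel Rarely"))  # -> 2
    print(encode("Gender", "Female"))                 # -> 0
```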
Elmecio/IForestASD_based_methods_in_sk-Multiflow | [
"eee23b77dc52de704f559cdfbce06b1b96bd2854"
] | [
"source/iforestasd_scikitmultiflow_with_PCA.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Notebook_test_iForestASD_Scikitmultiflow.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1APZBgZ0fuHufYWM5QKqFhR7cpPs0bF2T\n\n# iForestASD : Unsupervised Anomaly Detection with Scikit-MultiFlow\n\nAn Implementation of Unsupervised Anomaly Detection with Isolation Forest in Scikit-MultiFlow with Sliding Windows \\& drift detection\n\n\n## References :\n\n - An Anomaly Detection Approach Based on Isolation Forest for Streaming Data using Sliding Window (Ding \\& Fei, 2013) https://www.sciencedirect.com/science/article/pii/S1474667016314999\n \n - Isolation-based Anomaly Detection (Liu, Ting \\& Zhou, 2011) https://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/tkdd11.pdf\n\n - Scikit MultiFlow HalfSpace Trees Implementation - “Fast anomaly detection for streaming data,” in IJCAI Proceedings - S.C.Tan, K.M.Ting, and T.F.Liu, \n https://scikit-multiflow.github.io/scikit-multiflow/_autosummary/skmultiflow.anomaly_detection.HalfSpaceTrees.html#id2\n\n - Original implementation of Isolation Forest (not the one in SK-learn) https://github.com/Divya-Bhargavi/isolation-forest\n\"\"\"\n\n\n\n\"\"\"# Notebook File Structure is the following\n\nPart 1 - Main Class contians\n - Init,\n - Partial_fit,\n - Update_model,\n - Predict methods which use the anomaly_score methods of the iForest class\n\nPart 2 - Isolation Forest class (re-used) and main functions\n - \n\nPart 3 - Testing some examples and comparison of HS-Trees and IsolatationForestStream \n- on synthetic \n- on Real (HTTP) data.\n\n## Import lib and packages\n\"\"\"\n\n\n\n\"\"\"## Install Cyphion then load the Scikit-multiflow latest release from Github\"\"\"\n\n## !pip install scikit-multiflow\n\n\n\n#!pip install -U git+https://github.com/scikit-multiflow/scikit-multiflow\n\nfrom skmultiflow.core import BaseSKMObject, ClassifierMixin\n\nfrom skmultiflow.utils import check_random_state\n\nimport numpy as np\n\nimport random\n\nfrom skmultiflow.drift_detection.adwin import ADWIN\n\n\n\"\"\"# Part 1 - Main class - IsolationForestStream\"\"\"\n\n## To implement this class, we took inspiration from Scikit-MultiFLow HSTrees implementation to follow its requirements.\n\nclass IsolationForestStream(BaseSKMObject, ClassifierMixin):\n\n \"\"\"\n This code implements Anomaly Detection Approach Based on Isolation Forest Algorithm for Streaming Data Using Sliding Window (Ding \\& Fei, 2013) [3]\n\n Each sample has an anomaly score is computed based on Isolation Forest anomaly based approach [2]. The concept of Isolation forest [1]\n consists on isolating observations by randomly selecting a feature\n and then randomly selecting a split value between the maximum and minimum\n values of the selected feature.\n \n Model is updated of a Drift has been detected based on a input drift threshold. The drift detection approach is proposed by [2] \n and works as follow : if the averaged anomaly score between two successive sliding windows is highter than the drift threshold (u), \n then the previous model is completely discarded and a new model is build as an isolation forest on latest sliding windows stream.\n\n\n Parameters\n\n ---------\n\n n_estimators: int, optional (default=25)\n\n Number of trees in the ensemble.\n\n 't' in the original paper.\n\n\n\n window_size: int, optional (default=100)\n\n The window size of the stream.\n\n ψ, 'Psi' in the original paper. 
\n\n## Optional \n\n anomaly_threshold: double, optional (default=0.5)\n\n The threshold for declaring anomalies.\n\n Any instance prediction probability above this threshold will be declared as an anomaly.\n\n drift_threshold: double, optional (default=0.5)\n\n The threshold for detecting Drift and update the model.\n\n If the averaged anomaly score between two successive sliding windows is highter than the threshold (u), \n then the previous model is completely discarded and a new model is build as an isolation forest on latest sliding windows stream.\n This parameters is supposed to be know by an expert domain, depending on data set.\n\n## Other Attributes\n\n ensemble : Isolation Tree Ensemble\n\n Contain an Isolation Tree Ensemble object, current model for IsolationForestStream\n\n sample_size : int\n\n Number of sample seen since the update\n\n anomaly_rate : float\n\n Rate of the anomalies in the previous sliding window (AnomalyRate in the original paper iForestASD)\n\n prec_window & window : numpy.ndarray of shape (n_samples, self.window_size)\n\n The previous and current window of data\n\n cpt : int\n\n Counter, if the n_estimator is higher than its, it will fit\n\n References\n ----------\n\n [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. \n“Isolation forest.” Data Mining, 2008. ICDM’08. Eighth IEEE International Conference on.\n\n [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. “Isolation-based anomaly detection.” ACM Transactions on Knowledge Discovery from Data (TKDD) 6.1 (2012): \nself.n_estimators\n\n [3] Ding, Zhiguo. (2013) An Anomaly Detection Approach Based on Isolation Forest Algorithm for Streaming Data Using Sliding Window. 12-17. 10.3182/20130902-3-CN-3020.00044. \n\n \"\"\" \n\n def __init__(self, window_size=100, n_estimators=25, anomaly_threshold=0.5, \n drift_threshold=0.5, random_state=None, version=\"AnomalyRate\",\n #Parameters for partial model update\n n_estimators_updated=0.5, updated_randomly=True,\n #Parameters for NDKSWIN\n alpha=0.01, data=None, n_dimensions=1, n_tested_samples=0.1,\n fixed_checked_dimension = False, fixed_checked_sample=False):\n \n super().__init__()\n\n self.n_estimators = n_estimators\n\n self.ensemble = None\n \n self.random_state = random_state\n\n self.window_size = window_size\n\n self.samples_seen = 0\n\n self.anomaly_rate = 0.20 \n\n self.anomaly_threshold = anomaly_threshold\n\n self.drift_threshold = drift_threshold\n\n self.window = None\n\n self.prec_window = None\n\n self.cpt = 0\n self.version = version\n self.model_update = [] #To count the number of times the model have been updated 0 Not updated and 1 updated\n self.model_update_windows = [] #To count the number of times the model have been updated 0 Not updated and 1 updated\n self.model_update.append(version) #Initialisation to know the concerned version of IForestASD\n self.model_update_windows.append(\"samples_seen_\"+version) #Initialisation to know the number of data seen in the window\n self.n_estimators_updated=int(self.n_estimators*n_estimators_updated) # The percentage of new trees to compute when update on new window\n if n_estimators_updated <= 0.0 or n_estimators_updated > 1.0 :\n raise ValueError(\"n_estimators_updated must be > 0 and <= 1\")\n \n self.updated_randomly=updated_randomly # If we will choose randomly the trees: True for randomly, \n # False to pick the first (n_estimators- int(n_estimators*n_estimators_updated)) trees\n\n self.alpha=alpha\n self.n_dimensions=n_dimensions\n self.n_tested_samples=n_tested_samples\n 
self.fixed_checked_dimension =fixed_checked_dimension\n self.fixed_checked_sample=fixed_checked_sample\n self.first_time_fit = True\n \n # TODO Maurras 27112020: Find a way to optimize the use of ADWIN()\n self.adwin = ADWIN()\n \n def partial_fit(self, X, y, classes=None, sample_weight=None):\n\n \"\"\" Partially (incrementally) fit the model.\n Parameters\n ----------\n X : numpy.ndarray of shape (n_samples, n_features)\n The features to train the model.\n y: numpy.ndarray of shape (n_samples)\n An array-like with the class labels of all samples in X.\n classes: None\n Not used by this method.\n sample_weight: None\n Not used by this method.\n Returns\n -------\n self\n \"\"\"\n\n ## get the number of observations\n number_instances, _ = X.shape\n \n if(self.samples_seen==0):\n ## ToDo ? Give a sample of self.window_size in attribute of iForest\n iforest = IsolationTreeEnsemble(self.window_size,self.n_estimators,self.random_state)\n self.ensemble = iforest\n\n for i in range(number_instances):\n self._partial_fit(X[i], y[i])\n\n return self\n\n\n def _partial_fit(self, X, y):\n\n \"\"\" Trains the model on samples X and corresponding targets y.\n Private function where actual training is carried on.\n Parameters\n ----------\n X: numpy.ndarray of shape (1, n_features)\n Instance attributes.\n y: int\n Class label for sample X. Not used in this implementaion which is Unsupervised\n \"\"\" \n \n \"\"\"\n Reshape X and add it to our window if it isn't full.\n If it's full, give window to our precedent_window.\n If we are at the end our window, fit if we're learning \n Check the anomaly score of our window \n Update if self.anomaly_rate > self.drift_threshold\n\n \"\"\"\n X = np.reshape(X,(1,len(X)))\n\n if self.samples_seen % self.window_size == 0:\n ## Update the two windows (precedent one and current windows)\n self.prec_window = self.window\n self.window = X\n else:\n self.window = np.concatenate((self.window,X))\n \n \n if self.samples_seen % self.window_size == 0 and self.samples_seen !=0:\n \n if(self.version == \"PCANDKSWIN\"):\n if self.first_time_fit: #It is the first window \n from source import ndkswin as ndk\n self.ndkswin = ndk.NDKSWIN(alpha=self.alpha, data=self.prec_window, n_dimensions=self.n_dimensions, \n window_size=self.window_size, stat_size=30, \n n_tested_samples=self.n_tested_samples,\n fixed_checked_dimension = self.fixed_checked_dimension, \n fixed_checked_sample=self.fixed_checked_sample)\n #if(self.cpt<self.n_estimators):\n self.ensemble.fit(self.prec_window)\n #self.cpt += 1 \n self.first_time_fit = False\n self.model_update.append(0)\n self.model_update_windows.append(self.samples_seen)\n #print(\"ndkswin created\")\n else:\n #print(\"self.samples_seen\")\n #print(self.samples_seen)\n #print(\"self.window_size\")\n #print(self.window_size)\n #print(\"self.window\")\n #print(self.window)\n #print(\"X\")\n #print(X)\n #print(\"prec_window\")\n #print(self.prec_window) \n \n self.ndkswin.add_element(self.prec_window)\n if self.ndkswin.detected_change():\n self.model_update.append(1)\n self.model_update_windows.append(self.samples_seen)\n self.update_model(self.prec_window) # This function will discard completly the old model and create a new one\n else:\n self.model_update.append(0)\n self.model_update_windows.append(self.samples_seen)\n \n elif(self.version == \"NDKSWIN\"):\n if self.first_time_fit: #It is the first window \n from source import ndkswin as ndk\n self.ndkswin = ndk.NDKSWIN(alpha=self.alpha, data=self.prec_window, n_dimensions=self.n_dimensions, \n 
window_size=self.window_size, stat_size=30, \n n_tested_samples=self.n_tested_samples,\n fixed_checked_dimension = self.fixed_checked_dimension, \n fixed_checked_sample=self.fixed_checked_sample)\n #if(self.cpt<self.n_estimators):\n self.ensemble.fit(self.prec_window)\n #self.cpt += 1 \n self.first_time_fit = False\n self.model_update.append(0)\n self.model_update_windows.append(self.samples_seen)\n #print(\"ndkswin created\")\n else:\n #print(\"self.samples_seen\")\n #print(self.samples_seen)\n #print(\"self.window_size\")\n #print(self.window_size)\n #print(\"self.window\")\n #print(self.window)\n #print(\"X\")\n #print(X)\n #print(\"prec_window\")\n #print(self.prec_window) \n \n self.ndkswin.add_element(self.prec_window)\n if self.ndkswin.detected_change():\n self.model_update.append(1)\n self.model_update_windows.append(self.samples_seen)\n self.update_model(self.prec_window) # This function will discard completly the old model and create a new one\n else:\n self.model_update.append(0)\n self.model_update_windows.append(self.samples_seen)\n else: \n #Fit the ensemble if it's not empty\n #if(self.cpt<self.n_estimators):\n # self.ensemble.fit(self.prec_window)\n # self.cpt += 1 \n if self.first_time_fit: #It is the first window \n self.ensemble.fit(self.prec_window)\n self.first_time_fit = False\n\n if(self.version == \"AnomalyRate\"):\n #print('start AnomalyRate version')\n ## Update the current anomaly score\n self.anomaly_rate = self.anomaly_scores_rate(self.prec_window) ## Anomaly rate\n #print(self.anomaly_rate) ## \n ## Update the model if the anomaly rate is greater than the threshold (u in the original paper [3])\n if self.anomaly_rate > self.drift_threshold: ## Use Anomaly RATE ?\n self.model_update.append(1)\n self.model_update_windows.append(self.samples_seen)\n self.update_model(self.prec_window) # This function will discard completly the old model and create a new one\n else:\n self.model_update.append(0)\n self.model_update_windows.append(self.samples_seen)\n \n elif(self.version == \"MAnomalyRate\"):\n #print('start AnomalyRate version')\n ## Update the current anomaly score\n self.anomaly_rate = self.anomaly_scores_rate(self.prec_window) ## Anomaly rate\n #print(self.anomaly_rate) ## \n ## Update the model if the anomaly rate is greater than the threshold (u in the original paper [3])\n if self.anomaly_rate > self.drift_threshold: ## Use Anomaly RATE ?\n self.model_update.append(1)\n self.model_update_windows.append(self.samples_seen)\n #print(\"MIForestASD Before partial_update_model\")\n self.partial_update_model(self.prec_window) # This function will discard completly the old model and create a new one\n else:\n self.model_update.append(0)\n self.model_update_windows.append(self.samples_seen)\n \n elif(self.version == \"SADWIN\"):\n #if self.first_time_fit:\n # from skmultiflow.drift_detection.adwin import ADWIN\n # adwin = ADWIN()\n # self.first_time_fit = False\n #print('start sadwin version')\n #TODO MAJ Maurras 04112020 : Modify the way to detect the concept drift using the ADWIN() function availlable in scikitMultiflow\n #from skmultiflow.drift_detection.adwin import ADWIN\n #adwin = ADWIN()\n prec_window_scores = self.ensemble.anomaly_score(self.prec_window)\n #print(prec_window_scores)\n #print('Before add element to adwin')\n drift_detected = False\n #ind = 0\n for score in prec_window_scores:\n #adwin.add_element(prec_window_scores)\n #print(\"added score = \"+ str(score) + \" on index = \"+ str(ind))\n self.adwin.add_element(score)\n #print('start change 
detection')\n if self.adwin.detected_change():\n #print('Change detected on index = '+ str(ind))\n drift_detected = True\n #print(\"Index = \"+str(i) +\" of the window with data \"+ str(self.prec_window[i]))\n break;\n #ind = ind + 1\n if(drift_detected): \n #print('start model updating')\n self.model_update.append(1)\n self.model_update_windows.append(self.samples_seen)\n self.update_model(self.prec_window)\n self.adwin.reset()\n else:\n self.model_update.append(0)\n self.model_update_windows.append(self.samples_seen)\n \n elif(self.version == \"SMADWIN\"):\n #if self.first_time_fit:\n # from skmultiflow.drift_detection.adwin import ADWIN\n # adwin = ADWIN()\n # self.first_time_fit = False\n #print('start sadwin version')\n #TODO MAJ Maurras 04112020 : Modify the way to detect the concept drift using the ADWIN() function availlable in scikitMultiflow\n #from skmultiflow.drift_detection.adwin import ADWIN\n #adwin = ADWIN()\n prec_window_scores = self.ensemble.anomaly_score(self.prec_window)\n #print(prec_window_scores)\n #print('Before add element to adwin')\n drift_detected = False\n #ind = 0\n for score in prec_window_scores:\n #adwin.add_element(prec_window_scores)\n #print(\"added score = \"+ str(score) + \" on index = \"+ str(ind))\n self.adwin.add_element(score)\n #print('start change detection')\n if self.adwin.detected_change():\n #print('Change detected on index = '+ str(ind))\n drift_detected = True\n #print(\"Index = \"+str(i) +\" of the window with data \"+ str(self.prec_window[i]))\n break;\n #ind = ind + 1\n if(drift_detected): \n #print('start model updating')\n self.model_update.append(1)\n self.model_update_windows.append(self.samples_seen)\n #print(\"SMADWIN Before partial_update_model\")\n self.partial_update_model(self.prec_window)\n self.adwin.reset()\n else:\n self.model_update.append(0)\n self.model_update_windows.append(self.samples_seen)\n \n elif(self.version == \"PADWIN\"):\n #if self.first_time_fit:\n # from skmultiflow.drift_detection.adwin import ADWIN\n # adwin = ADWIN()\n # self.first_time_fit = False\n #print('start PADWIN version')\n #TODO MAJ Maurras 04112020 : Modify the way to detect the concept drift using the ADWIN() function availlable in scikitMultiflow\n #from skmultiflow.drift_detection.adwin import ADWIN\n #adwin = ADWIN()\n prec_window_predictions = self.predict_simple(self.prec_window)\n #print(prec_window_predictions)\n #print('Before add element to adwin')\n drift_detected = False\n #ind = 0\n for pred in prec_window_predictions:\n #adwin.add_element(prec_window_scores)\n #print(\"added pred = \"+ str(pred))\n self.adwin.add_element(pred)\n #print('start change detection')\n if self.adwin.detected_change():\n #print('Change detected')\n drift_detected = True\n #print(\"Index = \"+str(i) +\" of the window with data \"+ str(self.prec_window[i]))\n break;\n #ind = ind + 1\n if(drift_detected): \n #print('start model updating')\n self.model_update.append(1)\n self.model_update_windows.append(self.samples_seen)\n self.update_model(self.prec_window)\n self.adwin.reset()\n else:\n self.model_update.append(0)\n self.model_update_windows.append(self.samples_seen)\n \n elif(self.version == \"PMADWIN\"):\n #if self.first_time_fit:\n # from skmultiflow.drift_detection.adwin import ADWIN\n # adwin = ADWIN()\n # self.first_time_fit = False\n #print('start PADWIN version')\n #TODO MAJ Maurras 04112020 : Modify the way to detect the concept drift using the ADWIN() function availlable in scikitMultiflow\n #from skmultiflow.drift_detection.adwin import 
ADWIN\n #adwin = ADWIN()\n prec_window_predictions = self.predict_simple(self.prec_window)\n #print(prec_window_predictions)\n #print('Before add element to adwin')\n drift_detected = False\n #ind = 0\n for pred in prec_window_predictions:\n #adwin.add_element(prec_window_scores)\n #print(\"added pred = \"+ str(pred))\n self.adwin.add_element(pred)\n #print('start change detection')\n if self.adwin.detected_change():\n #print('Change detected')\n drift_detected = True\n #print(\"Index = \"+str(i) +\" of the window with data \"+ str(self.prec_window[i]))\n break;\n #ind = ind + 1\n if(drift_detected): \n #print('start model updating')\n self.model_update.append(1)\n self.model_update_windows.append(self.samples_seen)\n #print(\"PMADWIN Before partial_update_model\")\n self.partial_update_model(self.prec_window)\n self.adwin.reset()\n else:\n self.model_update.append(0)\n self.model_update_windows.append(self.samples_seen)\n \n self.samples_seen += 1\n \n #def execute_NDKSWIN(stream, window_size=100, window_number=1000):\n # first_window = stream.next_sample(window_size)[0]\n #print(first_window)\n #print(type(first_window))\n # ndkswin = ndk.NDKSWIN(alpha=0.01, data=first_window, n_dimensions=1, n_tested_samples=0.01,\n # fixed_checked_dimension = True, fixed_checked_sample=True)\n # Store detections\n #detections = []\n # Process stream via NDKSWIN and print detections\n #for i in range(window_number-1):\n #data = stream.next_sample(window_size)\n # data = stream.next_sample(window_size)\n # batch = data[0]\n # ndkswin.add_element(batch)\n # if ndkswin.detected_change():\n #print(\"\\rIteration {}\".format(i))\n #print(\"\\r KSWINReject Null Hyptheses\")\n # detections.append(i)\n #ndkswin.reset()\n #ndkswin = ndk.NDKSWIN(alpha=0.01, data=batch, n_dimensions=1, n_tested_samples=0.1,\n # fixed_checked_dimension = True, fixed_checked_sample=True)\n #print(\"Drift detected in window n° \"+str(detections))\n #print(\"Number of detections: \"+str(len(detections)))\n \n def partial_update_model(self,window):\n \"\"\" Update the model (fit a new isolation forest) if the current anomaly rate (in the previous sliding window)\n is higher than self.drift_threshold\n Parameters: \n window: numpy.ndarray of shape (self.window_size, n_features)\n Re-Initialize our attributes and our ensemble, fit with the current window\n\n \"\"\"\n \n #print(\"In partial_update_model\")\n self.is_learning_phase_on = True\n iforest = IsolationTreeEnsemble(self.window_size,self.n_estimators_updated,self.random_state)\n #print(\"After new model creation\")\n iforest.fit(window)\n #print(\"After new model fitting\")\n if self.updated_randomly :\n #print(\"Randomly choose trees\")\n old_trees_idx = random.sample(list(range(len(self.ensemble.trees))), (self.n_estimators-self.n_estimators_updated))\n #print(type(old_trees_idx))\n else:\n #print(\"First trees\")\n old_trees_idx = range(self.n_estimators-self.n_estimators_updated)\n #print(type(old_trees_idx))\n \n #print(\"After indices choices\")\n #TODO Maurras 26112020: Code à reécrire pour facilement récuperer les arbres à garder sans explicitement utiliser la boucle\n for t in old_trees_idx:\n iforest.trees.append(self.ensemble.trees[t])\n #self.ensemble.trees = np.concatenate((self.ensemble.trees[old_trees_idx],iforest.trees))\n self.ensemble.trees = iforest.trees\n #print(\"After np.concatenate\")\n \n #self.nb_update = self.nb_update + 1\n print(\"\")\n print(\"The model was partially updated by training a sub new iForest with the version : \"+self.version)\n \n def 
update_model(self,window):\n \"\"\" Update the model (fit a new isolation forest) if the current anomaly rate (in the previous sliding window)\n is higher than self.drift_threshold\n Parameters: \n window: numpy.ndarray of shape (self.window_size, n_features)\n Re-Initialize our attributes and our ensemble, fit with the current window\n\n \"\"\"\n\n ## ToDo ? Give a sample of self.window_size in attribute of iForest\n #MAJ Maurras 03112020 : No, Leave it like that. Must give all the window to tt construct the forest of itrees.\n self.is_learning_phase_on = True\n iforest = IsolationTreeEnsemble(self.window_size,self.n_estimators,self.random_state)\n self.ensemble = iforest\n self.ensemble.fit(window)\n #self.nb_update = self.nb_update + 1\n print(\"\")\n print(\"The model was updated by training a new iForest with the version : \"+self.version)\n \n \n def anomaly_scores_rate(self, window):\n \"\"\"\n Given a 2D matrix of observations, compute the anomaly rate \n for all instances in the window and return an anomaly rate of the given window.\n\n Parameters :\n window: numpy.ndarray of shape (self.window_size, n_features)\n \"\"\"\n\n score_tab = 2.0 ** (-1.0 * self.ensemble.path_length(window) / c(len(window)))\n score = 0\n for x in score_tab:\n if x > self.anomaly_threshold:\n score += 1\n return score / len(score_tab)\n \n '''\n MAJ : 21112020\n By : Maurras\n Add new function to classify instances (anomaly or normal)\n ''' \n def predict_simple(self, X):\n \"\"\"\n Given a window, Predict the instance class (1 or 0) by using predict_from_instances_scores on our model\n\n \"\"\"\n #print('predict_simple')\n prediction = self.ensemble.predict_from_instances_scores(self.ensemble.anomaly_score(X),\n self.anomaly_threshold) ## return prediction of all instances\n\n #print('end predict_simple')\n return prediction\n \n def predict(self, X):\n \"\"\"\n Given an instance, Predict the anomaly (1 or 0) based on the last sample of the window by using predict_proba if our model have fit, \n else return None\n\n \"\"\"\n if(self.samples_seen <= self.window_size):\n\n return [-1] ## Return the last element\n\n X = np.reshape(X,(1,len(X[0])))\n self.prec_window = np.concatenate((self.prec_window ,X)) ## Append the instances in the sliding window\n\n prediction = self.ensemble.predict_from_anomaly_scores(self.predict_proba(self.prec_window),self.anomaly_threshold) ## return 0 or 1\n\n return [prediction]\n \n def predict_proba(self, X):\n \"\"\"\n Calculate the anomaly score of the window if our model have fit, else return None\n Parameters :\n X: numpy.ndarray of shape (self.window_size, n_features) \n\n \"\"\"\n if(self.samples_seen <= self.window_size):\n return [-1]\n return self.ensemble.anomaly_score(self.prec_window)[-1] # Anomaly return an array with all scores of each data, taking -1 return the last instance (X) anomaly score\n\n\"\"\"# Part 2- IsolationTreeEnsemble Class (iForest in the original paper)\"\"\"\n\n# Follows original paper algo from https://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/icdm08b.pdf\n# Original Source re-used and adpted to our project from https://github.com/Divya-Bhargavi/isolation-forest \nclass IsolationTreeEnsemble:\n def __init__(self, sample_size, n_trees, random_state):\n self.sample_size = sample_size\n self.n_trees = n_trees\n self.depth = np.log2(sample_size)\n self.trees = []\n self.random_state = random_state\n self._random_state = check_random_state(self.random_state)\n self.is_learning_phase_on = True \n\n def fit(self, X:np.ndarray):\n \"\"\"\n 
Given a 2D matrix of observations, create an ensemble of IsolationTree\n objects and store them in a list: self.trees. Convert DataFrames to\n ndarray objects.\n \"\"\"\n len_x = len(X)\n\n for i in range(self.n_trees):\n sample_idx = random.sample(list(range(len_x)), self.sample_size )\n temp_tree = IsolationTree(self.depth, 0).fit(X[sample_idx])\n self.trees.append(temp_tree)\n\n return self\n \n def path_length(self, X:np.ndarray):\n \"\"\"\n Given a 2D matrix of observations, X, compute the average path length\n for each observation in X. Compute the path length for x_i using every\n tree in self.trees then compute the average for each x_i. Return an\n ndarray of shape (len(X),1).\n \"\"\"\n pl_vector = []\n\n for x in (X):\n pl = np.array([path_length_tree(x, t, 0) for t in self.trees])\n pl = pl.mean()\n\n pl_vector.append(pl)\n\n pl_vector = np.array(pl_vector).reshape(-1, 1)\n\n return pl_vector\n\n def anomaly_score(self, X:np.ndarray):\n \"\"\"\n Given a 2D matrix of observations, X, compute the anomaly score\n for each x_i observation, returning an ndarray of them.\n \"\"\"\n return 2.0 ** (-1.0 * self.path_length(X) / c(len(X)))\n\n def predict_from_anomaly_scores(self, scores:int, threshold:float):\n \"\"\"\n Given an array of scores and a score threshold, return an array of\n the predictions: 1 for any score >= the threshold and 0 otherwise.\n \"\"\"\n predictions = 1 if scores >= threshold else 0\n\n return predictions\n \n '''\n MAJ : 21112020\n By : Maurras\n Add new function to classify instances (anomaly or normal)\n ''' \n def predict_from_instances_scores(self, scores:np.ndarray, threshold:float) -> np.ndarray:\n \"\"\"\n Given an array of scores and a score threshold, return an array of\n the predictions: -1 for any score >= the threshold and 1 otherwise.\n \"\"\"\n \n predictions = [1 if p[0] >= threshold else 0 for p in scores]\n \n return predictions\n\nclass IsolationTree:\n def __init__(self, height_limit, current_height):\n\n self.depth = height_limit\n self.current_height = current_height\n self.split_by = None\n self.split_value = None\n self.right = None\n self.left = None\n self.size = 0\n self.exnodes = 0\n self.n_nodes = 1\n\n def fit(self, X:np.ndarray):\n \"\"\"\n Given a 2D matrix of observations, create an isolation tree. 
Set field\n self.root to the root of that tree and return it.\n If you are working on an improved algorithm, check parameter \"improved\"\n and switch to your new functionality else fall back on your original code.\n \"\"\"\n\n if len(X) <= 1 or self.current_height >= self.depth:\n self.exnodes = 1\n self.size = X.shape[0]\n\n return self\n\n split_by = random.choice(np.arange(X.shape[1]))\n X_col = X[:, split_by]\n min_x = X_col.min()\n max_x = X_col.max()\n\n #TODO MAJ: MAurras 03112020 = Revoir ce bout de code : ça pourrait créer des problèmes\n if min_x == max_x:\n self.exnodes = 1\n self.size = len(X)\n\n return self\n\n else:\n\n split_value = min_x + random.betavariate(0.5, 0.5) * (max_x - min_x)\n\n w = np.where(X_col < split_value, True, False)\n del X_col\n\n self.size = X.shape[0]\n self.split_by = split_by\n self.split_value = split_value\n\n self.left = IsolationTree(self.depth, self.current_height + 1).fit(X[w])\n self.right = IsolationTree(self.depth, self.current_height + 1).fit(X[~w])\n self.n_nodes = self.left.n_nodes + self.right.n_nodes + 1\n\n return self\n\ndef c(n):\n if n > 2:\n return 2.0*(np.log(n-1)+0.5772156649) - (2.0*(n-1.)/(n*1.0))\n elif n == 2:\n return 1\n if n == 1:\n return 0\n\ndef path_length_tree(x, t,e):\n e = e\n if t.exnodes == 1:\n e = e+ c(t.size)\n return e\n else:\n a = t.split_by\n if x[a] < t.split_value :\n return path_length_tree(x, t.left, e+1)\n if x[a] >= t.split_value :\n return path_length_tree(x, t.right, e+1)"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.log",
"numpy.where",
"numpy.arange",
"numpy.log2"
]
] |
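The record above implements the isolation-forest scoring of Liu et al. (ICDM 2008): each observation's average path length over the trees is turned into a score s(x, n) = 2^(−E[h(x)]/c(n)). A minimal standalone sketch of that formula, assuming `avg_path_lengths` holds the per-observation mean path lengths (as a method like `IsolationTreeEnsemble.path_length` returns):

```python
import numpy as np

def c(n: int) -> float:
    """Average path length of an unsuccessful BST search over n points."""
    if n > 2:
        return 2.0 * (np.log(n - 1) + 0.5772156649) - 2.0 * (n - 1) / n
    if n == 2:
        return 1.0
    return 0.0

def anomaly_scores(avg_path_lengths: np.ndarray, n: int) -> np.ndarray:
    """s(x, n) = 2 ** (-E[h(x)] / c(n)); scores close to 1 flag anomalies."""
    return 2.0 ** (-avg_path_lengths / c(n))

# Shorter average path lengths (easier to isolate) give higher scores.
print(anomaly_scores(np.array([2.0, 8.0]), n=256))
```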
dankunis/visual-geometry-project | [
"ff03e66046655529d851a7d5b771ee213bc8b8f9"
] | [
"src/camera_calibration.py"
] | [
"#!/usr/bin/env python3.7.2\n# vim: set ts=4 et:\n# -*- indent-tabs-mode: t; tab-width: 4 -*-\n#\n# @brief Camera Calibration function\n# @details In this function the camera calibration will be calculated from a given chessboard_images\n# @author Simon Rueba <[email protected]>\n# Daniel Kunis <[email protected]>\n# Florian Maier <[email protected]>\n\nimport glob\nimport os\n\nimport cv2\nimport numpy as np\nimport yaml\nfrom tqdm import tqdm\n\nfrom utils import image_resize\n\n\ndef calc_camera_calibration(chessboard_size, termination_criteria, calibration_img_path, calibration_config_path):\n \"\"\"\n Calculates the camera calibration from a given chessboard_images\n :param chessboard_size: Size of the chessboard_images\n :param termination_criteria: number of iterations and/or the accuracy\n :param calibration_img_path: Path to the chessboard_images\n :param calibration_config_path: Path on where to store the calibration results\n :return: None\n \"\"\"\n print(\"[CALIBRATION] : Calculating camera calibration...\")\n\n chessboard_x, chessboard_y = chessboard_size\n\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n objp = np.zeros((chessboard_y * chessboard_x, 3), np.float32)\n objp[:, :2] = np.mgrid[0:chessboard_x, 0:chessboard_y].T.reshape(-1, 2)\n\n # Arrays to store object points and image points from all the images.\n obj_points = [] # 3d point in real world space\n img_points = [] # 2d points in image plane.\n\n counter = 0\n number_of_images = len(os.listdir(calibration_img_path)) - 1\n with tqdm(total=number_of_images) as pbar:\n for fname in glob.glob(os.path.join(calibration_img_path, '*')):\n counter += 1\n pbar.update(1)\n\n if counter == number_of_images:\n break\n\n img = cv2.imread(fname)\n\n # resize the image so it would be no bigger than 1920x1080\n height, width = img.shape[:2]\n if max(width, height) > 2000:\n if height > width:\n new_width = 1080\n else:\n new_width = 1920\n\n img = image_resize(img, width=new_width)\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Find the chess board corners\n ret, corners = cv2.findChessboardCorners(gray, (chessboard_x, chessboard_y), None)\n\n # If found, add object points, image points (after refining them)\n if ret:\n obj_points.append(objp)\n corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), termination_criteria)\n img_points.append(corners2)\n\n cv2.destroyAllWindows()\n\n _, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, gray.shape[::-1], None, None)\n\n reprojection_error = 0\n for i in range(len(obj_points)):\n imgpoints2, _ = cv2.projectPoints(obj_points[i], rvecs[i], tvecs[i], mtx, dist)\n error = cv2.norm(img_points[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)\n reprojection_error += error\n\n reprojection_error /= len(obj_points)\n\n print(\"[CALIBRATION] : Done with calculating. Saving file in: \" + calibration_config_path)\n\n # It's very important to transform the matrix to list.\n data = {\n 'camera_matrix': np.asarray(mtx).tolist(),\n 'dist_coeff': np.asarray(dist).tolist(),\n 'reprojection_error': reprojection_error\n }\n with open(calibration_config_path, \"w\") as f:\n yaml.dump(data, f)\n\n print(\"[CALIBRATION] : File saved!\")\n"
] | [
[
"numpy.asarray",
"numpy.zeros"
]
] |
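The calibration script above reports a mean reprojection error by re-projecting the board points with cv2.projectPoints and comparing against the detected corners. A small hedged sketch of that per-image metric on synthetic data (the intrinsics, pose, and noise level below are made up for illustration):

```python
import cv2
import numpy as np

# Synthetic 9x6 chessboard object points, built the same way as in the script.
objp = np.zeros((6 * 9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

K = np.array([[800.0, 0.0, 320.0], [0.0, 800.0, 240.0], [0.0, 0.0, 1.0]])  # assumed intrinsics
dist = np.zeros(5)                    # assume no lens distortion
rvec = np.array([0.1, -0.2, 0.05])    # assumed board pose
tvec = np.array([0.0, 0.0, 10.0])

projected, _ = cv2.projectPoints(objp, rvec, tvec, K, dist)
# Stand-in for detected corners: the true projections plus pixel noise.
detected = (projected + np.random.normal(scale=0.3, size=projected.shape)).astype(np.float32)

# Same per-image metric as the script: L2 norm of the residuals / point count.
error = cv2.norm(detected, projected.astype(np.float32), cv2.NORM_L2) / len(projected)
print(f"reprojection error: {error:.3f} px")
```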
Sr-vZ/Auto-Grade-OMR | [
"855a30c1e89826c07cbb84e141d5bb2eb2ccf574"
] | [
"document_scanner.py"
] | [
"import cv2\nimport numpy as np\nimport os\nimport argparse\n\ndef transform(pos):\n\tpts = []\n\tn = len(pos)\n\tfor i in range(n):\n\t\tpts.append(list(pos[i][0]))\n\t#print pts\n\tsums = {}\n\tdiffs = {}\n\ttl = tr = bl = br = 0\n\tfor i in pts:\n\t\tx = i[0]\n\t\ty = i[1]\n\t\tsum = x+y\n\t\tdiff = y-x\n\t\tsums[sum] = i\n\t\tdiffs[diff] = i\n\tsums = sorted(sums.items())\n\tdiffs = sorted(diffs.items())\n\tn = len(sums)\n\trect = [sums[0][1], diffs[0][1], diffs[n-1][1], sums[n-1][1]]\n\t#\t top-left top-right bottom-left bottom-right\n\n\th1 = np.sqrt((rect[0][0]-rect[2][0])**2 +\n\t (rect[0][1]-rect[2][1])**2) # height of left side\n\th2 = np.sqrt((rect[1][0]-rect[3][0])**2 +\n\t (rect[1][1]-rect[3][1])**2) # height of right side\n\th = max(h1, h2)\n\n\tw1 = np.sqrt((rect[0][0]-rect[1][0])**2 +\n\t (rect[0][1]-rect[1][1])**2) # width of upper side\n\tw2 = np.sqrt((rect[2][0]-rect[3][0])**2 +\n\t (rect[2][1]-rect[3][1])**2) # width of lower side\n\tw = max(w1, w2)\n\n\t#print '#',rect\n\treturn int(w), int(h), rect\n\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, help=\"Path to the image\")\nargs = vars(ap.parse_args())\n\n# img = cv2.imread('C:\\Users\\Admin\\Desktop\\pic.jpg')\nimg = cv2.imread(args[\"image\"])\nr = 500.0 / img.shape[1]\ndim = (500, int(img.shape[0] * r))\nimg = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n#img=cv2.resize(img,None,fx=.1,fy=.1,interpolation=cv2.INTER_CUBIC)\ncv2.imshow('ORIGINAL', img)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ngray = cv2.GaussianBlur(gray, (11, 11), 0)\nedge = cv2.Canny(gray, 100, 200)\n# _, contours, _ = cv2.findContours(edge.copy(), 1, 1)\n# _, contours = cv2.findContours(edge.copy(), 1, 1)\ntmp = cv2.findContours(edge.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\ncontours = tmp[0] if len(tmp) == 2 else tmp[1]\nn = len(contours)\nmax_area = 0\npos = 0\nfor i in contours:\n\tarea = cv2.contourArea(i)\n\tif area > max_area:\n\t\tmax_area = area\n\t\tpos = i\nperi = cv2.arcLength(pos, True)\napprox = cv2.approxPolyDP(pos, 0.02*peri, True)\n\nsize = img.shape\ncv2.imshow('BILL', img)\nw, h, arr = transform(approx)\n\npts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])\npts1 = np.float32(arr)\nM = cv2.getPerspectiveTransform(pts1, pts2)\ndst = cv2.warpPerspective(img, M, (w, h))\n#dst=dst.astype('uint8')*255\nimage = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)\nimage = cv2.adaptiveThreshold(image, 255, 1, 0, 11, 2)\nimage = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA)\ncv2.imshow('IMAGE', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.float32",
"numpy.sqrt"
]
] |
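document_scanner.py above orders the four contour corners by coordinate sum/difference and then rectifies the page with cv2.getPerspectiveTransform. A condensed sketch of that corner ordering and warp on a made-up quadrilateral (the corner coordinates and blank image are placeholders):

```python
import cv2
import numpy as np

corners = np.array([[48, 35], [430, 60], [20, 500], [410, 520]], dtype=np.float32)

s = corners.sum(axis=1)
d = np.diff(corners, axis=1).ravel()          # y - x, like `diff` in transform()
tl, br = corners[np.argmin(s)], corners[np.argmax(s)]
tr, bl = corners[np.argmin(d)], corners[np.argmax(d)]

w = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))  # widest side
h = int(max(np.linalg.norm(tr - br), np.linalg.norm(tl - bl)))  # tallest side

src = np.float32([tl, tr, bl, br])
dst = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
M = cv2.getPerspectiveTransform(src, dst)

img = np.zeros((560, 460, 3), dtype=np.uint8)  # placeholder image
warped = cv2.warpPerspective(img, M, (w, h))
print(warped.shape)
```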
JJoving/wenet | [
"4a2195744dba43fe4fb9ad8d46a2b90a80dbdc4e"
] | [
"wenet/dataset/dataset.py"
] | [
"# Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Chao Yang)\n# Copyright (c) 2021 Jinsong Pan\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport codecs\nimport copy\nimport logging\nimport random\n\nimport numpy as np\nimport torch\nimport torchaudio\nimport torchaudio.compliance.kaldi as kaldi\nimport torchaudio.sox_effects as sox_effects\nimport yaml\nfrom PIL import Image\nfrom PIL.Image import BICUBIC\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import Dataset, DataLoader\n\nimport wenet.dataset.kaldi_io as kaldi_io\nfrom wenet.dataset.wav_distortion import distort_wav_conf\nfrom wenet.utils.common import IGNORE_ID\ntorchaudio.set_audio_backend(\"sox_io\")\n\n\ndef _spec_augmentation(x,\n warp_for_time=False,\n num_t_mask=2,\n num_f_mask=2,\n max_t=50,\n max_f=10,\n max_w=80):\n \"\"\" Deep copy x and do spec augmentation then return it\n\n Args:\n x: input feature, T * F 2D\n num_t_mask: number of time mask to apply\n num_f_mask: number of freq mask to apply\n max_t: max width of time mask\n max_f: max width of freq mask\n max_w: max width of time warp\n\n Returns:\n augmented feature\n \"\"\"\n y = np.copy(x)\n max_frames = y.shape[0]\n max_freq = y.shape[1]\n\n # time warp\n if warp_for_time and max_frames > max_w * 2:\n center = random.randrange(max_w, max_frames - max_w)\n warped = random.randrange(center - max_w, center + max_w) + 1\n\n left = Image.fromarray(x[:center]).resize((max_freq, warped), BICUBIC)\n right = Image.fromarray(x[center:]).resize(\n (max_freq, max_frames - warped), BICUBIC)\n y = np.concatenate((left, right), 0)\n # time mask\n for i in range(num_t_mask):\n start = random.randint(0, max_frames - 1)\n length = random.randint(1, max_t)\n end = min(max_frames, start + length)\n y[start:end, :] = 0\n # freq mask\n for i in range(num_f_mask):\n start = random.randint(0, max_freq - 1)\n length = random.randint(1, max_f)\n end = min(max_freq, start + length)\n y[:, start:end] = 0\n return y\n\n\ndef _spec_substitute(x, max_t=20, num_t_sub=3):\n \"\"\" Deep copy x and do spec substitute then return it\n\n Args:\n x: input feature, T * F 2D\n max_t: max width of time substitute\n num_t_sub: number of time substitute to apply\n\n Returns:\n augmented feature\n \"\"\"\n y = np.copy(x)\n max_frames = y.shape[0]\n for i in range(num_t_sub):\n start = random.randint(0, max_frames - 1)\n length = random.randint(1, max_t)\n end = min(max_frames, start + length)\n # only substitute the earlier time chosen randomly for current time\n pos = random.randint(0, start)\n y[start:end, :] = y[start - pos:end - pos, :]\n return y\n\n\ndef _waveform_distortion(waveform, distortion_methods_conf):\n \"\"\" Apply distortion on waveform\n\n This distortion will not change the length of the waveform.\n\n Args:\n waveform: numpy float tensor, (length,)\n distortion_methods_conf: a list of config for ditortion method.\n a method will be randomly selected by 'method_rate' and\n apply on the waveform.\n\n Returns:\n distorted waveform.\n 
\"\"\"\n r = random.uniform(0, 1)\n acc = 0.0\n for distortion_method in distortion_methods_conf:\n method_rate = distortion_method['method_rate']\n acc += method_rate\n if r < acc:\n distortion_type = distortion_method['name']\n distortion_conf = distortion_method['params']\n point_rate = distortion_method['point_rate']\n return distort_wav_conf(waveform, distortion_type, distortion_conf,\n point_rate)\n return waveform\n\n\n# add speed perturb when loading wav\n# return augmented, sr\ndef _load_wav_with_speed(wav_file, speed):\n \"\"\" Load the wave from file and apply speed perpturbation\n\n Args:\n wav_file: input feature, T * F 2D\n\n Returns:\n augmented feature\n \"\"\"\n if speed == 1.0:\n wav, sr = torchaudio.load(wav_file)\n else:\n si, _ = torchaudio.info(wav_file)\n\n # get torchaudio version\n ta_no = torchaudio.__version__.split(\".\")\n ta_version = 100 * int(ta_no[0]) + 10 * int(ta_no[1])\n\n if ta_version < 80:\n # Note: deprecated in torchaudio>=0.8.0\n E = sox_effects.SoxEffectsChain()\n E.append_effect_to_chain('speed', speed)\n E.append_effect_to_chain(\"rate\", si.rate)\n E.set_input_file(wav_file)\n wav, sr = E.sox_build_flow_effects()\n else:\n # Note: enable in torchaudio>=0.8.0\n wav, sr = sox_effects.apply_effects_file(\n wav_file,\n [['speed', str(speed)], ['rate', str(si.rate)]])\n\n # sox will normalize the waveform, scale to [-32768, 32767]\n wav = wav * (1 << 15)\n return wav, sr\n\n\ndef _extract_feature(batch, speed_perturb, wav_distortion_conf,\n feature_extraction_conf):\n \"\"\" Extract acoustic fbank feature from origin waveform.\n\n Speed perturbation and wave amplitude distortion is optional.\n\n Args:\n batch: a list of tuple (wav id , wave path).\n speed_perturb: bool, whether or not to use speed pertubation.\n wav_distortion_conf: a dict , the config of wave amplitude distortion.\n feature_extraction_conf:a dict , the config of fbank extraction.\n\n Returns:\n (keys, feats, labels)\n \"\"\"\n keys = []\n feats = []\n lengths = []\n wav_dither = wav_distortion_conf['wav_dither']\n wav_distortion_rate = wav_distortion_conf['wav_distortion_rate']\n distortion_methods_conf = wav_distortion_conf['distortion_methods']\n if speed_perturb:\n speeds = [1.0, 1.1, 0.9]\n weights = [1, 1, 1]\n speed = random.choices(speeds, weights, k=1)[0]\n # speed = random.choice(speeds)\n for i, x in enumerate(batch):\n try:\n wav = x[1]\n value = wav.strip().split(\",\")\n # 1 for general wav.scp, 3 for segmented wav.scp\n assert len(value) == 1 or len(value) == 3\n wav_path = value[0]\n sample_rate = torchaudio.backend.sox_io_backend.info(wav_path).sample_rate\n if speed_perturb:\n if len(value) == 3:\n logging.error(\n \"speed perturb does not support segmented wav.scp now\")\n assert len(value) == 1\n waveform, sample_rate = _load_wav_with_speed(wav_path, speed)\n else:\n # value length 3 means using segmented wav.scp\n # incluede .wav, start time, end time\n if len(value) == 3:\n start_frame = int(float(value[1]) * sample_rate)\n end_frame = int(float(value[2]) * sample_rate)\n waveform, sample_rate = torchaudio.backend.sox_io_backend.load(\n filepath=wav_path,\n num_frames=end_frame - start_frame,\n offset=start_frame)\n else:\n waveform, sample_rate = torchaudio.load(wav_path)\n waveform = waveform * (1 << 15)\n\n if wav_distortion_rate > 0.0:\n r = random.uniform(0, 1)\n if r < wav_distortion_rate:\n waveform = waveform.detach().numpy()\n waveform = _waveform_distortion(waveform,\n distortion_methods_conf)\n waveform = torch.from_numpy(waveform)\n mat = 
kaldi.fbank(\n waveform,\n num_mel_bins=feature_extraction_conf['mel_bins'],\n frame_length=feature_extraction_conf['frame_length'],\n frame_shift=feature_extraction_conf['frame_shift'],\n dither=wav_dither,\n energy_floor=0.0,\n sample_frequency=sample_rate)\n mat = mat.detach().numpy()\n feats.append(mat)\n keys.append(x[0])\n lengths.append(mat.shape[0])\n except (Exception) as e:\n print(e)\n logging.warn('read utterance {} error'.format(x[0]))\n pass\n # Sort it because sorting is required in pack/pad operation\n order = np.argsort(lengths)[::-1]\n sorted_keys = [keys[i] for i in order]\n sorted_feats = [feats[i] for i in order]\n labels = [x[2].split() for x in batch]\n labels = [np.fromiter(map(int, x), dtype=np.int32) for x in labels]\n sorted_labels = [labels[i] for i in order]\n return sorted_keys, sorted_feats, sorted_labels\n\n\ndef _load_feature(batch):\n \"\"\" Load acoustic feature from files.\n\n The features have been prepared in previous step, usualy by Kaldi.\n\n Args:\n batch: a list of tuple (wav id , feature ark path).\n\n Returns:\n (keys, feats, labels)\n \"\"\"\n keys = []\n feats = []\n lengths = []\n for i, x in enumerate(batch):\n try:\n mat = kaldi_io.read_mat(x[1])\n feats.append(mat)\n keys.append(x[0])\n lengths.append(mat.shape[0])\n except (Exception):\n # logging.warn('read utterance {} error'.format(x[0]))\n pass\n # Sort it because sorting is required in pack/pad operation\n order = np.argsort(lengths)[::-1]\n sorted_keys = [keys[i] for i in order]\n sorted_feats = [feats[i] for i in order]\n labels = [x[2].split() for x in batch]\n labels = [np.fromiter(map(int, x), dtype=np.int32) for x in labels]\n sorted_labels = [labels[i] for i in order]\n return sorted_keys, sorted_feats, sorted_labels\n\n\nclass CollateFunc(object):\n \"\"\" Collate function for AudioDataset\n \"\"\"\n def __init__(\n self,\n feature_dither=0.0,\n speed_perturb=False,\n spec_aug=False,\n spec_aug_conf=None,\n spec_sub=False,\n spec_sub_conf=None,\n raw_wav=True,\n feature_extraction_conf=None,\n wav_distortion_conf=None,\n ):\n \"\"\"\n Args:\n raw_wav:\n True if input is raw wav and feature extraction is needed.\n False if input is extracted feature\n \"\"\"\n self.wav_distortion_conf = wav_distortion_conf\n self.feature_extraction_conf = feature_extraction_conf\n self.spec_aug = spec_aug\n self.feature_dither = feature_dither\n self.speed_perturb = speed_perturb\n self.raw_wav = raw_wav\n self.spec_aug_conf = spec_aug_conf\n self.spec_sub = spec_sub\n self.spec_sub_conf = spec_sub_conf\n\n def __call__(self, batch):\n assert (len(batch) == 1)\n if self.raw_wav:\n keys, xs, ys = _extract_feature(batch[0], self.speed_perturb,\n self.wav_distortion_conf,\n self.feature_extraction_conf)\n\n else:\n keys, xs, ys = _load_feature(batch[0])\n\n train_flag = True\n if ys is None:\n train_flag = False\n\n # optional feature dither d ~ (-a, a) on fbank feature\n # a ~ (0, 0.5)\n if self.feature_dither != 0.0:\n a = random.uniform(0, self.feature_dither)\n xs = [x + (np.random.random_sample(x.shape) - 0.5) * a for x in xs]\n\n # optinoal spec substitute\n if self.spec_sub:\n xs = [_spec_substitute(x, **self.spec_sub_conf) for x in xs]\n\n # optinoal spec augmentation\n if self.spec_aug:\n xs = [_spec_augmentation(x, **self.spec_aug_conf) for x in xs]\n\n # padding\n xs_lengths = torch.from_numpy(\n np.array([x.shape[0] for x in xs], dtype=np.int32))\n\n # pad_sequence will FAIL in case xs is empty\n if len(xs) > 0:\n xs_pad = pad_sequence([torch.from_numpy(x).float() for x in xs],\n True, 
0)\n else:\n xs_pad = torch.Tensor(xs)\n if train_flag:\n ys_lengths = torch.from_numpy(\n np.array([y.shape[0] for y in ys], dtype=np.int32))\n if len(ys) > 0:\n ys_pad = pad_sequence([torch.from_numpy(y).int() for y in ys],\n True, IGNORE_ID)\n else:\n ys_pad = torch.Tensor(ys)\n else:\n ys_pad = None\n ys_lengths = None\n return keys, xs_pad, ys_pad, xs_lengths, ys_lengths\n\n\nclass AudioDataset(Dataset):\n def __init__(self,\n data_file,\n max_length=10240,\n min_length=0,\n token_max_length=200,\n token_min_length=1,\n batch_type='static',\n batch_size=1,\n max_frames_in_batch=0,\n sort=True,\n raw_wav=True):\n \"\"\"Dataset for loading audio data.\n\n Attributes::\n data_file: input data file\n Plain text data file, each line contains following 7 fields,\n which is split by '\\t':\n utt:utt1\n feat:tmp/data/file1.wav or feat:tmp/data/fbank.ark:30\n feat_shape: 4.95(in seconds) or feat_shape:495,80(495 is in frames)\n text:i love you\n token: i <space> l o v e <space> y o u\n tokenid: int id of this token\n token_shape: M,N # M is the number of token, N is vocab size\n max_length: drop utterance which is greater than max_length(10ms)\n min_length: drop utterance which is less than min_length(10ms)\n token_max_length: drop utterance which is greater than token_max_length,\n especially when use char unit for english modeling\n token_min_length: drop utterance which is less than token_max_length\n batch_type: static or dynamic, see max_frames_in_batch(dynamic)\n batch_size: number of utterances in a batch,\n it's for static batch size.\n max_frames_in_batch: max feature frames in a batch,\n when batch_type is dynamic, it's for dynamic batch size.\n Then batch_size is ignored, we will keep filling the\n batch until the total frames in batch up to max_frames_in_batch.\n sort: whether to sort all data, so the utterance with the same\n length could be filled in a same batch.\n raw_wav: use raw wave or extracted featute.\n if raw wave is used, dynamic waveform-level augmentation could be used\n and the feature is extracted by torchaudio.\n if extracted featute(e.g. 
by kaldi) is used, only feature-level\n augmentation such as specaug could be used.\n \"\"\"\n assert batch_type in ['static', 'dynamic']\n data = []\n\n # Open in utf8 mode since meet encoding problem\n with codecs.open(data_file, 'r', encoding='utf-8') as f:\n for line in f:\n arr = line.strip().split('\\t')\n if len(arr) != 7:\n continue\n key = arr[0].split(':')[1]\n tokenid = arr[5].split(':')[1]\n output_dim = int(arr[6].split(':')[1].split(',')[1])\n if raw_wav:\n wav_path = ':'.join(arr[1].split(':')[1:])\n duration = int(float(arr[2].split(':')[1]) * 1000 / 10)\n data.append((key, wav_path, duration, tokenid))\n else:\n feat_ark = ':'.join(arr[1].split(':')[1:])\n feat_info = arr[2].split(':')[1].split(',')\n feat_dim = int(feat_info[1].strip())\n num_frames = int(feat_info[0].strip())\n data.append((key, feat_ark, num_frames, tokenid))\n self.input_dim = feat_dim\n self.output_dim = output_dim\n if sort:\n data = sorted(data, key=lambda x: x[2])\n valid_data = []\n for i in range(len(data)):\n length = data[i][2]\n token_length = len(data[i][3].split())\n # remove too lang or too short utt for both input and output\n # to prevent from out of memory\n if length > max_length or length < min_length:\n # logging.warn('ignore utterance {} feature {}'.format(\n # data[i][0], length))\n pass\n elif token_length > token_max_length or token_length < token_min_length:\n pass\n else:\n valid_data.append(data[i])\n data = valid_data\n self.minibatch = []\n num_data = len(data)\n # Dynamic batch size\n if batch_type == 'dynamic':\n assert (max_frames_in_batch > 0)\n self.minibatch.append([])\n num_frames_in_batch = 0\n for i in range(num_data):\n length = data[i][2]\n num_frames_in_batch += length\n if num_frames_in_batch > max_frames_in_batch:\n self.minibatch.append([])\n num_frames_in_batch = length\n self.minibatch[-1].append((data[i][0], data[i][1], data[i][3]))\n # Static batch size\n else:\n cur = 0\n while cur < num_data:\n end = min(cur + batch_size, num_data)\n item = []\n for i in range(cur, end):\n item.append((data[i][0], data[i][1], data[i][3]))\n self.minibatch.append(item)\n cur = end\n\n def __len__(self):\n return len(self.minibatch)\n\n def __getitem__(self, idx):\n return self.minibatch[idx]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('type', help='config file')\n parser.add_argument('config_file', help='config file')\n parser.add_argument('data_file', help='input data file')\n args = parser.parse_args()\n\n with open(args.config_file, 'r') as fin:\n configs = yaml.load(fin, Loader=yaml.FullLoader)\n\n # Init dataset and data loader\n collate_conf = copy.copy(configs['collate_conf'])\n if args.type == 'raw_wav':\n raw_wav = True\n else:\n raw_wav = False\n collate_func = CollateFunc(**collate_conf, raw_wav=raw_wav)\n dataset_conf = configs.get('dataset_conf', {})\n dataset = AudioDataset(args.data_file, **dataset_conf, raw_wav=raw_wav)\n\n data_loader = DataLoader(dataset,\n batch_size=1,\n shuffle=True,\n sampler=None,\n num_workers=0,\n collate_fn=collate_func)\n\n for i, batch in enumerate(data_loader):\n print(i)\n # print(batch[1].shape)\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.copy",
"torch.from_numpy",
"numpy.random.random_sample",
"torch.utils.data.DataLoader",
"numpy.argsort",
"torch.Tensor"
]
] |
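The `_spec_augmentation` helper in the WeNet dataset code above applies SpecAugment-style time and frequency masking to a T x F fbank matrix. A minimal sketch of just the masking part (time warping omitted), run on random stand-in features:

```python
import random
import numpy as np

def spec_augment(x, num_t_mask=2, num_f_mask=2, max_t=50, max_f=10):
    y = np.copy(x)
    t, f = y.shape
    for _ in range(num_t_mask):                # zero out random time spans
        start = random.randint(0, t - 1)
        y[start:min(t, start + random.randint(1, max_t)), :] = 0
    for _ in range(num_f_mask):                # zero out random frequency bands
        start = random.randint(0, f - 1)
        y[:, start:min(f, start + random.randint(1, max_f))] = 0
    return y

feats = np.random.randn(200, 80)               # fake 200-frame, 80-dim fbank
print(np.count_nonzero(spec_augment(feats) == 0))
```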
vishalbelsare/xam | [
"93c066990d976c7d4d74b63fb6fb3254ee8d9b48"
] | [
"xam/ensemble/stacking.py"
] | [
"import collections\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nfrom sklearn import model_selection\nfrom sklearn import preprocessing\nfrom sklearn import utils\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import ClassifierMixin\nfrom sklearn.base import clone\nfrom sklearn.base import RegressorMixin\nfrom sklearn.base import MetaEstimatorMixin\n\n\nclass BaseStackingEstimator(BaseEstimator, MetaEstimatorMixin):\n\n def __init__(self, models, meta_model, cv, metric, use_base_features, use_probas):\n self.models = models\n self.meta_model = meta_model\n self.cv = cv\n self.metric = metric\n self.use_base_features = use_base_features\n self.use_probas = use_probas\n\n def fit(self, X, y=None, verbose=False, **fit_params):\n\n # meta_features_ have as many rows as there are in X and as many\n # columns as there are models. However, if use_probas is True then\n # ((n_classes - 1) * n_models) columns have to be stored\n if self.use_probas:\n self.n_probas_ = len(np.unique(y)) - 1\n meta_features = np.empty((len(X), len(self.models) * (self.n_probas_)))\n else:\n meta_features = np.empty((len(X), len(self.models)))\n\n self.oof_scores_ = collections.defaultdict(list)\n\n if self.use_probas:\n lb = preprocessing.LabelBinarizer().fit(y)\n\n for i, (fit_idx, val_idx) in enumerate(self.cv.split(X, y)):\n for j, (name, model) in enumerate(self.models.items()):\n\n # Split the data according to the current folds\n if isinstance(X, pd.DataFrame):\n X_fit, X_val = X.iloc[fit_idx], X.iloc[val_idx]\n else:\n X_fit, X_val = X[fit_idx], X[val_idx]\n\n if isinstance(y, pd.Series):\n y_fit, y_val = y.iloc[fit_idx], y.iloc[val_idx]\n else:\n y_fit, y_val = y[fit_idx], y[val_idx]\n\n # Train the model on the training fold\n model.fit(X_fit, y_fit, **fit_params.get(name, {}))\n\n # If use_probas is True then the probabilities of each class for\n # each model have to be predicted and then stored into\n # meta_features\n if self.use_probas:\n val_pred = model.predict_proba(X_val)\n val_score = self.metric(y_val, lb.inverse_transform(val_pred))\n a = self.n_probas_ * j\n b = self.n_probas_ * (j + 1)\n meta_features[val_idx, a:b] = val_pred[:, 0:self.n_probas_]\n else:\n val_pred = model.predict(X_val)\n meta_features[val_idx, j] = val_pred\n val_score = self.metric(y_val, val_pred)\n\n # Store the model's score on the validation fold\n self.oof_scores_[name].append(val_score)\n\n if verbose:\n print('OOF {} for fold {}: {:.5f}'.format(name, (i+1), val_score))\n\n if verbose:\n for name, scores in self.oof_scores_.items():\n print('OOF {} mean: {:.5f} (± {:.5f})'.format(name, np.mean(scores), np.std(scores)))\n\n # Combine the predictions with the original features\n if self.use_base_features:\n meta_features = np.hstack((meta_features, X))\n\n self.meta_model.fit(meta_features, y)\n\n # Each model has to be fit on all the data for further predictions\n for model in self.models.values():\n model.fit(X, y)\n\n return self\n\n def _predict(self, X, predict_proba):\n\n # If use_probas is True then the probabilities of each class for each\n # model have to be predicted and then stored into meta_features\n if self.use_probas:\n meta_features = np.empty((len(X), len(self.models) * (self.n_probas_)))\n for i, model in enumerate(self.models.values()):\n probabilities = model.predict_proba(X)\n for j, k in enumerate(range(self.n_probas_ * i, self.n_probas_ * (i + 1))):\n meta_features[:, k] = probabilities[:, j]\n else:\n meta_features = np.transpose([model.predict(X) for model in 
self.models.values()])\n\n if self.use_base_features:\n meta_features = np.hstack((meta_features, X))\n\n if predict_proba:\n return self.meta_model.predict_proba(meta_features)\n return self.meta_model.predict(meta_features)\n\n def predict(self, X):\n return self._predict(X, predict_proba=False)\n\n\nclass StackingClassifier(BaseStackingEstimator, ClassifierMixin):\n\n def __init__(self, models, meta_model, cv=model_selection.StratifiedKFold(n_splits=3),\n metric=metrics.roc_auc_score, use_base_features=False, use_probas=True):\n super().__init__(\n models=models,\n meta_model=meta_model,\n cv=cv,\n metric=metric,\n use_base_features=use_base_features,\n use_probas=use_probas,\n )\n\n def predict_proba(self, X):\n return self._predict(X, predict_proba=True)\n\n\nclass StackingRegressor(BaseStackingEstimator, RegressorMixin):\n\n def __init__(self, models, meta_model, cv=model_selection.KFold(n_splits=3),\n metric=metrics.mean_squared_error, use_base_features=False):\n super().__init__(\n models=models,\n meta_model=meta_model,\n cv=cv,\n metric=metric,\n use_base_features=use_base_features,\n use_probas=False\n )\n\n\nclass BaggedStackingEstimator(BaseEstimator, MetaEstimatorMixin):\n\n def __init__(self, models, meta_model, cv, metric, use_base_features, use_probas, fit_handlers):\n self.models = models\n self.meta_model = meta_model\n self.cv = cv\n self.metric = metric\n self.use_base_features = use_base_features\n self.use_probas = use_probas\n self.fit_handlers = fit_handlers\n\n def fit(self, X, y=None, verbose=False):\n\n # meta_features_ is of shape (len(X), len(models)); if use_probas is\n # True then (n_classes - 1) columns have to be stored per model\n if self.use_probas:\n self.n_probas_ = len(np.unique(y)) - 1\n meta_features = np.empty((len(X), len(self.models) * (self.n_probas_)))\n else:\n meta_features = np.empty((len(X), len(self.models)))\n\n self.oof_scores_ = collections.defaultdict(list)\n self.instances_ = collections.defaultdict(list)\n\n if self.use_probas:\n lb = preprocessing.LabelBinarizer().fit(y)\n\n for i, (fit_idx, val_idx) in enumerate(self.cv.split(X, y)):\n for j, (name, model) in enumerate(self.models.items()):\n\n # Split the data according to the current folds\n if isinstance(X, pd.DataFrame):\n X_fit, X_val = X.iloc[fit_idx], X.iloc[val_idx]\n else:\n X_fit, X_val = X[fit_idx], X[val_idx]\n\n if isinstance(y, pd.Series):\n y_fit, y_val = y.iloc[fit_idx], y.iloc[val_idx]\n else:\n y_fit, y_val = y[fit_idx], y[val_idx]\n\n # Train the model on the training fold\n fit_handler = self.fit_handlers.get(name, lambda a, b, c, d: {})\n instance = clone(model)\n instance = instance.fit(X_fit, y_fit, **fit_handler(X_fit, y_fit, X_val, y_val))\n self.instances_[name].append(instance)\n\n # If use_probas is True then the probabilities of each class for\n # each model have to be predicted and then stored into\n # meta_features\n if self.use_probas:\n val_pred = instance.predict_proba(X_val)\n val_score = self.metric(y_val, lb.inverse_transform(val_pred))\n a = self.n_probas_ * j\n b = self.n_probas_ * (j + 1)\n meta_features[val_idx, a:b] = val_pred[:, 0:self.n_probas_]\n else:\n val_pred = instance.predict(X_val)\n meta_features[val_idx, j] = val_pred\n val_score = self.metric(y_val, val_pred)\n\n # Store the model's score on the validation fold\n self.oof_scores_[name].append(val_score)\n\n if verbose:\n print('OOF {} for fold {}: {:.5f}'.format(name, (i+1), val_score))\n\n if verbose:\n for name, scores in self.oof_scores_.items():\n print('OOF {} mean: {:.5f} 
(± {:.5f})'.format(name, np.mean(scores), np.std(scores)))\n\n # Combine the predictions with the original features\n if self.use_base_features:\n meta_features = np.hstack((meta_features, X))\n\n # Train the meta-model\n self.meta_model = self.meta_model.fit(meta_features, y)\n\n return self\n\n def _predict(self, X, predict_proba):\n\n utils.validation.check_is_fitted(self, ['oof_scores_', 'instances_'])\n\n # If use_probas is True then the probabilities of each class for each\n # model have to be predicted and then stored into meta_features\n if self.use_probas:\n meta_features = np.empty((len(X), len(self.models) * self.n_probas_))\n for i, name in enumerate(self.models):\n\n # Bag the predictions of each model instance\n instances = self.instances_[name]\n probabilities = np.mean([instance.predict_proba(X) for instance in instances], 0)\n\n # Add the predictions to the set of meta-features\n a = self.n_probas_ * i\n b = self.n_probas_ * (i + 1)\n meta_features[:, a:b] = probabilities[:, 0:self.n_probas_]\n\n else:\n # Bag the predictions of each model instance\n meta_features = np.transpose([\n np.mean([instance.predict(X) for instance in self.instances_[name]], 0)\n for name in self.models\n ])\n\n if self.use_base_features:\n meta_features = np.hstack((meta_features, X))\n\n if predict_proba:\n return self.meta_model.predict_proba(meta_features)\n return self.meta_model.predict(meta_features)\n\n def predict(self, X):\n return self._predict(X, predict_proba=False)\n\n\nclass BaggedStackingClassifier(BaggedStackingEstimator, ClassifierMixin):\n\n def __init__(self, models, meta_model, cv=model_selection.StratifiedKFold(n_splits=3),\n metric=metrics.roc_auc_score, use_base_features=False, use_probas=True,\n fit_handlers={}):\n super().__init__(\n models=models,\n meta_model=meta_model,\n cv=cv,\n metric=metric,\n use_base_features=use_base_features,\n use_probas=use_probas,\n fit_handlers=fit_handlers\n )\n\n def predict_proba(self, X):\n return super()._predict(X, predict_proba=True)\n\n\nclass BaggedStackingRegressor(BaggedStackingEstimator, RegressorMixin):\n\n def __init__(self, models, meta_model, cv=model_selection.KFold(n_splits=3),\n metric=metrics.mean_squared_error, use_base_features=False,\n fit_handlers={}):\n super().__init__(\n models=models,\n meta_model=meta_model,\n cv=cv,\n metric=metric,\n use_base_features=use_base_features,\n use_probas=False,\n fit_handlers=fit_handlers\n )\n"
] | [
[
"sklearn.utils.validation.check_is_fitted",
"sklearn.model_selection.StratifiedKFold",
"numpy.mean",
"numpy.std",
"sklearn.model_selection.KFold",
"numpy.hstack",
"sklearn.base.clone",
"sklearn.preprocessing.LabelBinarizer",
"numpy.unique"
]
] |
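The stacking estimators above build their meta-features from out-of-fold base-model predictions. A condensed, self-contained sketch of that idea with scikit-learn (the base models, meta-model, and synthetic data below are illustrative choices, not the library's defaults):

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=300, random_state=0)
base_models = [DecisionTreeClassifier(max_depth=3, random_state=0),
               LogisticRegression(max_iter=1000)]
meta_features = np.zeros((len(X), len(base_models)))

cv = StratifiedKFold(n_splits=3)
for fit_idx, val_idx in cv.split(X, y):
    for j, model in enumerate(base_models):
        model.fit(X[fit_idx], y[fit_idx])
        # Store P(class=1) on the fold this base model has not seen.
        meta_features[val_idx, j] = model.predict_proba(X[val_idx])[:, 1]

meta_model = LogisticRegression().fit(meta_features, y)
print(meta_model.score(meta_features, y))
```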
tingyingwu2010/rsome | [
"4fec4694ac40de8b0de484728f4abda9ac63d563"
] | [
"ro.py"
] | [
"from .socp import Model as SOCModel\nfrom .lp import LinConstr, Bounds, CvxConstr, ConeConstr\nfrom .lp import Vars, VarSub, Affine, Convex\nfrom .lp import DecRule, DecRuleSub\nfrom .lp import RoAffine, RoConstr\nfrom .lp import Solution\nfrom .subroutines import *\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nfrom collections import Iterable\nfrom .lpg_solver import solve as def_sol\n\n\nclass Model:\n \"\"\"\n The Model class creates an object of robust optimization models\n \"\"\"\n\n def __init__(self, name=None):\n\n self.rc_model = SOCModel(mtype='R')\n self.sup_model = SOCModel(nobj=True, mtype='S')\n\n self.all_constr = []\n\n self.obj = None\n self.obj_support = None\n self.sign = 1\n\n self.primal = None\n self.dual = None\n self.solution = None\n self.pupdate = True\n self.dupdate = True\n\n self.solution = None\n\n self.name = name\n\n def reset(self):\n\n self.all_constr = []\n self.pupdate = True\n self.dupdate = True\n self.primal = None\n self.dual = None\n self.rc_model.reset()\n\n def dvar(self, shape=(1,), vtype='C', name=None, aux=False):\n \"\"\"\n Returns an array of decision variables with the given shape\n and variable type.\n\n Parameters\n ----------\n shape : int or tuple\n Shape of the variable array.\n vtype : {'C', 'B', 'I'}\n Type of the decision variables. 'C' means continuous; 'B'\n means binary, and 'I\" means integer.\n name : str\n Name of the variable array\n aux : leave it unspecified.\n\n Returns\n -------\n new_var : rsome.lp.Vars\n An array of new decision variables\n \"\"\"\n\n new_var = self.rc_model.dvar(shape, vtype, name, aux)\n return new_var\n\n def rvar(self, shape=(1,), name=None):\n\n \"\"\"\n Returns an array of random variables with the given shape.\n\n Parameters\n ----------\n shape : int or tuple\n Shape of the variable array.\n name : str\n Name of the variable array\n\n Returns\n -------\n new_var : rsome.lp.Vars\n An array of new random variables\n \"\"\"\n\n new_var = self.sup_model.dvar(shape, 'C', name)\n return new_var\n\n def ldr(self, shape=(1,), name=None):\n\n \"\"\"\n Returns an array with the given shape of linear decision rule\n variables.\n\n Parameters\n ----------\n shape : int or tuple\n Shape of the variable array.\n name : str\n Name of the variable array\n\n Returns\n -------\n new_var : rsome.ro.DecRule\n An array of new linear decision rule variables\n \"\"\"\n\n new_ldr = DecRule(self, shape, name)\n return new_ldr\n\n def min(self, obj):\n\n if obj.size > 1:\n raise ValueError('Incorrect function dimension.')\n\n self.obj = obj\n self.sign = 1\n self.pupdate = True\n self.dupdate = True\n\n def max(self, obj):\n\n if obj.size > 1:\n raise ValueError('Incorrect function dimension.')\n\n self.obj = obj\n self.sign = - 1\n self.pupdate = True\n self.dupdate = True\n\n def minmax(self, obj, *args):\n\n if np.prod(obj.shape) > 1:\n raise ValueError('Incorrect function dimension.')\n\n constraints = []\n for items in args:\n if isinstance(items, Iterable):\n constraints.extend(list(items))\n else:\n constraints.append(items)\n\n sup_model = self.sup_model\n sup_model.reset()\n for item in constraints:\n if item.model is not sup_model:\n raise SyntaxError('Models mismatch.')\n sup_model.st(item)\n\n self.obj = obj\n self.obj_support = sup_model.do_math(primal=False)\n self.sign = 1\n self.pupdate = True\n self.dupdate = True\n\n def maxmin(self, obj, *args):\n\n if np.prod(obj.shape) > 1:\n raise ValueError('Incorrect function dimension.')\n\n constraints = []\n for items in args:\n if isinstance(items, 
Iterable):\n constraints.extend(list(items))\n else:\n constraints.append(items)\n\n sup_model = self.sup_model\n sup_model.reset()\n for item in constraints:\n if item.model is not sup_model:\n raise SyntaxError('Models mismatch.')\n sup_model.st(item)\n\n self.obj = obj\n self.obj_support = sup_model.do_math(primal=False)\n self.sign = - 1\n self.pupdate = True\n self.dupdate = True\n\n def st(self, *arg):\n\n for constr in arg:\n if isinstance(constr, Iterable):\n for item in constr:\n self.st(item)\n\n elif isinstance(constr, (LinConstr, Bounds, CvxConstr, ConeConstr)):\n if (constr.model is not self.rc_model) or \\\n (constr.model.mtype != 'R'):\n raise ValueError('Models mismatch.')\n self.all_constr.append(constr)\n elif isinstance(constr, RoConstr):\n if (constr.dec_model is not self.rc_model) or \\\n (constr.rand_model is not self.sup_model):\n raise ValueError('Models mismatch.')\n sense = (constr.sense[0] if isinstance(constr.sense,\n np.ndarray)\n else constr.sense)\n if sense == 0:\n self.all_constr.append(constr)\n else:\n left = RoAffine(constr.raffine, constr.affine,\n constr.rand_model)\n right = RoAffine(-constr.raffine, -constr.affine,\n constr.rand_model)\n self.all_constr.append(RoConstr(left, sense=0))\n self.all_constr.append(RoConstr(right, sense=0))\n else:\n raise TypeError('Unknown type of constraints')\n\n def do_math(self, primal=True):\n\n if primal:\n if self.primal is not None and not self.pupdate:\n return self.primal\n else:\n if self.dual is not None and not self.dupdate:\n return self.dual\n else:\n self.do_math(primal=True)\n return self.rc_model.do_math(False, obj=True)\n\n self.rc_model.reset()\n if isinstance(self.obj, (Vars, VarSub, Affine, Convex)):\n self.rc_model.obj = self.obj\n self.rc_model.sign = self.sign\n more_roc = []\n elif isinstance(self.obj, RoAffine):\n obj_constr = (self.rc_model.vars[0] >= self.sign * self.obj)\n obj_constr.support = self.obj_support\n more_roc = [obj_constr]\n else:\n raise TypeError('Incorrect type for the objective function.')\n\n for constr in self.all_constr + more_roc:\n if isinstance(constr, (LinConstr, Bounds, CvxConstr)):\n self.rc_model.st(constr)\n if isinstance(constr, RoConstr):\n if constr.support:\n rc_constrs = constr.le_to_rc()\n else:\n rc_constrs = constr.le_to_rc(self.obj_support)\n for rc_constr in rc_constrs:\n self.rc_model.st(rc_constr)\n\n formula = self.rc_model.do_math(primal, obj=True)\n\n if primal:\n self.primal = formula\n self.pupdate = False\n else:\n self.dual = formula\n self.dupdate = False\n\n return formula\n\n def solve(self, solver=None, display=True, export=False, params={}):\n \"\"\"\n Solve the model with the selected solver interface.\n\n Parameters\n ----------\n solver : {None, lpg_solver, grb_solver, msk_solver}\n Solver interface used for model solution. Use default solver\n if solver=None.\n display : bool\n Display option of the solver interface.\n export : bool\n Export option of the solver interface. 
A standard model file\n is generated if the option is True.\n params : dict\n A dictionary that specifies parameters of the selected solver.\n So far the argument only applies to Gurobi and MOSEK.\n \"\"\"\n\n if solver is None:\n solution = def_sol(self.do_math(), display, export, params)\n else:\n solution = solver.solve(self.do_math(), display, export, params)\n\n if isinstance(solution, Solution):\n self.rc_model.solution = solution\n else:\n if solution is None:\n self.rc_model.solution = None\n else:\n x = solution.x\n self.rc_model.solution = Solution(x[0], x, solution.status)\n\n self.solution = self.rc_model.solution\n\n def get(self):\n\n if self.rc_model.solution is None:\n raise SyntaxError('The model is unsolved or no feasible solution.')\n return self.sign * self.rc_model.solution.objval\n"
] | [
[
"numpy.prod"
]
] |
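ro.py above defines the user-facing robust-optimization Model (dvar/rvar for decision and random variables, minmax for the worst-case objective, st for robust constraints). A hedged usage sketch of a tiny robust LP; the `from rsome import ro` import path and the default-solver call are assumptions about how this module is packaged:

```python
from rsome import ro   # assumed package layout exposing this module as rsome.ro

model = ro.Model('toy')
x = model.dvar(2)      # decision variables
z = model.rvar()       # uncertain parameter

# Worst-case minimization over the box support -1 <= z <= 1.
model.minmax(x[0] + x[1], (z >= -1, z <= 1))
model.st(x[0] + z * x[1] >= 1)   # robust constraint: must hold for every z
model.st(x >= 0)

model.solve()          # default LP solver interface (lpg_solver)
print(model.get())     # worst-case optimal objective value
```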
oisindoherty3/drem | [
"478fe4e72fd38628f4ddc3745c16efe75ee98e4d"
] | [
"src/drem/utilities/pandas_tasks.py"
] | [
"from pathlib import Path\nfrom typing import Any\nfrom typing import Iterable\nfrom typing import List\nfrom typing import Union\n\nimport pandas as pd\n\nfrom icontract import require\nfrom prefect import Task\nfrom prefect import task\n\n\n@task\n@require(\n lambda df, column_names: set(column_names).issubset(set(df.columns)),\n \"df.columns doesn't contain all names in columns!\",\n)\ndef get_columns(df: pd.DataFrame, column_names: Iterable[str]) -> pd.DataFrame:\n \"\"\"Access a group of rows by label(s) or a boolean array.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.loc.html\n\n Args:\n df (pd.DataFrame): Any single-indexed Pandas DataFrame\n column_names (Iterable[str]): Names of columns to be extracted\n\n Returns:\n pd.DataFrame: A new DataFrame containing only the specified columns\n \"\"\"\n return df.copy().loc[:, column_names]\n\n\n@task\n@require(\n lambda df, target: set(target).issubset(set(df.columns)),\n \"df.columns doesn't contain all names in columns!\",\n)\ndef get_sum_of_columns(\n df: pd.DataFrame, target: Union[str, Iterable[str]], result: str,\n) -> pd.DataFrame:\n \"\"\"Get sum of target DataFrame columns.\n\n Args:\n df (pd.DataFrame): Any single-indexed Pandas DataFrame\n target (Union[str, Iterable[str]]): Names of columns to be summed\n result (str): Name of result column\n\n Returns:\n pd.DataFrame: [description]\n \"\"\"\n df[result] = df[target].copy().sum(axis=1)\n\n return df\n\n\n@task\ndef get_rows_where_column_contains_substring(\n df: pd.DataFrame, target: str, substring: str,\n) -> pd.DataFrame:\n \"\"\"Get rows where target columns contains substring.\n\n Args:\n df (pd.DataFrame): Any single-indexed Pandas DataFrame\n target (str): Name of target column\n substring (str): Substring to be queried in target column\n\n Returns:\n pd.DataFrame: A copy of df containing only rows where column contains substring\n \"\"\"\n rows = df[target].str.contains(substring)\n return df.copy()[rows].reset_index(drop=True)\n\n\nclass GetRowsWhereColumnContainsSubstring(Task):\n \"\"\"Create prefect.Task to Get rows where target columns contains substring.\n\n Args:\n Task (prefect.Task): see https://docs.prefect.io/core/concepts/tasks.html\n \"\"\"\n\n def run(self, df: pd.DataFrame, target: str, substring: str) -> pd.DataFrame:\n \"\"\"Run Task.\n\n Args:\n df (pd.DataFrame): Any single-indexed Pandas DataFrame\n target (str): Name of target column\n substring (str): Substring to be queried in target column\n\n Returns:\n pd.DataFrame: A copy of df containing only rows where column contains substring\n \"\"\"\n df = df.copy()\n rows = df[target].str.contains(substring)\n return df[rows].reset_index(drop=True)\n\n\n@task\ndef rename(df: pd.DataFrame, **kwargs: Any) -> pd.DataFrame:\n \"\"\"Alter axes labels.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.rename.html\n\n Args:\n df (pd.DataFrame): Any DataFrame\n **kwargs (Any): Keyword arguments to pass to pandas\n\n Returns:\n pd.DataFrame: DataFrame with axes labels altered\n \"\"\"\n return df.rename(**kwargs)\n\n\nclass Rename(Task):\n \"\"\"Create prefect.Task to Alter axes labels.\n\n Args:\n Task (prefect.Task): see https://docs.prefect.io/core/concepts/tasks.html\n \"\"\"\n\n def run(self, df: pd.DataFrame, **kwargs: Any) -> pd.DataFrame:\n \"\"\"Run Task.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.rename.html\n\n Args:\n df (pd.DataFrame): Any DataFrame\n **kwargs (Any): Keyword arguments to 
pass to pandas\n\n Returns:\n pd.DataFrame: DataFrame with axes labels altered\n \"\"\"\n return df.rename(**kwargs)\n\n\n@task(name=\"Read Parquet file\")\ndef read_parquet(filepath: Path, **kwargs: Any) -> pd.DataFrame:\n \"\"\"Load a parquet object from the file path, returning a DataFrame.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_parquet.html\n\n Args:\n filepath (Path): Path to file\n **kwargs (Any): Passed to pandas.read_parquet\n\n Returns:\n pd.DataFrame: DataFrame\n \"\"\"\n return pd.read_parquet(filepath, **kwargs)\n\n\n@task(name=\"Read HTML File\")\ndef read_html(filepath: Path, **kwargs: Any) -> List[pd.DataFrame]:\n \"\"\"Read HTML tables into a list of DataFrame objects.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_html.html\n\n Args:\n filepath (Path): Path to file\n **kwargs (Any): Passed to pandas.read_html\n\n Returns:\n List[pd.DataFrame]: A list of DataFrames.\n \"\"\"\n return pd.read_html(str(filepath), **kwargs)\n\n\n@task\ndef replace_substring_in_column(\n df: pd.DataFrame, target: str, result: str, **kwargs: Any,\n) -> pd.DataFrame:\n \"\"\"Replace substring in DataFrame string column.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html\n\n Args:\n df (pd.DataFrame): DataFrame\n target (str): Name of target column\n result (str): Name of result column\n **kwargs (Any): Passed to pandas.Series.str.replace\n\n Returns:\n pd.DataFrame: A copy of the object with all matching occurrences of pat replaced by repl.\n \"\"\"\n df = df.copy()\n\n df[result] = df[target].astype(str).str.replace(**kwargs)\n\n return df\n\n\nclass ReplaceSubstringInColumn(Task):\n \"\"\"Create prefect.Task to Replace Substring in DataFrame Column.\n\n Args:\n Task (prefect.Task): see https://docs.prefect.io/core/concepts/tasks.html\n \"\"\"\n\n def run(\n self, df: pd.DataFrame, target: str, result: str, **kwargs: Any,\n ) -> pd.DataFrame:\n \"\"\"Run Task.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html\n\n Args:\n df (pd.DataFrame): DataFrame\n target (str): Name of target column\n result (str): Name of result column\n **kwargs (Any): Passed to pandas.Series.str.replace\n\n Returns:\n pd.DataFrame: A copy of the object with all matching occurrences of pat replaced by repl.\n \"\"\"\n df = df.copy()\n\n df[result] = df[target].astype(str).str.replace(**kwargs)\n\n return df\n\n\n@task\ndef dropna(df: pd.DataFrame, **kwargs: Any) -> pd.DataFrame:\n \"\"\"Remove missing values.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.dropna.html\n\n Args:\n df (pd.DataFrame): DataFrame\n **kwargs (Any): Passed to pandas.DataFrame.dropna\n\n Returns:\n pd.DataFrame: A copy of the DataFrame with NA entries dropped from it.\n \"\"\"\n df = df.copy()\n\n return df.dropna(**kwargs)\n\n\n@task\ndef merge(left: pd.DataFrame, right: pd.DataFrame, **kwargs: Any) -> pd.DataFrame:\n \"\"\"Merge DataFrame or named Series objects with a database-style join.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.merge.html\n\n Args:\n left (pd.DataFrame): Object to be merged.\n right (pd.DataFrame): Object to merge with.\n **kwargs (Any): Passed to pandas.DataFrame.merge\n\n Returns:\n pd.DataFrame: A DataFrame of the two merged objects.\n \"\"\"\n left = left.copy()\n\n return left.merge(right, **kwargs)\n\n\nclass Merge(Task):\n \"\"\"Create prefect.Task to merge DataFrames with a 
database-style join.\n\n Args:\n Task (prefect.Task): see https://docs.prefect.io/core/concepts/tasks.html\n \"\"\"\n\n def run(\n self, left: pd.DataFrame, right: pd.DataFrame, **kwargs: Any,\n ) -> pd.DataFrame:\n \"\"\"Run Task.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.merge.html\n\n Args:\n left (pd.DataFrame): Object to be merged.\n right (pd.DataFrame): Object to merge with.\n **kwargs (Any): Passed to pandas.DataFrame.merge\n\n Returns:\n pd.DataFrame: A DataFrame of the two merged objects.\n \"\"\"\n left = left.copy()\n\n return left.merge(right, **kwargs)\n\n\n@task\ndef get_rows_by_index(df: pd.DataFrame, row_indexes: Iterable[str]) -> pd.DataFrame:\n \"\"\"Access a group of rows by integer-location based indexing.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.loc.html\n\n Args:\n df (pd.DataFrame): DataFrame\n row_indexes (Iterable[str]): Names of rows to be extracted\n\n Returns:\n pd.DataFrame: DataFrame\n \"\"\"\n return df.copy().iloc[row_indexes, :]\n\n\n@task\ndef concat(**kwargs: Any) -> pd.DataFrame:\n \"\"\"Concatenate pandas objects along a particular axis with optional set logic along the other axes.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html\n\n Args:\n **kwargs (Any): Passed to pandas.concat\n\n Returns:\n pd.DataFrame: DataFrame\n \"\"\"\n return pd.concat(**kwargs)\n\n\nclass Concat(Task):\n \"\"\"Create prefect.Task to concatenate DataFrames.\n\n Args:\n Task (prefect.Task): see https://docs.prefect.io/core/concepts/tasks.html\n \"\"\"\n\n def run(self, **kwargs: Any) -> pd.DataFrame:\n \"\"\"Run Task.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html\n\n Args:\n **kwargs (Any): Passed to pandas.concat\n\n Returns:\n pd.DataFrame: DataFrame\n \"\"\"\n return pd.concat(**kwargs)\n\n\n@task\ndef replace(\n df: pd.DataFrame, target: str, result: str, to_replace: Any, value: Any,\n) -> pd.DataFrame:\n \"\"\"Replace values given in to_replace with value.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.replace.html\n\n Args:\n df (pd.DataFrame): DataFrame\n target (str): Name of target column\n result (str): Name of result column\n to_replace (Any): Values that will be replaced\n value (Any): Value to replace any values matching to_replace with\n\n Returns:\n pd.DataFrame: DataFrame\n \"\"\"\n df = df.copy()\n\n df[result] = df[target].replace(to_replace, value)\n\n return df\n\n\nclass Replace(Task):\n \"\"\"Create prefect.Task to replace values in DataFrame with X.\n\n Args:\n Task (prefect.Task): see https://docs.prefect.io/core/concepts/tasks.html\n \"\"\"\n\n def run(\n self, df: pd.DataFrame, target: str, result: str, to_replace: Any, value: Any,\n ) -> pd.DataFrame:\n \"\"\"Run Task.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.replace.html\n\n Args:\n df (pd.DataFrame): DataFrame\n target (str): Name of target column\n result (str): Name of result column\n to_replace (Any): Values that will be replaced\n value (Any): Value to replace any values matching to_replace with\n\n Returns:\n pd.DataFrame: DataFrame\n \"\"\"\n df = df.copy()\n\n df[result] = df[target].replace(to_replace, value)\n\n return df\n\n\n@task\ndef groupby_sum(df: pd.DataFrame, by: Iterable[str], target: str) -> pd.DataFrame:\n \"\"\"Group DataFrame using a mapper or by a Series of columns.\n\n See 
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.groupby.html\n\n Args:\n df (pd.DataFrame): DataFrame\n by (Iterable[str]): The names of columns by to be grouped.\n target (str): The name of the column to be summed.\n\n Returns:\n pd.DataFrame: DataFrame\n \"\"\"\n df = df.copy()\n\n return df.groupby(by=by, as_index=False)[target].sum()\n\n\nclass GroupbySum(Task):\n \"\"\"Create prefect.Task to sum a select column for each DataFrame group.\n\n Args:\n Task (prefect.Task): see https://docs.prefect.io/core/concepts/tasks.html\n \"\"\"\n\n def run(self, df: pd.DataFrame, by: Iterable[str], target: str) -> pd.DataFrame:\n \"\"\"Run Task.\n\n See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.groupby.html\n\n Args:\n df (pd.DataFrame): DataFrame\n by (Iterable[str]): The names of columns by to be grouped.\n target (str): The name of the column to be summed.\n\n Returns:\n pd.DataFrame: DataFrame\n \"\"\"\n df = df.copy()\n\n return df.groupby(by=by, as_index=False)[target].sum()\n"
] | [
[
"pandas.read_parquet",
"pandas.concat"
]
] |
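The helpers above wrap common pandas operations as Prefect tasks so they can be wired into flows. A small hedged sketch of how such tasks compose in a pre-2.0 Prefect flow; the demo task, DataFrame, and column names are made up, and `get_sum_of_columns` is re-declared here (without the icontract check) to keep the snippet self-contained:

```python
import pandas as pd
from prefect import Flow, task   # Prefect < 2.0 API, as used above

@task
def make_demo_frame() -> pd.DataFrame:
    return pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

@task
def get_sum_of_columns(df: pd.DataFrame, target, result: str) -> pd.DataFrame:
    # Mirrors the helper of the same name defined above.
    df = df.copy()
    df[result] = df[target].sum(axis=1)
    return df

with Flow("pandas-tasks-demo") as flow:
    frame = make_demo_frame()
    summed = get_sum_of_columns(frame, target=["a", "b"], result="total")

state = flow.run()
print(state.result[summed].result)   # DataFrame with the new "total" column
```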
yulongfan/tryEverything | [
"2f66a8d33c3539e46d91527186bc52515ce5b14f"
] | [
"example/google_im2txt/102flower/train.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Train the model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nimport configuration\nimport show_and_tell_model\n\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '3' # only /gpu:gpu_id is visible\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.flags.DEFINE_string(\"input_file_pattern\", \"\",\n \"File pattern of sharded TFRecord input files.\")\ntf.flags.DEFINE_string(\"inception_checkpoint_file\", \"\",\n \"Path to a pretrained inception_v3 model.\")\ntf.flags.DEFINE_string(\"train_dir\", \"\",\n \"Directory for saving and loading model checkpoints.\")\ntf.flags.DEFINE_boolean(\"train_inception\", False,\n \"Whether to train inception submodel variables.\")\ntf.flags.DEFINE_integer(\"number_of_steps\", 1000, \"Number of training steps.\")\ntf.flags.DEFINE_integer(\"log_every_n_steps\", 1,\n \"Frequency at which loss and global step are logged.\")\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef main(unused_argv):\n assert FLAGS.input_file_pattern, \"--input_file_pattern is required\"\n assert FLAGS.train_dir, \"--train_dir is required\"\n\n model_config = configuration.ModelConfig()\n model_config.input_file_pattern = FLAGS.input_file_pattern\n model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file\n training_config = configuration.TrainingConfig()\n\n # Create training directory.\n train_dir = FLAGS.train_dir\n if not tf.gfile.IsDirectory(train_dir):\n tf.logging.info(\"Creating training directory: %s\", train_dir)\n tf.gfile.MakeDirs(train_dir)\n\n # Build the TensorFlow graph.\n g = tf.Graph()\n with g.as_default():\n # Build the model.\n model = show_and_tell_model.ShowAndTellModel(\n model_config, mode=\"train\", train_inception=FLAGS.train_inception)\n model.build()\n\n # Set up the learning rate.\n learning_rate_decay_fn = None\n if FLAGS.train_inception:\n learning_rate = tf.constant(training_config.train_inception_learning_rate)\n else:\n learning_rate = tf.constant(training_config.initial_learning_rate)\n if training_config.learning_rate_decay_factor > 0:\n num_batches_per_epoch = (training_config.num_examples_per_epoch /\n model_config.batch_size)\n decay_steps = int(num_batches_per_epoch *\n training_config.num_epochs_per_decay)\n\n def _learning_rate_decay_fn(learning_rate, global_step):\n return tf.train.exponential_decay(\n learning_rate,\n global_step,\n decay_steps=decay_steps,\n decay_rate=training_config.learning_rate_decay_factor,\n staircase=True)\n\n learning_rate_decay_fn = _learning_rate_decay_fn\n\n # Set up the training ops.\n train_op = tf.contrib.layers.optimize_loss(\n loss=model.total_loss,\n global_step=model.global_step,\n learning_rate=learning_rate,\n optimizer=training_config.optimizer,\n clip_gradients=training_config.clip_gradients,\n 
learning_rate_decay_fn=learning_rate_decay_fn)\n\n # Set up the Saver for saving and restoring model checkpoints.\n saver = tf.train.Saver(max_to_keep=training_config.max_checkpoints_to_keep)\n\n session_config = tf.ConfigProto()\n session_config.gpu_options.allow_growth = True\n\n # Run training.\n tf.contrib.slim.learning.train(\n train_op,\n train_dir,\n log_every_n_steps=FLAGS.log_every_n_steps,\n graph=g,\n global_step=model.global_step,\n number_of_steps=FLAGS.number_of_steps,\n init_fn=model.init_fn,\n saver=saver,\n session_config=session_config)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
] | [
[
"tensorflow.gfile.IsDirectory",
"tensorflow.logging.set_verbosity",
"tensorflow.flags.DEFINE_string",
"tensorflow.flags.DEFINE_boolean",
"tensorflow.Graph",
"tensorflow.train.Saver",
"tensorflow.logging.info",
"tensorflow.ConfigProto",
"tensorflow.contrib.slim.learning.train",
"tensorflow.gfile.MakeDirs",
"tensorflow.contrib.layers.optimize_loss",
"tensorflow.flags.DEFINE_integer",
"tensorflow.constant",
"tensorflow.train.exponential_decay",
"tensorflow.app.run"
]
] |
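Each record pairs a source file (the `code` column) with the fully qualified third-party symbols it calls (the `apis` column); for the `show_and_tell` training script above, those are the `tensorflow.*` calls reachable through the `tf` import alias. The sketch below is a hypothetical way to recover such a list with Python's `ast` module. It illustrates the relationship between the two columns only; it is not the pipeline that produced this dataset, and the function name `third_party_calls` is invented here.

```python
import ast

def third_party_calls(source: str, module: str = "tensorflow") -> set:
    """Return dotted call names (e.g. 'tensorflow.train.Saver') rooted at `module`."""
    tree = ast.parse(source)
    # Map local aliases to the module name, e.g. {'tf': 'tensorflow'}.
    aliases = {
        alias.asname or alias.name: alias.name
        for node in ast.walk(tree) if isinstance(node, ast.Import)
        for alias in node.names if alias.name == module
    }
    calls = set()
    for node in ast.walk(tree):
        if not isinstance(node, ast.Call):
            continue
        # Rebuild the dotted attribute chain of the call target.
        parts, target = [], node.func
        while isinstance(target, ast.Attribute):
            parts.append(target.attr)
            target = target.value
        if isinstance(target, ast.Name) and target.id in aliases:
            parts.append(aliases[target.id])
            calls.add(".".join(reversed(parts)))
    return calls
```

Running this over the record's `code` string should surface names such as `tensorflow.Graph`, `tensorflow.train.Saver`, and `tensorflow.contrib.slim.learning.train`, which is roughly what the record's `apis` list contains; attribute accesses that are not calls (for example `tf.logging.INFO`) are excluded.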
mthcom/Transformer | [
"4862e46edd2979eb6afa3209247ee0498835216d"
] | [
"Process.py"
] | [
"import pandas as pd\nimport torchtext\nfrom torchtext import data\nfrom Tokenize import tokenize\nfrom Batch import MyIterator, batch_size_fn\nimport os\nimport dill as pickle\n\ndef read_data(opt):\n \n if opt.src_data is not None:\n try:\n opt.src_data = open(opt.src_data).read().strip().split('\\n')\n except:\n print(\"error: '\" + opt.src_data + \"' file not found\")\n quit()\n \n if opt.trg_data is not None:\n try:\n opt.trg_data = open(opt.trg_data).read().strip().split('\\n')\n except:\n print(\"error: '\" + opt.trg_data + \"' file not found\")\n quit()\n\ndef create_fields(opt):\n \n spacy_langs = ['en', 'fr', 'de', 'es', 'pt', 'it', 'nl']\n if opt.src_lang not in spacy_langs:\n print('invalid src language: ' + opt.src_lang + 'supported languages : ' + spacy_langs) \n if opt.trg_lang not in spacy_langs:\n print('invalid trg language: ' + opt.trg_lang + 'supported languages : ' + spacy_langs)\n \n print(\"loading spacy tokenizers...\")\n \n t_src = tokenize(opt.src_lang)\n t_trg = tokenize(opt.trg_lang)\n\n TRG = data.Field(lower=True, tokenize=t_trg.tokenizer, init_token='<sos>', eos_token='<eos>')\n SRC = data.Field(lower=True, tokenize=t_src.tokenizer)\n\n if opt.load_weights is not None:\n try:\n print(\"loading presaved fields...\")\n SRC = pickle.load(open(f'{opt.load_weights}/SRC.pkl', 'rb'))\n TRG = pickle.load(open(f'{opt.load_weights}/TRG.pkl', 'rb'))\n except:\n print(\"error opening SRC.pkl and TXT.pkl field files, please ensure they are in \" + opt.load_weights + \"/\")\n quit()\n \n return(SRC, TRG)\n\ndef create_dataset(opt, SRC, TRG):\n\n print(\"creating dataset and iterator... \")\n\n raw_data = {'src' : [line for line in opt.src_data], 'trg': [line for line in opt.trg_data]}\n df = pd.DataFrame(raw_data, columns=[\"src\", \"trg\"])\n \n mask = (df['src'].str.count(' ') < opt.max_strlen) & (df['trg'].str.count(' ') < opt.max_strlen)\n df = df.loc[mask]\n\n df.to_csv(\"translate_transformer_temp.csv\", index=False)\n \n data_fields = [('src', SRC), ('trg', TRG)]\n train = data.TabularDataset('./translate_transformer_temp.csv', format='csv', fields=data_fields)\n\n train_iter = MyIterator(train, batch_size=opt.batchsize, device='cuda',\n repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),\n batch_size_fn=batch_size_fn, train=True, shuffle=True)\n \n os.remove('translate_transformer_temp.csv')\n\n if opt.load_weights is None:\n SRC.build_vocab(train)\n TRG.build_vocab(train)\n if opt.checkpoint > 0:\n try:\n os.mkdir(\"weights\")\n except:\n print(\"weights folder already exists, run program with -load_weights weights to load them\")\n quit()\n pickle.dump(SRC, open('weights/SRC.pkl', 'wb'))\n pickle.dump(TRG, open('weights/TRG.pkl', 'wb'))\n\n opt.src_pad = SRC.vocab.stoi['<pad>']\n opt.trg_pad = TRG.vocab.stoi['<pad>']\n\n opt.train_len = get_len(train_iter)\n\n return train_iter\n\ndef get_len(train):\n\n for i, b in enumerate(train):\n pass\n \n return i\n"
] | [
[
"pandas.DataFrame"
]
] |
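In the Process.py record above, `get_len` exhausts the iterator and returns the final `enumerate` index, so the value stored in `opt.train_len` is one less than the number of batches, and an empty iterator fails with an `UnboundLocalError`. A minimal counting helper of the kind one might use instead is sketched below; it is illustrative only and not part of the stored file.

```python
def count_batches(train_iter):
    """Exhaust the iterator and return the number of batches it yields (0 if empty)."""
    return sum(1 for _ in train_iter)
```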
Microsoft/CameraTraps | [
"0f1d5c4f6d7823279a693735778fb985814c5161"
] | [
"visualization/visualization_utils.py"
] | [
"\"\"\"\nvisualization_utils.py\n\nCore rendering functions shared across visualization scripts\n\"\"\"\n\n#%% Constants and imports\n\nfrom io import BytesIO\nfrom typing import Union\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport requests\nfrom PIL import Image, ImageFile, ImageFont, ImageDraw\n\nfrom data_management.annotations import annotation_constants\nfrom data_management.annotations.annotation_constants import (\n detector_bbox_category_id_to_name) # here id is int\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nIMAGE_ROTATIONS = {\n 3: 180,\n 6: 270,\n 8: 90\n}\n\nTEXTALIGN_LEFT = 0\nTEXTALIGN_RIGHT = 1\n\n# convert category ID from int to str\nDEFAULT_DETECTOR_LABEL_MAP = {\n str(k): v for k, v in detector_bbox_category_id_to_name.items()\n}\n\n# Retry on blob storage read failures\nn_retries = 10\nretry_sleep_time = 0.01\nerror_names_for_retry = ['ConnectionError']\n \n\n#%% Functions\n\ndef open_image(input_file: Union[str, BytesIO]) -> Image:\n \"\"\"\n Opens an image in binary format using PIL.Image and converts to RGB mode.\n \n Supports local files or URLs.\n\n This operation is lazy; image will not be actually loaded until the first\n operation that needs to load it (for example, resizing), so file opening\n errors can show up later.\n\n Args:\n input_file: str or BytesIO, either a path to an image file (anything\n that PIL can open), or an image as a stream of bytes\n\n Returns:\n an PIL image object in RGB mode\n \"\"\"\n if (isinstance(input_file, str)\n and input_file.startswith(('http://', 'https://'))):\n try:\n response = requests.get(input_file)\n except Exception as e:\n print(f'Error retrieving image {input_file}: {e}')\n success = False\n if e.__class__.__name__ in error_names_for_retry:\n for i_retry in range(0,n_retries):\n try:\n time.sleep(retry_sleep_time)\n response = requests.get(input_file) \n except Exception as e:\n print(f'Error retrieving image {input_file} on retry {i_retry}: {e}')\n continue\n print('Succeeded on retry {}'.format(i_retry))\n success = True\n break\n if not success:\n raise\n try:\n image = Image.open(BytesIO(response.content))\n except Exception as e:\n print(f'Error opening image {input_file}: {e}')\n raise\n\n else:\n image = Image.open(input_file)\n if image.mode not in ('RGBA', 'RGB', 'L', 'I;16'):\n raise AttributeError(\n f'Image {input_file} uses unsupported mode {image.mode}')\n if image.mode == 'RGBA' or image.mode == 'L':\n # PIL.Image.convert() returns a converted copy of this image\n image = image.convert(mode='RGB')\n\n # Alter orientation as needed according to EXIF tag 0x112 (274) for Orientation\n #\n # https://gist.github.com/dangtrinhnt/a577ece4cbe5364aad28\n # https://www.media.mit.edu/pia/Research/deepview/exif.html\n #\n try:\n exif = image._getexif()\n orientation: int = exif.get(274, None) # 274 is the key for the Orientation field\n if orientation is not None and orientation in IMAGE_ROTATIONS:\n image = image.rotate(IMAGE_ROTATIONS[orientation], expand=True) # returns a rotated copy\n except Exception:\n pass\n\n return image\n\n\ndef load_image(input_file: Union[str, BytesIO]) -> Image:\n \"\"\"\n Loads the image at input_file as a PIL Image into memory.\n\n Image.open() used in open_image() is lazy and errors will occur downstream\n if not explicitly loaded.\n\n Args:\n input_file: str or BytesIO, either a path to an image file (anything\n that PIL can open), or an image as a stream of bytes\n\n Returns: PIL.Image.Image, in RGB mode\n \"\"\"\n image = open_image(input_file)\n 
image.load()\n return image\n\n\ndef resize_image(image, target_width, target_height=-1):\n \"\"\"\n Resizes a PIL image object to the specified width and height; does not resize\n in place. If either width or height are -1, resizes with aspect ratio preservation.\n If both are -1, returns the original image (does not copy in this case).\n \"\"\"\n\n # Null operation\n if target_width == -1 and target_height == -1:\n return image\n\n elif target_width == -1 or target_height == -1:\n\n # Aspect ratio as width over height\n # ar = w / h\n aspect_ratio = image.size[0] / image.size[1]\n\n if target_width != -1:\n # h = w / ar\n target_height = int(target_width / aspect_ratio)\n else:\n # w = ar * h\n target_width = int(aspect_ratio * target_height)\n\n resized_image = image.resize((target_width, target_height), Image.ANTIALIAS)\n return resized_image\n\n\ndef show_images_in_a_row(images):\n\n num = len(images)\n assert num > 0\n\n if isinstance(images[0], str):\n images = [Image.open(img) for img in images]\n\n fig, axarr = plt.subplots(1, num, squeeze=False) # number of rows, number of columns\n fig.set_size_inches((num * 5, 25)) # each image is 2 inches wide\n for i, img in enumerate(images):\n axarr[0, i].set_axis_off()\n axarr[0, i].imshow(img)\n return fig\n\n\n# The following three functions are modified versions of those at:\n# https://github.com/tensorflow/models/blob/master/research/object_detection/utils/visualization_utils.py\n\nCOLORS = [\n 'AliceBlue', 'Red', 'RoyalBlue', 'Gold', 'Chartreuse', 'Aqua', 'Azure',\n 'Beige', 'Bisque', 'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue',\n 'AntiqueWhite', 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson',\n 'Cyan', 'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',\n 'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',\n 'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',\n 'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'GoldenRod',\n 'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',\n 'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',\n 'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',\n 'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',\n 'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',\n 'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',\n 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',\n 'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',\n 'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',\n 'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',\n 'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',\n 'RosyBrown', 'Aquamarine', 'SaddleBrown', 'Green', 'SandyBrown',\n 'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',\n 'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',\n 'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',\n 'WhiteSmoke', 'Yellow', 'YellowGreen'\n]\n\n\ndef crop_image(detections, image, confidence_threshold=0.8, expansion=0):\n \"\"\"\n Crops detections above *confidence_threshold* from the PIL image *image*,\n returning a list of PIL images.\n\n *detections* should be a list of dictionaries with keys 'conf' and 'bbox';\n see bbox format description below. 
Normalized, [x,y,w,h], upper-left-origin.\n\n *expansion* specifies a number of pixels to include on each side of the box.\n \"\"\"\n\n ret_images = []\n\n for detection in detections:\n\n score = float(detection['conf'])\n\n if score >= confidence_threshold:\n\n x1, y1, w_box, h_box = detection['bbox']\n ymin,xmin,ymax,xmax = y1, x1, y1 + h_box, x1 + w_box\n\n # Convert to pixels so we can use the PIL crop() function\n im_width, im_height = image.size\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height)\n\n if expansion > 0:\n left -= expansion\n right += expansion\n top -= expansion\n bottom += expansion\n\n # PIL's crop() does surprising things if you provide values outside of\n # the image, clip inputs\n left = max(left,0); right = max(right,0)\n top = max(top,0); bottom = max(bottom,0)\n\n left = min(left,im_width-1); right = min(right,im_width-1)\n top = min(top,im_height-1); bottom = min(bottom,im_height-1)\n\n ret_images.append(image.crop((left, top, right, bottom)))\n\n # ...if this detection is above threshold\n\n # ...for each detection\n\n return ret_images\n\n\ndef render_detection_bounding_boxes(detections, image,\n label_map={},\n classification_label_map={},\n confidence_threshold=0.8, thickness=4, expansion=0,\n classification_confidence_threshold=0.3,\n max_classifications=3,\n colormap=COLORS,\n textalign=TEXTALIGN_LEFT):\n \"\"\"\n Renders bounding boxes, label, and confidence on an image if confidence is above the threshold.\n\n This works with the output of the batch processing API.\n\n Supports classification, if the detection contains classification results according to the\n API output version 1.0.\n\n Args:\n\n detections: detections on the image, example content:\n [\n {\n \"category\": \"2\",\n \"conf\": 0.996,\n \"bbox\": [\n 0.0,\n 0.2762,\n 0.1234,\n 0.2458\n ]\n }\n ]\n\n ...where the bbox coordinates are [x, y, box_width, box_height].\n\n (0, 0) is the upper-left. Coordinates are normalized.\n\n Supports classification results, if *detections* has the format\n [\n {\n \"category\": \"2\",\n \"conf\": 0.996,\n \"bbox\": [\n 0.0,\n 0.2762,\n 0.1234,\n 0.2458\n ]\n \"classifications\": [\n [\"3\", 0.901],\n [\"1\", 0.071],\n [\"4\", 0.025]\n ]\n }\n ]\n\n image: PIL.Image object\n\n label_map: optional, mapping the numerical label to a string name. The type of the numerical label\n (default string) needs to be consistent with the keys in label_map; no casting is carried out.\n\n classification_label_map: optional, mapping of the string class labels to the actual class names.\n The type of the numerical label (default string) needs to be consistent with the keys in\n label_map; no casting is carried out.\n\n confidence_threshold: optional, threshold above which the bounding box is rendered.\n thickness: line thickness in pixels. Default value is 4.\n expansion: number of pixels to expand bounding boxes on each side. 
Default is 0.\n classification_confidence_threshold: confidence above which classification result is retained.\n max_classifications: maximum number of classification results retained for one image.\n\n image is modified in place.\n \"\"\"\n\n display_boxes = []\n display_strs = [] # list of lists, one list of strings for each bounding box (to accommodate multiple labels)\n classes = [] # for color selection\n\n for detection in detections:\n\n score = detection['conf']\n if score >= confidence_threshold:\n\n x1, y1, w_box, h_box = detection['bbox']\n display_boxes.append([y1, x1, y1 + h_box, x1 + w_box])\n clss = detection['category']\n label = label_map[clss] if clss in label_map else clss\n displayed_label = ['{}: {}%'.format(label, round(100 * score))]\n\n if 'classifications' in detection:\n\n # To avoid duplicate colors with detection-only visualization, offset\n # the classification class index by the number of detection classes\n clss = annotation_constants.NUM_DETECTOR_CATEGORIES + int(detection['classifications'][0][0])\n classifications = detection['classifications']\n if len(classifications) > max_classifications:\n classifications = classifications[0:max_classifications]\n for classification in classifications:\n p = classification[1]\n if p < classification_confidence_threshold:\n continue\n class_key = classification[0]\n if class_key in classification_label_map:\n class_name = classification_label_map[class_key]\n else:\n class_name = class_key\n displayed_label += ['{}: {:5.1%}'.format(class_name.lower(), classification[1])]\n\n # ...if we have detection results\n display_strs.append(displayed_label)\n classes.append(clss)\n\n # ...if the confidence of this detection is above threshold\n\n # ...for each detection\n display_boxes = np.array(display_boxes)\n\n draw_bounding_boxes_on_image(image, display_boxes, classes,\n display_strs=display_strs, thickness=thickness, \n expansion=expansion, colormap=colormap, textalign=textalign)\n\n\ndef draw_bounding_boxes_on_image(image,\n boxes,\n classes,\n thickness=4,\n expansion=0,\n display_strs=(),\n colormap=COLORS,\n textalign=TEXTALIGN_LEFT):\n \"\"\"\n Draws bounding boxes on an image.\n\n Args:\n image: a PIL.Image object.\n boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).\n The coordinates are in normalized format between [0, 1].\n classes: a list of ints or strings (that can be cast to ints) corresponding to the class labels of the boxes.\n This is only used for selecting the color to render the bounding box in.\n thickness: line thickness in pixels. Default value is 4.\n expansion: number of pixels to expand bounding boxes on each side. 
Default is 0.\n display_strs: list of list of strings.\n a list of strings for each bounding box.\n The reason to pass a list of strings for a\n bounding box is that it might contain\n multiple labels.\n \"\"\"\n\n boxes_shape = boxes.shape\n if not boxes_shape:\n return\n if len(boxes_shape) != 2 or boxes_shape[1] != 4:\n # print('Input must be of size [N, 4], but is ' + str(boxes_shape))\n return # no object detection on this image, return\n for i in range(boxes_shape[0]):\n if display_strs:\n display_str_list = display_strs[i]\n draw_bounding_box_on_image(image,\n boxes[i, 0], boxes[i, 1], boxes[i, 2], boxes[i, 3],\n classes[i],\n thickness=thickness, expansion=expansion,\n display_str_list=display_str_list,\n colormap=colormap,\n textalign=textalign)\n\n\ndef draw_bounding_box_on_image(image,\n ymin,\n xmin,\n ymax,\n xmax,\n clss=None,\n thickness=4,\n expansion=0,\n display_str_list=(),\n use_normalized_coordinates=True,\n label_font_size=16,\n colormap=COLORS,\n textalign=TEXTALIGN_LEFT):\n \"\"\"\n Adds a bounding box to an image.\n\n Bounding box coordinates can be specified in either absolute (pixel) or\n normalized coordinates by setting the use_normalized_coordinates argument.\n\n Each string in display_str_list is displayed on a separate line above the\n bounding box in black text on a rectangle filled with the input 'color'.\n If the top of the bounding box extends to the edge of the image, the strings\n are displayed below the bounding box.\n\n Args:\n image: a PIL.Image object.\n ymin: ymin of bounding box - upper left.\n xmin: xmin of bounding box.\n ymax: ymax of bounding box.\n xmax: xmax of bounding box.\n clss: str, the class of the object in this bounding box - will be cast to an int.\n thickness: line thickness. Default value is 4.\n expansion: number of pixels to expand bounding boxes on each side. Default is 0.\n display_str_list: list of strings to display in box\n (each to be shown on its own line).\n use_normalized_coordinates: If True (default), treat coordinates\n ymin, xmin, ymax, xmax as relative to the image. Otherwise treat\n coordinates as absolute.\n label_font_size: font size to attempt to load arial.ttf with\n \"\"\"\n if clss is None:\n color = colormap[1]\n else:\n color = colormap[int(clss) % len(colormap)]\n\n draw = ImageDraw.Draw(image)\n im_width, im_height = image.size\n if use_normalized_coordinates:\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height)\n else:\n (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n\n if expansion > 0:\n left -= expansion\n right += expansion\n top -= expansion\n bottom += expansion\n\n # Deliberately trimming to the width of the image only in the case where\n # box expansion is turned on. 
There's not an obvious correct behavior here,\n # but the thinking is that if the caller provided an out-of-range bounding\n # box, they meant to do that, but at least in the eyes of the person writing\n # this comment, if you expand a box for visualization reasons, you don't want\n # to end up with part of a box.\n #\n # A slightly more sophisticated might check whether it was in fact the expansion\n # that made this box larger than the image, but this is the case 99.999% of the time\n # here, so that doesn't seem necessary.\n left = max(left,0); right = max(right,0)\n top = max(top,0); bottom = max(bottom,0)\n\n left = min(left,im_width-1); right = min(right,im_width-1)\n top = min(top,im_height-1); bottom = min(bottom,im_height-1)\n\n draw.line([(left, top), (left, bottom), (right, bottom),\n (right, top), (left, top)], width=thickness, fill=color)\n\n try:\n font = ImageFont.truetype('arial.ttf', label_font_size)\n except IOError:\n font = ImageFont.load_default()\n\n # If the total height of the display strings added to the top of the bounding\n # box exceeds the top of the image, stack the strings below the bounding box\n # instead of above.\n display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n\n # Each display_str has a top and bottom margin of 0.05x.\n total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)\n\n if top > total_display_str_height:\n text_bottom = top\n else:\n text_bottom = bottom + total_display_str_height\n\n # Reverse list and print from bottom to top.\n for display_str in display_str_list[::-1]:\n text_width, text_height = font.getsize(display_str)\n \n text_left = left\n \n if textalign == TEXTALIGN_RIGHT:\n text_left = right - text_width\n \n margin = np.ceil(0.05 * text_height)\n\n draw.rectangle(\n [(text_left, text_bottom - text_height - 2 * margin), (text_left + text_width,\n text_bottom)],\n fill=color)\n\n draw.text(\n (text_left + margin, text_bottom - text_height - margin),\n display_str,\n fill='black',\n font=font)\n\n text_bottom -= (text_height + 2 * margin)\n\n\ndef render_iMerit_boxes(boxes, classes, image,\n label_map=annotation_constants.annotation_bbox_category_id_to_name):\n \"\"\"\n Renders bounding boxes and their category labels on a PIL image.\n\n Args:\n boxes: bounding box annotations from iMerit, format is [x_rel, y_rel, w_rel, h_rel] (rel = relative coords)\n classes: the class IDs of the predicted class of each box/object\n image: PIL.Image object to annotate on\n label_map: optional dict mapping classes to a string for display\n\n Returns:\n image will be altered in place\n \"\"\"\n\n display_boxes = []\n display_strs = [] # list of list, one list of strings for each bounding box (to accommodate multiple labels)\n for box, clss in zip(boxes, classes):\n if len(box) == 0:\n assert clss == 5\n continue\n x_rel, y_rel, w_rel, h_rel = box\n ymin, xmin = y_rel, x_rel\n ymax = ymin + h_rel\n xmax = xmin + w_rel\n\n display_boxes.append([ymin, xmin, ymax, xmax])\n\n if label_map:\n clss = label_map[int(clss)]\n display_strs.append([clss])\n\n display_boxes = np.array(display_boxes)\n draw_bounding_boxes_on_image(image, display_boxes, classes, display_strs=display_strs)\n\n\ndef render_megadb_bounding_boxes(boxes_info, image):\n \"\"\"\n Args:\n boxes_info: list of dict, each dict represents a single detection\n {\n \"category\": \"animal\",\n \"bbox\": [\n 0.739,\n 0.448,\n 0.187,\n 0.198\n ]\n }\n where bbox coordinates are normalized [x_min, y_min, width, height]\n image: PIL.Image.Image, opened image\n 
\"\"\"\n display_boxes = []\n display_strs = []\n classes = [] # ints, for selecting colors\n\n for b in boxes_info:\n x_min, y_min, w_rel, h_rel = b['bbox']\n y_max = y_min + h_rel\n x_max = x_min + w_rel\n display_boxes.append([y_min, x_min, y_max, x_max])\n display_strs.append([b['category']])\n classes.append(annotation_constants.detector_bbox_category_name_to_id[b['category']])\n\n display_boxes = np.array(display_boxes)\n draw_bounding_boxes_on_image(image, display_boxes, classes, display_strs=display_strs)\n\n\ndef render_db_bounding_boxes(boxes, classes, image, original_size=None,\n label_map=None, thickness=4, expansion=0):\n \"\"\"\n Render bounding boxes (with class labels) on [image]. This is a wrapper for\n draw_bounding_boxes_on_image, allowing the caller to operate on a resized image\n by providing the original size of the image; bboxes will be scaled accordingly.\n \"\"\"\n\n display_boxes = []\n display_strs = []\n\n if original_size is not None:\n image_size = original_size\n else:\n image_size = image.size\n\n img_width, img_height = image_size\n\n for box, clss in zip(boxes, classes):\n\n x_min_abs, y_min_abs, width_abs, height_abs = box\n\n ymin = y_min_abs / img_height\n ymax = ymin + height_abs / img_height\n\n xmin = x_min_abs / img_width\n xmax = xmin + width_abs / img_width\n\n display_boxes.append([ymin, xmin, ymax, xmax])\n\n if label_map:\n clss = label_map[int(clss)]\n display_strs.append([str(clss)]) # need to be a string here because PIL needs to iterate through chars\n\n display_boxes = np.array(display_boxes)\n draw_bounding_boxes_on_image(image, display_boxes, classes, display_strs=display_strs,\n thickness=thickness, expansion=expansion)\n\n\ndef draw_bounding_boxes_on_file(input_file, output_file, detections, confidence_threshold=0.0,\n detector_label_map=DEFAULT_DETECTOR_LABEL_MAP):\n \"\"\"\n Render detection bounding boxes on an image loaded from file, writing the results to a\n new images file. \"detections\" is in the API results format.\n \"\"\"\n \n image = open_image(input_file)\n\n render_detection_bounding_boxes(\n detections, image, label_map=detector_label_map,\n confidence_threshold=confidence_threshold)\n\n image.save(output_file)\n"
] | [
[
"numpy.array",
"numpy.ceil",
"matplotlib.pyplot.subplots"
]
] |
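The detection dictionaries in the visualization_utils.py record above store boxes as normalized `[x_min, y_min, width, height]` with the origin at the upper-left, while the drawing helpers take normalized `[y_min, x_min, y_max, x_max]` and scale them by the PIL image size. A small conversion sketch follows; the function names are invented for illustration and are not part of the stored file.

```python
def api_bbox_to_drawing_box(bbox):
    """[x_min, y_min, width, height] (normalized) -> [y_min, x_min, y_max, x_max]."""
    x_min, y_min, width, height = bbox
    return [y_min, x_min, y_min + height, x_min + width]

def drawing_box_to_pixels(box, image_size):
    """Scale a normalized [y_min, x_min, y_max, x_max] box to pixel coordinates.

    `image_size` follows PIL's (width, height) convention.
    """
    im_width, im_height = image_size
    y_min, x_min, y_max, x_max = box
    return [y_min * im_height, x_min * im_width, y_max * im_height, x_max * im_width]
```

For the example detection shown in the record's docstring, `api_bbox_to_drawing_box([0.0, 0.2762, 0.1234, 0.2458])` gives approximately `[0.2762, 0.0, 0.522, 0.1234]`.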
Q2MM/q2mm | [
"7cda5c376d6e76ebc2a7231e8b7fb11ada389062"
] | [
"q2mm/calculate.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nExtracts data from reference files or calculates FF data.\n\nTakes a sequence of keywords corresponding to various\ndatatypes (ex. mb = MacroModel bond lengths) followed by filenames,\nand extracts that particular data type from the file.\n\nNote that the order of filenames IS IMPORTANT!\n\nUsed to manage calls to MacroModel but that is now done in the\nMae class inside filetypes. I'm still debating if that should be\nthere or here. Will see how this framework translates into\nAmber and then decide.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\nimport logging\nimport logging.config\nimport numpy as np\nimport os\nimport sys\n\n# I don't really want to import all of chain if possible. I only want\n# chain.from_iterable.\n# chain.from_iterable flattens a list of lists similar to:\n# [child for parent in grandparent for child in parent]\n# However, I think chain.from_iterable works on any number of nested lists.\nfrom itertools import chain\nfrom textwrap import TextWrapper\n\nimport constants as co\nimport compare\nimport datatypes\nimport filetypes\nimport parameters\n\nlogger = logging.getLogger(__name__)\n\n# Commands where we need to load the force field.\nCOM_LOAD_FF = ['ma', 'mb', 'mt',\n 'ja', 'jb', 'jt']\n# Commands related to Gaussian.\nCOM_GAUSSIAN = ['gaa','gaao','gab','gabo','gat','gato',\n 'gta','gtb','gtt','ge','ge1', 'gea', 'geo','ge1o', 'geao',\n 'gh', 'geigz']\n# Commands related to Jaguar (Schrodinger).\nCOM_JAGUAR = ['jq', 'jqh', 'jqa',\n 'je', 'jeo', 'jea', 'jeao',\n 'jh', 'jeigz']\n# Commands related to MacroModel (Schrodinger).\n# Seems odd that the Jaguar geometry datatypes are in here, but we\n# do a MacroModel calculation to get the data in an easy form to\n# extract.\nCOM_MACROMODEL = ['ja', 'jb', 'jt',\n 'mq', 'mqh', 'mqa',\n 'ma', 'mb', 'mt',\n 'me', 'meo', 'mea', 'meao',\n 'mh', 'mjeig', 'mgeig',\n 'mp', 'mgESP', 'mjESP']\n# Commands related to Tinker.\nCOM_TINKER = ['ta','tao', 'tb', 'tbo',\n 'tt','tto', 'te', 'teo',\n 'tea','teao', 'th',\n 'tjeigz', 'tgeig']\n# Commands related to Amber.\nCOM_AMBER = ['ae','ae1','aeo','ae1o','abo','aao','ato','ah']\n# All other commands.\nCOM_OTHER = ['r'] \n# All possible commands.\nCOM_ALL = COM_GAUSSIAN + COM_JAGUAR + COM_MACROMODEL + COM_TINKER + \\\n COM_AMBER + COM_OTHER\n\ndef main(args):\n \"\"\"\n Arguments\n ---------\n args : string or list of strings\n Evaluated using parser returned by return_calculate_parser(). If\n it's a string, it will be converted into a list of strings.\n \"\"\"\n # Should be a list of strings for use by argparse. 
Ensure that's the case.\n # basestring is deprecated in python3, str is probably safe to use in both\n # but should be tested, for now sys.version_info switch can handle it\n if sys.version_info > (3, 0):\n if isinstance(args, str):\n args = args.split()\n else:\n if isinstance(args, basestring):\n args = args.split()\n parser = return_calculate_parser()\n opts = parser.parse_args(args)\n # This makes a dictionary that only contains the arguments related to\n # extracting data from everything in the argparse dictionary, opts.\n # Given that the user supplies:\n # python calculate.py -me a1.01.mae a2.01.mae a3.01.mae -me b1.01.mae\n # b2.01.mae -mb a1.01.mae b1.01.mae -jeig a1.01.in,a1.out\n # b1.01.in,b1.out\n # commands looks like:\n # {'me': [['a1.01.mae', 'a2.01.mae', 'a3.01.mae'],\n # ['b1.01.mae', 'b2.01.mae']],\n # 'mb': [['a1.01.mae'], ['b1.01.mae']],\n # 'jeig': [['a1.01.in,a1.out', 'b1.01.in,b1.out']]\n # }\n commands = {key: value for key, value in opts.__dict__.items() if key\n in COM_ALL and value}\n # Add in the empty commands. I'd rather not do this, but it makes later\n # coding when collecting data easier.\n for command in COM_ALL:\n if command not in commands:\n commands.update({command: []})\n pretty_all_commands(commands)\n # This groups all of the data type commands associated with one file.\n # commands_for_filenames looks like:\n # {'a1.01.mae': ['me', 'mb'],\n # 'a1.01.in': ['jeig'],\n # 'a1.out': ['jeig'],\n # 'a2.01.mae': ['me'],\n # 'a3.01.mae': ['me'],\n # 'b1.01.mae': ['me', 'mb'],\n # 'b1.01.in': ['jeig'],\n # 'b1.out': ['jeig'],\n # 'b2.01.mae': ['me']\n # }\n commands_for_filenames = sort_commands_by_filename(commands)\n pretty_commands_for_files(commands_for_filenames)\n # This dictionary associates the filename that the user supplied with\n # the command file that has to be used to execute some backend software\n # calculate in order to retrieve the data that the user requested.\n # inps looks like:\n # {'a1.01.mae': <__main__.Mae object at 0x1110e10>,\n # 'a1.01.in': None,\n # 'a1.out': None,\n # 'a2.01.mae': <__main__.Mae object at 0x1733b23>,\n # 'a3.01.mae': <__main__.Mae object at 0x1853e12>,\n # 'b1.01.mae': <__main__.Mae object at 0x2540e10>,\n # 'b1.01.in': None,\n # 'b1.out': None,\n # 'b2.01.mae': <__main__.Mae object at 0x1353e11>,\n # }\n inps = {}\n # This generates any of the necessary command files. It uses\n # commands_for_filenames, which contains all of the data types associated\n # with the given file.\n # Stuff below doesn't need both comma separated filenames simultaneously.\n for filename, commands_for_filename in commands_for_filenames.items():\n logger.log(1, '>>> filename: {}'.format(filename))\n logger.log(1, '>>> commands_for_filename: {}'.format(\n commands_for_filename))\n # These next two if statements will break down what command files\n # have to be written by the backend software package.\n if any(x in COM_MACROMODEL for x in commands_for_filename):\n if os.path.splitext(filename)[1] == '.mae':\n inps[filename] = filetypes.Mae(\n os.path.join(opts.directory, filename))\n inps[filename].commands = commands_for_filename\n inps[filename].write_com(sometext=opts.append)\n #Has to be here even though this is a Gaussian Job.\n if os.path.splitext(filename)[1] == '.chk':\n # The generated com file will be used as the input filename. It\n # also seems best to do the gaussian calculation in the \n # collect_data function since we need to collect the force \n # fields partial charges. 
\n com_filename = os.path.splitext(filename)[0] + '.ESP.q2mm.com'\n inps[com_filename] = filetypes.GaussCom(\n os.path.join(opts.directory, com_filename))\n inps[com_filename].commands = commands_for_filename\n inps[com_filename].read_newzmat(filename)\n \n\n elif any(x in COM_TINKER for x in commands_for_filename):\n if os.path.splitext(filename)[1] == '.xyz':\n inps[filename] = filetypes.TinkerXYZ(\n os.path.join(opts.directory, filename))\n inps[filename].commands = commands_for_filename\n # Gaussian to Tinker\n elif any(x in ['gta','gtb','gtt'] for x in commands_for_filename):\n # For bond, angle, torsion taken from Gaussian\n # The xyz will be collected from Gaussian and be rewritten in corresponding software\n # \n # Q2MM takes commands_for_filename for each line of RDAT and CDAT\n # must make difference type\n # Tinker\n if os.path.splitext(filename)[1] == \".log\":\n inps[filename] = filetypes.TinkerXYZ_FOR_GAUS(\n os.path.join(opts.directory, filename))\n inps[filename].commands = commands_for_filename\n # Gausssian to Amber\n elif any(x in ['gaa','gab','gat','gaao','gabo','gato'] for x in commands_for_filename):\n if os.path.splitext(filename)[1] == \".log\":\n inps[filename] = filetypes.AmberLeap_Gaus(\n os.path.join(opts.directory, filename))\n inps[filename].commands = commands_for_filename\n \n elif any(x in COM_AMBER for x in commands_for_filename):\n if os.path.splitext(filename)[1] == \".in\": # leap.in as for now\n inps[filename] = filetypes.AmberLeap(os.path.join(opts.directory, filename))\n inps[filename].commands = commands_for_filename\n # This doesn't work.\n # We need to know both filenames simultaneously for this Amber crap.\n # Have to add these to `inps` in some other way.\n # pass\n # In this case, no command files have to be written.\n else:\n inps[filename] = None\n # Stuff below needs both comma separated filenames simultaneously.\n # Do the Amber inputs.\n # Leaving the filenames together because Taylor said this would work well.\n# for comma_sep_filenames in flatten(commands['ae']):\n# # Maybe make more specific later.\n# inps[comma_sep_filenames] = filetypes.AmberInput(\n# 'DOES_PATH_EVEN_MATTER')\n# split_it = comma_sep_filenames.split(',')\n# inps[comma_sep_filenames].directory = opts.directory\n# inps[comma_sep_filenames].inpcrd = split_it[0]\n# inps[comma_sep_filenames].prmtop = split_it[1]\n logger.log(1, '>>> commands: {}'.format(commands))\n # Check whether or not to skip calculations.\n if opts.norun or opts.fake:\n logger.log(15, \" -- Skipping backend calculations.\")\n else:\n for filename, some_class in inps.items():\n logger.log(1, '>>> filename: {}'.format(filename))\n logger.log(1, '>>> some_class: {}'.format(some_class))\n # Works if some class is None too.\n if hasattr(some_class, 'run'):\n # Ideally this can be the same for each software backend,\n # but that means we're going to have to make some changes\n # so that this token argument is handled properly.\n some_class.run(check_tokens=opts.check)\n # `data` is a list comprised of datatypes.Datum objects.\n # If we remove/with sorting removed, the Datum class is less\n # useful. 
We may want to reduce this to a N x 3 matrix or\n # 3 vectors (labels, weights, values).\n sub_names = ['OPT']\n if opts.subnames:\n sub_names = opts.subnames\n if opts.fake:\n data = collect_data_fake(\n commands, inps, direc=opts.directory, invert=opts.invert,\n sub_names=sub_names)\n else:\n data = collect_data(\n commands, inps, direc=opts.directory, invert=opts.invert,\n sub_names=sub_names)\n # Adds weights to the data points in the data list.\n if opts.weight:\n compare.import_weights(data)\n # Optional printing or logging of data.\n if opts.doprint:\n pretty_data(data, log_level=None)\n return data\n\ndef return_calculate_parser(add_help=True, parents=None):\n '''\n Command line argument parser for calculate.\n\n Arguments\n ---------\n add_help : bool\n Whether or not to add help to the parser. Default\n is True.\n parents : argparse.ArgumentParser\n Parent parser incorporated into this parser. Default\n is None.\n '''\n # Whether or not to add parents parsers. Not sure if/where this may be used\n # anymore.\n if parents is None: parents = []\n # Whether or not to add help. You may not want to add help if these\n # arguments are being used in another, higher level parser.\n if add_help:\n parser = argparse.ArgumentParser(\n description=__doc__, parents=parents)\n else:\n parser = argparse.ArgumentParser(\n add_help=False, parents=parents)\n # GENERAL OPTIONS\n opts = parser.add_argument_group(\"calculate options\")\n opts.add_argument(\n '--append', '-a', type=str, metavar='sometext',\n help='Append this text to command files generated by Q2MM.')\n opts.add_argument(\n '--directory', '-d', type=str, metavar='somepath', default=os.getcwd(),\n help=('Directory searched for files '\n '(ex. *.mae, *.log, mm3.fld, etc.). '\n 'Subshell commands (ex. MacroModel) are executed from here. '\n 'Default is the current directory.'))\n opts.add_argument(\n '--doprint', '-p', action='store_true',\n help=(\"Logs data. Can generate extensive log files.\"))\n opts.add_argument(\n '--fake', action='store_true',\n help=(\"Generate fake data sets. Used to expedite testing.\"))\n opts.add_argument(\n '--ffpath', '-f', type=str, metavar='somepath',\n help=(\"Path to force field. Only necessary for certain data types \"\n \"if you don't provide the substructure name.\"))\n opts.add_argument(\n '--invert', '-i', type=float, metavar='somefloat',\n help=(\"This option will invert the smallest eigenvalue to be whatever \"\n \"value is specified by this argument whenever a Hessian is \"\n \"read.\"))\n opts.add_argument(\n '--nocheck', '-nc', action='store_false', dest='check', default=True,\n help=(\"By default, Q2MM checks whether MacroModel tokens are \"\n \"available before attempting a MacroModel calculation. 
If this \"\n \"option is supplied, MacroModel will not check for tokens \"\n \"first.\"))\n opts.add_argument(\n '--norun', '-n', action='store_true',\n help=\"Don't run 3rd party software.\")\n opts.add_argument(\n '--subnames', '-s', type=str, nargs='+',\n metavar='\"Substructure Name OPT\"',\n help=(\"Names of the substructures containing parameters to \"\n \"optimize in a mm3.fld file.\"))\n opts.add_argument(\n '--weight', '-w', action='store_true',\n help='Add weights to data points.')\n # GAUSSIAN OPTIONS\n gau_args = parser.add_argument_group(\"gaussian reference data types\")\n gau_args.add_argument(\n '-gta', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian angles using Tinker.'))\n gau_args.add_argument(\n '-gtb', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian bonds using Tinker.'))\n gau_args.add_argument(\n '-gtt', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian torsions using Tinker.'))\n gau_args.add_argument(\n '-gaa', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian angles using Amber.'))\n gau_args.add_argument(\n '-gab', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian bonds using Amber.'))\n gau_args.add_argument(\n '-gat', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian torsions using Amber.'))\n gau_args.add_argument(\n '-gaao', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian angles using Amber (POST OPT).'))\n gau_args.add_argument(\n '-gabo', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian bonds using Amber (POST OPT).'))\n gau_args.add_argument(\n '-gato', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian torsions using Amber (POST OPT).'))\n gau_args.add_argument(\n '-ge', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian energies.'))\n gau_args.add_argument(\n '-ge1', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian energy.'))\n gau_args.add_argument(\n '-gea', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian energies. Energies will be relative to the average '\n 'energy within this data type.'))\n gau_args.add_argument(\n '-geo', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian energies. Same as -ge, except the files selected '\n 'by this command will have their energies compared to those '\n 'selected by -meo.'))\n gau_args.add_argument(\n '-ge1o', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian energy. Used for FF a1o commands.'))\n gau_args.add_argument(\n '-geao', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian energies. Same as -ge, except the files selected '\n 'by this command will have their energies compared to those '\n 'selected by -meo. 
Energies will be relative to the average '\n 'energy within this data type.'))\n gau_args.add_argument(\n '-gh', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help='Gaussian Hessian extracted from a .log archive.')\n gau_args.add_argument(\n '-geigz', type=str, nargs='+', action='append',\n default=[], metavar='somename.log',\n help=('Gaussian eigenmatrix. Incluldes all elements, but zeroes '\n 'all off-diagonal elements. Uses only the .log for '\n 'the eigenvalues and eigenvectors.'))\n # JAGUAR OPTIONS\n jag_args = parser.add_argument_group(\"jaguar reference data types\")\n jag_args.add_argument(\n '-jq', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='Jaguar partial charges.')\n jag_args.add_argument(\n '-jqh', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help=('Jaguar partial charges (excludes aliphatic hydrogens). '\n 'Sums aliphatic hydrogen charges into their bonded sp3 '\n 'carbon.'))\n jag_args.add_argument(\n '-jqa', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help=('Jaguar partial charges. Sums the partial charge of all singly '\n 'bonded hydrogens into its connected atom.'))\n jag_args.add_argument(\n '-je', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='Jaguar energies.')\n jag_args.add_argument(\n '-jea', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help=('Jaguar energies. Everything will be relative to the average '\n 'energy.'))\n jag_args.add_argument(\n '-jeo', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help=('Jaguar energies. Same as -je, except the files selected '\n 'by this command will have their energies compared to those '\n 'selected by -meo.'))\n jag_args.add_argument(\n '-jeao', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help=('Jaguar energies. Same as -jea, except the files selected '\n 'by this command will have their energies compared to those '\n 'selected by -meao.'))\n jag_args.add_argument(\n '-ja', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='Jaguar angles.')\n jag_args.add_argument(\n '-jb', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='Jaguar bond lengths.')\n jag_args.add_argument(\n '-jt', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='Jaguar torsions.')\n jag_args.add_argument(\n '-jh', type=str, nargs='+', action='append',\n default=[], metavar='somename.in',\n help='Jaguar Hessian.')\n jag_args.add_argument(\n '-jeigz', type=str, nargs='+', action='append',\n default=[], metavar='somename.in,somename.out',\n help=('Jaguar eigenmatrix. Incluldes all elements, but zeroes '\n 'all off-diagonal elements.'))\n # ADDITIONAL REFERENCE OPTIONS\n ref_args = parser.add_argument_group(\"other reference data types\")\n ref_args.add_argument(\n '-r', type=str, nargs='+', action='append',\n default=[], metavar='somename.txt',\n help=('Read reference data from file. The reference file should '\n '3 space or tab separated columns. 
Column 1 is the labels, '\n 'column 2 is the weights and column 3 is the values.'))\n # MACROMODEL OPTIONS\n mm_args = parser.add_argument_group(\"macromodel data types\")\n mm_args.add_argument(\n '-mq', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='MacroModel charges.')\n mm_args.add_argument(\n '-mqh', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='MacroModel charges (excludes aliphatic hydrogens).')\n mm_args.add_argument(\n '-mqa', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help=('MacroModel partial charges. Sums the partial charge of all '\n 'singly bonded hydrogens into its connected atom.'))\n mm_args.add_argument(\n '-me', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='MacroModel energies (pre-FF optimization).')\n mm_args.add_argument(\n '-mea', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='MacroModel energies (pre-FF optimization). Energies will be '\n 'relative to the average energy.')\n mm_args.add_argument(\n '-meo', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='MacroModel energies (post-FF optimization).')\n mm_args.add_argument(\n '-meao', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='MacroModel energies (post-FF optimization). Energies will be '\n 'relative to the average energy.')\n mm_args.add_argument(\n '-mb', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='MacroModel bond lengths (post-FF optimization).')\n mm_args.add_argument(\n '-ma', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='MacroModel angles (post-FF optimization).')\n mm_args.add_argument(\n '-mt', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='MacroModel torsions (post-FF optimization).')\n mm_args.add_argument(\n '-mh', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae',\n help='MacroModel Hessian.')\n mm_args.add_argument(\n '-mjeig', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae,somename.out',\n help='MacroModel eigenmatrix (all elements). Uses Jaguar '\n 'eigenvectors.')\n mm_args.add_argument(\n '-mgeig', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae,somename.log',\n help='MacroModel eigenmatrix (all elements). Uses Gaussian '\n 'eigenvectors.')\n mm_args.add_argument(\n '-mp', type=str, nargs='+', action='append',\n default=[], metavar='somename.fld,somename.txt',\n help='Uses a MM3* FF file (somename.fld) and a parameter file '\n '(somename.txt) to use the current FF parameter values as data. 
This '\n 'is used for harmonic parameter tethering.')\n mm_args.add_argument(\n '-mgESP', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae,somename.chk',\n help='Uses the partial charges obtained from the FF and *mae file to '\n 'determine the RMS of electrostatic fitting from a gaussain *chk file.')\n mm_args.add_argument(\n '-mjESP', type=str, nargs='+', action='append',\n default=[], metavar='somename.mae,somename.in',\n help='Uses the partial charges obtained from the FF and *mae file to '\n 'determine the RMS of electrostatic fitting from a schrodinger *in '\n 'file.')\n # TINKER OPTIONS\n tin_args = parser.add_argument_group(\"tinker data types\")\n tin_args.add_argument(\n '-te', type=str, nargs='+', action='append',\n default=[], metavar='somename.xyz',\n help='Tinker energies (pre-FF optimization).')\n tin_args.add_argument(\n '-tea', type=str, nargs='+', action='append',\n default=[], metavar='somename.xyz',\n help='Tinker energies (pre-FF optimization). Energies will be '\n 'relative to the average energy.')\n tin_args.add_argument(\n '-teo', type=str, nargs='+', action='append',\n default=[], metavar='somename.xyz',\n help='Tinker energies (post-FF optimization).')\n tin_args.add_argument(\n '-teao', type=str, nargs='+', action='append',\n default=[], metavar='somename.xyz',\n help='Tinker energies (post-FF optimization). Energies will be '\n 'relative to the average energy.')\n tin_args.add_argument(\n '-tb', type=str, nargs='+', action='append',\n default=[], metavar='somename.xyz',\n help='Tinker bond lengths (pre-FF optimization).')\n tin_args.add_argument(\n '-tbo', type=str, nargs='+', action='append',\n default=[], metavar='somename.xyz',\n help='Tinker bond lengths (post-FF optimization).')\n tin_args.add_argument(\n '-ta', type=str, nargs='+', action='append',\n default=[], metavar='somename.xyz',\n help='Tinker angles (pre-FF optimization).')\n tin_args.add_argument(\n '-tao', type=str, nargs='+', action='append',\n default=[], metavar='somename.xyz',\n help='Tinker angles (post-FF optimization).')\n tin_args.add_argument(\n '-tt', type=str, nargs='+', action='append',\n default=[], metavar='somename.xyz',\n help='Tinker torsions (pre-FF optimization).')\n tin_args.add_argument(\n '-tto', type=str, nargs='+', action='append',\n default=[], metavar='somename.xyz',\n help='Tinker torsions (post-FF optimization).')\n tin_args.add_argument(\n '-th', type=str, nargs='+', action='append',\n default=[], metavar='somename.xyz',\n help='Tinker Hessian.')\n tin_args.add_argument(\n '-tjeig', type=str, nargs='+', action='append',\n default=[], metavar='somename.xyz,somename.out',\n help='Tinker eigenmatrix (all elements). Uses Jaguar '\n 'eigenvectors.')\n tin_args.add_argument(\n '-tgeig', type=str, nargs='+', action='append',\n default=[], metavar='somename.xyz,somename.log',\n help='Tinker eigenmatrix (all elements). 
Uses Gaussian '\n 'eigenvectors.')\n # AMBER OPTIONS\n amb_args = parser.add_argument_group(\"amber data types\")\n amb_args.add_argument(\n '-ae', type=str, nargs='+', action='append',\n default=[], metavar='somename.inpcrd,somename.prmtop',\n help='Amber energies.')\n amb_args.add_argument(\n '-abo', type=str, nargs='+', action='append',\n default=[], metavar='somename.in',\n help=('Amber bonds (post-FF optimization).'))\n amb_args.add_argument(\n '-aao', type=str, nargs='+', action='append',\n default=[], metavar='somename.in',\n help=('Amber angles (post-FF optimization).'))\n amb_args.add_argument(\n '-ato', type=str, nargs='+', action='append',\n default=[], metavar='somename.in',\n help=('Amber torsion (post-FF optimization).'))\n amb_args.add_argument(\n '-ae1', type=str, nargs='+', action='append',\n default=[], metavar='somename.in',\n help='Amber energy (pre-FF optimization).')\n amb_args.add_argument(\n '-ae1o', type=str, nargs='+', action='append',\n default=[], metavar='somename.in',\n help='Amber energy (post-FF optimization).')\n amb_args.add_argument(\n '-ah', type=str, nargs='+', action='append',\n default=[], metavar='somename.in',\n help='Amber Hessian (post-FF optimization).')\n amb_args.add_argument(\n '-aha', type=str, nargs='+', action='append',\n default=[], metavar='somename.in',\n help='Amber Hessian (post-FF optimization).')\n return parser\n\ndef check_outs(filename, outs, classtype, direc):\n \"\"\"\n Reads a file if necessary. Checks the output dictionary first in\n case the file has already been loaded.\n\n Could work on easing the use of this by somehow reducing number of\n arguments required.\n \"\"\"\n logger.log(1, '>>> filename: {}'.format(filename))\n logger.log(1, '>>> outs: {}'.format(outs))\n logger.log(1, '>>> classtype: {}'.format(classtype))\n logger.log(1, '>>> direc: {}'.format(direc))\n if filename not in outs:\n outs[filename] = \\\n classtype(os.path.join(direc, filename))\n return outs[filename]\n\ndef collect_reference(path):\n \"\"\"\n Reads the data inside a reference data text file.\n\n This must have 3 columns:\n 1. Labels\n 2. Weights\n 3. 
Values\n \"\"\"\n data = []\n with open(path, 'r') as f:\n for i, line in enumerate(f):\n # Skip certain lines.\n if line[0] in ['-', '#']:\n continue\n # if line.startswith('-'):\n # continue\n # Remove everything following a # in a line.\n line = line.partition('#')[0]\n cols = line.split()\n # There should always be 3 columns.\n assert len(cols) == 3, \\\n 'Error reading line {} from {}: {}'.format(\n i, path, line)\n lbl, wht, val = cols\n datum = datatypes.Datum(lbl=lbl, wht=float(wht), val=float(val))\n # Added this from the function below, read_reference()\n lbl_to_data_attrs(datum, lbl)\n data.append(datum)\n return np.array(data)\n\n# Must be rewritten to go in a particular order of data types every time.\ndef collect_data(coms, inps, direc='.', sub_names=['OPT'], invert=None):\n \"\"\"\n Arguments\n ---------\n invert : None or float\n If given, will modify the smallest value of the Hessian to\n this value.\n \"\"\"\n # outs looks like:\n # {'filename1': <some class for filename1>,\n # 'filename2': <some class for filename2>,\n # 'filename3': <some class for filename3>\n # }\n outs = {}\n # List of Datum objects.\n data = []\n # REFERENCE DATA TEXT FILES\n # No grouping is necessary for this data type, so flatten the list of\n # lists.\n filenames = chain.from_iterable(coms['r'])\n for filename in filenames:\n # Unlike most datatypes, these Datum only get the attributes _lbl,\n # val and wht. This is to ensure that making and working with these\n # reference text files isn't too cumbersome.\n data.extend(collect_reference(os.path.join(direc, filename)))\n # MACROMODEL MM3* CURRENT PARAMETER VALUES\n filenames = chain.from_iterable(coms['mp'])\n for comma_filenames in filenames:\n # FF file and parameter file.\n name_fld, name_txt = comma_filenames.split(',')\n ff = datatypes.MM3(os.path.join(direc, name_fld))\n ff.import_ff()\n ff.params = parameters.trim_params_by_file(\n ff.params, os.path.join(direc, name_txt))\n for param in ff.params:\n data.extend([datatypes.Datum(\n val=param.value,\n com='mp',\n typ='p',\n src_1=name_fld,\n src_2=name_txt,\n idx_1=param.mm3_row,\n idx_2=param.mm3_col)])\n # JAGUAR ENERGIES\n filenames_s = coms['je']\n # idx_1 is the number used to group sets of relative energies.\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n mae = check_outs(filename, outs, filetypes.Mae, direc)\n # idx_2 corresponds to the structure inside the file in case the\n # .mae files contains multiple structures.\n for idx_2, structure in enumerate(mae.structures):\n try:\n energy = structure.props['r_j_Gas_Phase_Energy']\n except KeyError:\n energy = structure.props['r_j_QM_Energy']\n energy *= co.HARTREE_TO_KJMOL\n temp.append(datatypes.Datum(\n val=energy,\n com='je',\n typ='e',\n src_1=filename,\n idx_1=idx_1 + 1,\n idx_2=idx_2 + 1))\n # For this data type, we set everything relative.\n zero = min([x.val for x in temp])\n for datum in temp:\n datum.val -= zero\n data.extend(temp)\n # KJK\n # FOR A SINGLE MODEL SYSTEM FITTING\n # GAUSSIAN ENERGY\n filename_s = coms['ge1']\n for idx_1, filenames in enumerate(filename_s):\n temp = []\n for filename in filenames:\n log = check_outs(filename, outs, filetypes.GaussLog, direc)\n things_to_add = []\n for thing_label in co.GAUSSIAN_ENERGIES:\n thing = log.structures[0].props[thing_label]\n if ',' in thing:\n thing = [float(x) for x in thing.split(',')]\n else:\n thing = [float(thing)]\n things_to_add.append(thing)\n energies = [0.] 
* len(things_to_add[0])\n for thing_group in things_to_add:\n for i, thing in enumerate(thing_group):\n energies[i] += thing\n energies = [x * co.HARTREE_TO_KJMOL for x in energies]\n for i, e in enumerate(energies):\n temp.append(datatypes.Datum(\n val=e,\n com='ge1',\n typ='e1',\n src_1=filename,\n idx_1=idx_1 + 1,\n idx_2=i + 1))\n data.extend(temp)\n # FOR A SINGLE MODEL SYSTEM FITTING (Comparing to Optimized structure)\n # GAUSSIAN ENERGY\n filename_s = coms['ge1o']\n for idx_1, filenames in enumerate(filename_s):\n temp = []\n for filename in filenames:\n log = check_outs(filename, outs, filetypes.GaussLog, direc)\n things_to_add = []\n for thing_label in co.GAUSSIAN_ENERGIES:\n thing = log.structures[0].props[thing_label]\n if ',' in thing:\n thing = [float(x) for x in thing.split(',')]\n else:\n thing = [float(thing)]\n things_to_add.append(thing)\n energies = [0.] * len(things_to_add[0])\n for thing_group in things_to_add:\n for i, thing in enumerate(thing_group):\n energies[i] += thing\n energies = [x * co.HARTREE_TO_KJMOL for x in energies]\n for i, e in enumerate(energies):\n temp.append(datatypes.Datum(\n val=e,\n com='ge1o',\n typ='e1o',\n src_1=filename,\n idx_1=idx_1 + 1,\n idx_2=i + 1))\n data.extend(temp)\n # GAUSSIAN ENERGIES\n filename_s = coms['ge']\n for idx_1, filenames in enumerate(filename_s):\n temp = []\n for filename in filenames:\n log = check_outs(filename, outs, filetypes.GaussLog, direc)\n # This will be a list of lists. For example, let's say that\n # co.GAUSSIAN_ENERGIES is ['HF', 'ZeroPoint'], then\n # the 1st list in things_to_add would be the HF energies\n # and the 2nd list would be the ZP energies.\n #\n # Consider if you had ['HF', 'ZeroPoint'] as co.GAUSSIAN_ENERGIES\n # and your archive had this:\n # HF=0.634,0.2352\\ZeroPoint=0.01234,0.0164\n # The resulting things_to_add would be:\n # things_to_add = [[0.634, 0.2352],\n # [0.01234, 0.0164]]\n things_to_add = []\n # Remember, thing_label is whatever you specified in\n # co.GAUSSIAN_ENERGIES.\n for thing_label in co.GAUSSIAN_ENERGIES:\n # Consider if your Gaussian log archive has the following:\n # HF=0.234,0.1234,0.5732\n # Then, if co.GAUSSIAN_ENERGIES includes 'HF', then that\n # particular thing, or sublist that goes into things_to_add,\n # would look like:\n # thing = ['0.234', '0.1234', '0.5732']\n # Here's another example. Consider if your archive has the\n # property \"stupidproperty\":\n # stupidproperty=can,i,be,more,clear\n # Then this particular sublist, named thing, would be\n # thing = ['can', 'i', 'be', 'more', 'clear']\n # Lastly, consider if you have this:\n # ZeroPoint=0.12341\n # Then thing would be this:\n # thing = ['0.12341']\n thing = log.structures[0].props[thing_label]\n # Deal with multiple structures by checking for this\n # split here.\n if ',' in thing:\n # Note that the \"stupidproperty\" example would fail here\n # because its elements can not be converted to floats.\n thing = [float(x) for x in thing.split(',')]\n # Here, thing might look like:\n # thing = [0.1235235, 0.2352, 0.352345]\n else:\n # Here it would be a list with only one element.\n thing = [float(thing)]\n things_to_add.append(thing)\n # Initialize list of zeros. Python syntax looks funny sometimes.\n # The length of the things_to_add sublists should always be the\n # same if you're doing it right. I suppose you could add some\n # sort of assert here.\n energies = [0.] 
* len(things_to_add[0])\n # In this case, consider the earlier example where:\n # things_to_add = [[0.634, 0.2352],\n # [0.01234, 0.0164]]\n # Here, the first thing_group would be [0.634, 0.2352] and the\n # second thing_group would be [0.01234, 0.0164].\n for thing_group in things_to_add:\n # After the loop through the 1st thing_group, we would have\n # energies = [0.634, 0.2352]. After the 2nd thing_group, we\n # would have energies = [0.634 + 0.01234, 0.2352 + 0.0164].\n for i, thing in enumerate(thing_group):\n energies[i] += thing\n energies = [x * co.HARTREE_TO_KJMOL for x in energies]\n for i, e in enumerate(energies):\n temp.append(datatypes.Datum(\n val=e,\n com='ge',\n typ='e',\n src_1=filename,\n idx_1=idx_1 + 1,\n idx_2=i + 1))\n\n # This works when HF and ZeroPoint are used. Had to make it more\n # general.\n # Revisit how structures are stored in GaussLog when you have time.\n # hf = log.structures[0].props['HF']\n # zp = log.structures[0].props['ZeroPoint']\n # if ',' in hf:\n # hfs = map(float, hf.split(','))\n # zps = map(float, zp.split(','))\n # else:\n # hfs = [float(hf)]\n # zps = [float(zp)]\n # es = []\n # for hf, zp in izip(hfs, zps):\n # es = (hf + zp) * co.HARTREE_TO_KJMOL\n # for i, e in enumerate(es):\n # temp.append(datatypes.Datum(\n # val=e,\n # com='ge',\n # typ='e',\n # src_1=filename,\n # idx_1=idx_1 + 1,\n # idx_2=i + 1))\n\n # Here's the old code from before we supported multiple energies.\n # I think it's helpful history for new coders trying to understand\n # how to write in new datatypes. Notice how the new code utilizes\n # idx_2.\n\n # hf = float(log.structures[0].props['HF'])\n # zp = float(log.structures[0].props['ZeroPoint'])\n # energy = (hf + zp) * co.HARTREE_TO_KJMOL\n # # We don't use idx_2 since we assume there is only one structure\n # # in a Gaussian .log. I think that's always the case.\n # temp.append(datatypes.Datum(\n # val=energy,\n # com='ge',\n # typ='e',\n # src_1=filename,\n # idx_1=idx_1 + 1))\n\n zero = min([x.val for x in temp])\n for datum in temp:\n datum.val -= zero\n data.extend(temp)\n # MACROMODEL ENERGIES\n filenames_s = coms['me']\n ind = 'pre'\n for idx_1, filenames in enumerate(filenames_s):\n for filename in filenames:\n name_mae = inps[filename].name_mae\n mae = check_outs(name_mae, outs, filetypes.Mae, direc)\n indices = inps[filename]._index_output_mae\n # This is list of sets. The 1st value in the set corresponds to the\n # number of the structure. 
The 2nd value is the structure class.\n selected_structures = filetypes.select_structures(\n mae.structures, indices, ind)\n for idx_2, structure in selected_structures:\n data.append(datatypes.Datum(\n val=structure.props['r_mmod_Potential_Energy-MM3*'],\n com='me',\n typ='e',\n src_1=inps[filename].name_mae,\n idx_1=idx_1 + 1,\n idx_2=idx_2 + 1))\n # KJK\n # GAUSSIAN TO AMBER BONDS (PRE OPT)\n filenames_s = coms['gab']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.extend(collect_structural_data_from_amber_geo(\n filename, inps, outs, direc, 'gab', 'pre', 'bonds', idx_1 = idx_1))\n data.extend(temp) \n # GAUSSIAN TO AMBER ANGLES (PRE OPT)\n filenames_s = coms['gaa']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.extend(collect_structural_data_from_amber_geo(\n filename, inps, outs, direc, 'gaao', 'pre', 'angles', idx_1 = idx_1))\n data.extend(temp) \n # GAUSSIAN TO AMBER TORSIONS (PRE OPT)\n filenames_s = coms['gat']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.extend(collect_structural_data_from_amber_geo(\n filename, inps, outs, direc, 'gato', 'pre', 'torsions', idx_1 = idx_1))\n data.extend(temp) \n # GAUSSIAN TO AMBER BONDS (POST OPT)\n filenames_s = coms['gabo']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.extend(collect_structural_data_from_amber_geo(\n filename, inps, outs, direc, 'gabo', 'opt', 'bonds', idx_1 = idx_1))\n data.extend(temp) \n # GAUSSIAN TO AMBER ANGLES (POST OPT)\n filenames_s = coms['gaao']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.extend(collect_structural_data_from_amber_geo(\n filename, inps, outs, direc, 'gaao', 'opt', 'angles', idx_1 = idx_1))\n data.extend(temp) \n # GAUSSIAN TO AMBER TORSIONS (POST OPT)\n filenames_s = coms['gato']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.extend(collect_structural_data_from_amber_geo(\n filename, inps, outs, direc, 'gato', 'opt', 'torsions', idx_1 = idx_1))\n data.extend(temp) \n # AMBER BONDS\n filenames_s = coms['abo']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.extend(collect_structural_data_from_amber_geo(\n filename, inps, outs, direc, 'abo', 'opt', 'bonds', idx_1 = idx_1))\n data.extend(temp) \n # AMBER ANGLES\n filenames_s = coms['aao']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.extend(collect_structural_data_from_amber_geo(\n filename, inps, outs, direc, 'aao', 'opt', 'angles', idx_1 = idx_1))\n data.extend(temp) \n # AMBER TORSIONS\n filenames_s = coms['ato']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.extend(collect_structural_data_from_amber_geo(\n filename, inps, outs, direc, 'ato', 'opt', 'torsions', idx_1 = idx_1))\n data.extend(temp) \n # AMBER ENERGY\n filenames_s = coms['ae1']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.append(collect_structural_data_from_amber_ene(\n filename, inps, outs, direc, 'ae1', 'pre', 'e1', idx_1 = idx_1))\n data.extend(temp) \n\n # AMBER OPTIMIZED ENERGY\n filenames_s = coms['ae1o']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.append(collect_structural_data_from_amber_ene(\n filename, inps, outs, 
direc, 'ae1o', 'opt', 'e1o', idx_1 = idx_1))\n data.extend(temp) \n \n # AMBER HESSIAN\n filenames = chain.from_iterable(coms['ah'])\n for filename in filenames:\n name_hes = inps[filename].name_hes\n hes = check_outs(name_hes, outs, filetypes.AmberHess, direc)\n hess = hes.hessian\n # hessian extracted from Amber is already mass weighted\n low_tri_idx = np.tril_indices_from(hess)\n low_tri = hess[low_tri_idx]\n int2 = []\n int3 = []\n int4 = []\n if os.path.isfile(\"calc/geo.npy\"):\n hes_geo = None\n if np.__version__ >= '1.16.4':\n hes_geo = np.load(\"calc/geo.npy\",allow_pickle=True)\n else:\n hes_geo = np.load(\"calc/geo.npy\")\n for ele in hes_geo:\n inter = np.count_nonzero(ele)\n a,b,c,d = ele\n if inter == 2:\n a = int(a)\n b = int(b)\n int2.append([a,b])\n elif inter == 3:\n a = int(a)\n c = int(c)\n int3.append([a,c])\n elif inter == 4: \n a = int(a)\n d = int(d) \n int4.append([a,d])\n frozen = 0\n f_atom = []\n if os.path.isfile(\"fixedatoms.txt\"):\n frozen = 1\n ref = open(\"fixedatoms.txt\",\"r\")\n flines = ref.readlines()\n for fline in flines:\n line = fline.split()\n if len(line) == 1:\n f_atom.append(int(line[0]))\n print(\"Reading fixedatoms.txt\\nFixed Atom Numbers:\",f_atom)\n def int_wht(at_1,at_2):\n \"\"\"\n Weighted value for hessian matrix\n default value\n diagonal zero\n 1-2 0.031\n 1-3 0.031\n 1-4 0.31\n else 0.031\n \"\"\"\n apair = [at_1,at_2]\n if at_1 == at_2:\n return 0.0\n elif apair in int2:\n return co.WEIGHTS['h12']\n elif apair in int3:\n return co.WEIGHTS['h14']\n elif apair in int4:\n return co.WEIGHTS['h14']\n elif frozen:\n if at_1 in f_atom or at_2 in f_atom:\n #print(\"DEBUG:\",at_1,at_2,f_atom)\n return 0.0\n else:\n return 1.0\n else:\n return 1.0\n data.extend([datatypes.Datum(\n val=e,\n com='ah',\n typ='h',\n src_1=hes.filename,\n idx_1=x + 1,\n idx_2=y + 1,\n atm_1=int((x)//3+1),\n atm_2=int((y)//3+1),\n wht = int_wht(int((x)//3+1),int((y)//3+1)))\n for e, x, y in zip(\n low_tri, low_tri_idx[0], low_tri_idx[1])])\n \n # AMBER ENERGIES\n# filenames_s = coms['ae']\n# for idx_1, filenames in enumerate(filenames_s):\n# logger.log(1, '>>> idx_1: {}'.format(idx_1))\n# logger.log(1, '>>> filenames: {}'.format(filenames))\n# for idx_2, comma_sep_filenames in enumerate(filenames):\n# name_1, name_2 = comma_sep_filenames.split(',')\n# out = check_outs(\n# comma_sep_filenames, outs, filetypes.AmberOut, direc)\n# # Right now, path is a comma separated string.\n# out.path = inps[comma_sep_filenames].out\n# logger.log(1, '>>> out: {}'.format(out))\n# energy = out.read_energy()\n# data.append(datatypes.Datum(\n# val=energy,\n# com='ae',\n# typ='e',\n# src_1=name_1,\n# src_2=name_2,\n# idx_1=idx_1 + 1,\n# idx_2=idx_2 + 1))\n # JAGUAR AVERAGE ENERGIES\n filenames_s = coms['jea']\n # idx_1 is the number used to group sets of relative energies.\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n mae = check_outs(filename, outs, filetypes.Mae, direc)\n # idx_2 corresponds to the structure inside the file in case the\n # .mae files contains multiple structures.\n for idx_2, structure in enumerate(mae.structures):\n try:\n energy = structure.props['r_j_Gas_Phase_Energy']\n except KeyError:\n energy = structure.props['r_j_QM_Energy']\n energy *= co.HARTREE_TO_KJMOL\n temp.append(datatypes.Datum(\n val=energy,\n com='jea',\n typ='ea',\n src_1=filename,\n idx_1=idx_1 + 1,\n idx_2=idx_2 + 1))\n # For this data type, we set everything relative.\n avg = sum([x.val for x in temp]) / len(temp)\n for datum in temp:\n datum.val 
-= avg\n data.extend(temp)\n # GAUSSIAN AVERAGE ENERGIES\n filename_s = coms['gea']\n for idx_1, filenames in enumerate(filename_s):\n temp = []\n for filename in filenames:\n log = check_outs(filename, outs, filetypes.GaussLog, direc)\n things_to_add = []\n for thing_label in co.GAUSSIAN_ENERGIES:\n thing = log.structures[0].props[thing_label]\n if ',' in thing:\n thing = [float(x) for x in thing.split(',')]\n else:\n thing = [float(thing)]\n things_to_add.append(thing)\n energies = [0.] * len(things_to_add[0])\n for thing_group in things_to_add:\n for i, thing in enumerate(thing_group):\n energies[i] += thing\n energies = [x * co.HARTREE_TO_KJMOL for x in energies]\n for i, e in enumerate(energies):\n temp.append(datatypes.Datum(\n val=e,\n com='gea',\n typ='ea',\n src_1=filename,\n idx_1=idx_1 + 1,\n idx_2=i + 1))\n avg = sum([x.val for x in temp]) / len(temp)\n for datum in temp:\n datum.val -= avg\n data.extend(temp)\n # MACROMODEL AVERAGE ENERGIES\n filenames_s = coms['mea']\n ind = 'pre'\n # idx_1 is the number used to group sets of relative energies.\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n name_mae = inps[filename].name_mae\n mae = check_outs(name_mae, outs, filetypes.Mae, direc)\n indices = inps[filename]._index_output_mae\n # This is list of sets. The 1st value in the set corresponds to the\n # number of the structure. The 2nd value is the structure class.\n selected_structures = filetypes.select_structures(\n mae.structures, indices, ind)\n for idx_2, structure in selected_structures:\n temp.append(datatypes.Datum(\n val=structure.props['r_mmod_Potential_Energy-MM3*'],\n com='mea',\n typ='ea',\n src_1=inps[filename].name_mae,\n idx_1=idx_1 + 1,\n idx_2=idx_2 + 1))\n avg = sum([x.val for x in temp]) / len(temp)\n for datum in temp:\n datum.val -= avg\n data.extend(temp)\n # JAGUAR ENERGIES COMPARED TO OPTIMIZED MM\n filenames_s = coms['jeo']\n # idx_1 is the number used to group sets of relative energies.\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n mae = check_outs(filename, outs, filetypes.Mae, direc)\n # idx_2 corresponds to the structure inside the file in case the\n # .mae files contains multiple structures.\n for idx_2, structure in enumerate(mae.structures):\n try:\n energy = structure.props['r_j_Gas_Phase_Energy']\n except KeyError:\n energy = structure.props['r_j_QM_Energy']\n energy *= co.HARTREE_TO_KJMOL\n temp.append(datatypes.Datum(\n val=energy,\n com='jeo',\n typ='eo',\n src_1=filename,\n idx_1=idx_1 + 1,\n idx_2=idx_2 + 1))\n # For this data type, we set everything relative.\n zero = min([x.val for x in temp])\n for datum in temp:\n datum.val -= zero\n data.extend(temp)\n # GAUSSIAN ENERGIES RELATIVE TO OPTIMIZED MM\n filename_s = coms['geo']\n for idx_1, filenames in enumerate(filename_s):\n temp = []\n for filename in filenames:\n log = check_outs(filename, outs, filetypes.GaussLog, direc)\n things_to_add = []\n for thing_label in co.GAUSSIAN_ENERGIES:\n thing = log.structures[0].props[thing_label]\n if ',' in thing:\n thing = [float(x) for x in thing.split(',')]\n else:\n thing = [float(thing)]\n things_to_add.append(thing)\n energies = [0.] 
* len(things_to_add[0])\n for thing_group in things_to_add:\n for i, thing in enumerate(thing_group):\n energies[i] += thing\n energies = [x * co.HARTREE_TO_KJMOL for x in energies]\n for i, e in enumerate(energies):\n temp.append(datatypes.Datum(\n val=e,\n com='geo',\n typ='eo',\n src_1=filename,\n idx_1=idx_1 + 1,\n idx_2=i + 1))\n zero = min([x.val for x in temp])\n for datum in temp:\n datum.val -= zero\n data.extend(temp)\n # MACROMODEL OPTIMIZED ENERGIES\n filenames_s = coms['meo']\n ind = 'opt'\n for idx_1, filenames in enumerate(filenames_s):\n for filename in filenames:\n name_mae = inps[filename].name_mae\n mae = check_outs(name_mae, outs, filetypes.Mae, direc)\n indices = inps[filename]._index_output_mae\n selected_structures = filetypes.select_structures(\n mae.structures, indices, ind)\n for idx_2, structure in selected_structures:\n data.append(datatypes.Datum(\n val=structure.props['r_mmod_Potential_Energy-MM3*'],\n com='meo',\n typ='eo',\n src_1=inps[filename].name_mae,\n idx_1=idx_1 + 1,\n idx_2=idx_2 + 1))\n # JAGUAR ENERGIES RELATIVE TO AVERAGE COMPARED TO OPTIMIZED MM\n filenames_s = coms['jeao']\n # idx_1 is the number used to group sets of relative energies.\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n mae = check_outs(filename, outs, filetypes.Mae, direc)\n # idx_2 corresponds to the structure inside the file in case the\n # .mae files contains multiple structures.\n for idx_2, structure in enumerate(mae.structures):\n try:\n energy = structure.props['r_j_Gas_Phase_Energy']\n except KeyError:\n energy = structure.props['r_j_QM_Energy']\n energy *= co.HARTREE_TO_KJMOL\n temp.append(datatypes.Datum(\n val=energy,\n com='jeao',\n typ='eao',\n src_1=filename,\n idx_1=idx_1 + 1,\n idx_2=idx_2 + 1))\n avg = sum([x.val for x in temp]) / len(temp)\n for datum in temp:\n datum.val -= avg\n data.extend(temp)\n # GAUSSIAN AVERAGE ENERGIES RELATIVE TO OPTIMIZED MM\n filename_s = coms['geao']\n for idx_1, filenames in enumerate(filename_s):\n temp = []\n for filename in filenames:\n log = check_outs(filename, outs, filetypes.GaussLog, direc)\n things_to_add = []\n for thing_label in co.GAUSSIAN_ENERGIES:\n thing = log.structures[0].props[thing_label]\n if ',' in thing:\n thing = [float(x) for x in thing.split(',')]\n else:\n thing = [float(thing)]\n things_to_add.append(thing)\n energies = [0.] 
* len(things_to_add[0])\n for thing_group in things_to_add:\n for i, thing in enumerate(thing_group):\n energies[i] += thing\n energies = [x * co.HARTREE_TO_KJMOL for x in energies]\n for i, e in enumerate(energies):\n temp.append(datatypes.Datum(\n val=e,\n com='geao',\n typ='eao',\n src_1=filename,\n idx_1=idx_1 + 1,\n idx_2=i + 1))\n avg = sum([x.val for x in temp]) / len(temp)\n for datum in temp:\n datum.val -= avg\n data.extend(temp)\n # MACROMODEL OPTIMIZED ENERGIES RELATIVE TO AVERAGE\n filenames_s = coms['meao']\n ind = 'opt'\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n name_mae = inps[filename].name_mae\n mae = check_outs(name_mae, outs, filetypes.Mae, direc)\n indices = inps[filename]._index_output_mae\n selected_structures = filetypes.select_structures(\n mae.structures, indices, ind)\n for idx_2, structure in selected_structures:\n temp.append(datatypes.Datum(\n val=structure.props['r_mmod_Potential_Energy-MM3*'],\n com='meao',\n typ='eao',\n src_1=inps[filename].name_mae,\n idx_1=idx_1 + 1,\n idx_2=idx_2 + 1))\n avg = sum([x.val for x in temp]) / len(temp)\n for datum in temp:\n datum.val -= avg\n data.extend(temp)\n # JAGUAR BONDS\n filenames = chain.from_iterable(coms['jb'])\n for filename in filenames:\n data.extend(collect_structural_data_from_mae(\n filename, inps, outs, direc, sub_names, 'jb', 'pre', 'bonds'))\n # GAUSSIAN BONDS\n filenames = chain.from_iterable(coms['gtb'])\n for filename in filenames:\n data.extend(collect_structural_data_from_tinker_log_for_gaussian(\n filename, inps, outs, direc, 'gtb', 'pre', 'bonds'))\n # GAUSSIAN ANGLES\n filenames = chain.from_iterable(coms['gta'])\n for filename in filenames:\n data.extend(collect_structural_data_from_tinker_log_for_gaussian(\n filename, inps, outs, direc, 'gta', 'pre', 'angles'))\n # GAUSSIAN TORSIONS\n filenames = chain.from_iterable(coms['gtt'])\n for filename in filenames:\n data.extend(collect_structural_data_from_tinker_log_for_gaussian(\n filename, inps, outs, direc, 'gtt', 'pre', 'torsions'))\n # TINKER SP BONDS\n filenames = chain.from_iterable(coms['tb'])\n for filename in filenames:\n data.extend(collect_structural_data_from_tinker_log(\n filename, inps, outs, direc, 'tb', 'pre', 'bonds'))\n # TINKER SP ANGLES\n filenames = chain.from_iterable(coms['ta'])\n for filename in filenames:\n data.extend(collect_structural_data_from_tinker_log(\n filename, inps, outs, direc, 'ta', 'pre', 'angles'))\n # TINKER SP TORSIONS\n filenames = chain.from_iterable(coms['tt'])\n for filename in filenames:\n data.extend(collect_structural_data_from_tinker_log(\n filename, inps, outs, direc, 'tt', 'pre', 'torsions'))\n # TINKER OPTIMIZED BONDS\n filenames = chain.from_iterable(coms['tbo'])\n for filename in filenames:\n data.extend(collect_structural_data_from_tinker_log(\n filename, inps, outs, direc, 'tbo', 'opt', 'bonds'))\n # TINKER OPTIMIZED ANGLE\n filenames = chain.from_iterable(coms['tao'])\n for filename in filenames:\n data.extend(collect_structural_data_from_tinker_log(\n filename, inps, outs, direc, 'tao', 'opt', 'angles'))\n # TINKER OPTIMIZED ANGLE\n filenames = chain.from_iterable(coms['tto'])\n for filename in filenames:\n data.extend(collect_structural_data_from_tinker_log(\n filename, inps, outs, direc, 'tto', 'opt', 'torsions'))\n # TINKER ENERGIES RELATIVE TO LOWEST\n filenames_s = coms['te']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.append(collect_structural_data_from_tinker_log(\n filename, inps, 
outs, direc, 'te', 'pre', 'e', idx_1 = idx_1))\n zero = min([x.val for x in temp])\n for datum in temp:\n datum.val -= zero\n data.extend(temp)\n # TINKER ENERGIES RELATIVE TO AVERAGE\n filenames_s = coms['tea']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.append(collect_structural_data_from_tinker_log(\n filename, inps, outs, direc, 'tea', 'pre', 'ea', idx_1 = idx_1))\n avg = sum([x.val for x in temp]) / len(temp)\n for datum in temp:\n datum.val -= avg\n data.extend(temp)\n # TINKER OPTIMIZED ENERGIES RELATIVE LOWEST\n filenames_s = coms['teo']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.append(collect_structural_data_from_tinker_log(\n filename, inps, outs, direc, 'teo', 'opt', 'eo', idx_1 = idx_1))\n zero = min([x.val for x in temp])\n for datum in temp:\n datum.val -= zero\n data.extend(temp)\n # TINKER OPTIMIZED ENERGIES RELATIVE TO AVERAGE\n filenames_s = coms['teao']\n for idx_1, filenames in enumerate(filenames_s):\n temp = []\n for filename in filenames:\n temp.append(collect_structural_data_from_tinker_log(\n filename, inps, outs, direc, 'teao', 'opt', 'eao', idx_1 = idx_1))\n avg = sum([x.val for x in temp]) / len(temp)\n for datum in temp:\n datum.val -= avg\n data.extend(temp)\n # TINKER HESSIAN\n filenames = chain.from_iterable(coms['th'])\n for filename in filenames:\n xyz_struct = inps[filename].structures[0]\n num_atoms = xyz_struct.props['total atoms']\n name_hes = inps[filename].name_hes\n hes = check_outs(name_hes, outs, filetypes.TinkerHess, direc)\n hes.natoms = num_atoms\n hess = hes.hessian\n datatypes.mass_weight_hessian(hess, xyz_struct.atoms)\n # Need to figure out dummy atoms at somepoint?\n # I'm not even sure if we can use dummy atoms in TINKER.\n low_tri_idx = np.tril_indices_from(hess)\n low_tri = hess[low_tri_idx]\n data.extend([datatypes.Datum(\n val=e,\n com='th',\n typ='h',\n src_1=hes.filename,\n idx_1=x + 1,\n idx_2=y + 1)\n for e, x, y in zip(\n low_tri, low_tri_idx[0], low_tri_idx[1])])\n # TINKER EIGENMATRIX USING GAUSSIAN EIGENVECTORS\n filenames = chain.from_iterable(coms['tgeig'])\n for comma_filenames in filenames:\n name_xyz, name_gau_log = comma_filenames.split(',')\n name_xyz_hes = inps[name_xyz].name_hes\n xyz = check_outs(name_xyz, outs, filetypes.Tinker_xyz, direc)\n xyz_hes = check_outs(name_xyz_hes, outs, filetypes.TinkerHess, direc)\n gau_log = check_outs(name_gau_log, outs, filetypes.GaussLog, direc)\n xyz_struct = xyz.structures[0]\n num_atoms = xyz_struct.props['total atoms']\n xyz_hes.natoms = num_atoms\n hess = xyz_hes.hessian\n datatypes.mass_weight_hessian(hess, xyz_struct.atoms)\n evec = gau_log.evecs\n try:\n eigenmatrix = np.dot(np.dot(evec, hess), evec.T)\n except ValueError:\n logger.warning('Matrices not aligned!')\n logger.warning('Hessian retrieved from {}: {}'.format(\n name_mae_log, hess.shape))\n logger.warning('Eigenvectors retrieved from {}: {}'.format(\n name_gau_log, evec.shape))\n raise\n low_tri_idx = np.tril_indices_from(eigenmatrix)\n low_tri = eigenmatrix[low_tri_idx]\n data.extend([datatypes.Datum(\n val=e,\n com='tgeig',\n typ='eig',\n src_1=name_xyz,\n src_2=name_gau_log,\n idx_1=x + 1,\n idx_2=y + 1)\n for e, x, y in zip(\n low_tri, low_tri_idx[0], low_tri_idx[1])])\n # MACROMODEL BONDS\n filenames = chain.from_iterable(coms['mb'])\n for filename in filenames:\n data.extend(collect_structural_data_from_mae(\n filename, inps, outs, direc, sub_names, 'mb', 'opt', 'bonds'))\n # JAGUAR ANGLES\n filenames 
= chain.from_iterable(coms['ja'])\n for filename in filenames:\n data.extend(collect_structural_data_from_mae(\n filename, inps, outs, direc, sub_names, 'ja', 'pre', 'angles'))\n # MACROMODEL BONDS\n filenames = chain.from_iterable(coms['ma'])\n for filename in filenames:\n data.extend(collect_structural_data_from_mae(\n filename, inps, outs, direc, sub_names, 'ma', 'opt', 'angles'))\n # JAGUAR BONDS\n filenames = chain.from_iterable(coms['jt'])\n for filename in filenames:\n data.extend(collect_structural_data_from_mae(\n filename, inps, outs, direc, sub_names, 'jt', 'pre', 'torsions'))\n # MACROMODEL BONDS\n filenames = chain.from_iterable(coms['mt'])\n for filename in filenames:\n data.extend(collect_structural_data_from_mae(\n filename, inps, outs, direc, sub_names, 'mt', 'opt', 'torsions'))\n # JAGUAR CHARGES\n filenames = chain.from_iterable(coms['jq'])\n for filename in filenames:\n mae = check_outs(filename, outs, filetypes.Mae, direc)\n for idx_1, structure in enumerate(mae.structures):\n for atom in structure.atoms:\n # If it doesn't have the property b_q_use_charge,\n # use it.\n # If b_q_use_charge is 1, use it. If it's 0, don't\n # use it.\n if not 'b_q_use_charge' in atom.props or \\\n atom.props['b_q_use_charge']:\n data.append(datatypes.Datum(\n val=atom.partial_charge,\n com='jq',\n typ='q',\n src_1=filename,\n idx_1=idx_1 + 1,\n atm_1=atom.index))\n # MACROMODEL CHARGES\n filenames = chain.from_iterable(coms['mq'])\n for filename in filenames:\n name_mae = inps[filename].name_mae\n mae = check_outs(name_mae, outs, filetypes.Mae, direc)\n # Pick out the right structures. Sometimes our .com files\n # generate many structures in a .mae, not all of which\n # apply to this command.\n structures = filetypes.select_structures(\n mae.structures, inps[filename]._index_output_mae, 'pre')\n for idx_1, structure in structures:\n for atom in structure.atoms:\n if not 'b_q_use_charge' in atom.props or \\\n atom.props['b_q_use_charge']:\n data.append(datatypes.Datum(\n val=atom.partial_charge,\n com='mq',\n typ='q',\n src_1=filename,\n idx_1=idx_1 + 1,\n atm_1=atom.index))\n # MACROMODEL+GUASSIAN ESP \n filenames = chain.from_iterable(coms['mgESP'])\n for comma_filenames in filenames:\n charges_list = []\n filename_mae, name_gau_chk = comma_filenames.split(',')\n #Filename of the output *mae file (i.e. filename.q2mm.mae)\n name_mae = inps[filename_mae].name_mae\n mae = check_outs(name_mae, outs, filetypes.Mae, direc)\n structures = filetypes.select_structures(\n mae.structures, inps[filename_mae]._index_output_mae, 'pre')\n for idx_1, structure in structures:\n for atom in structure.atoms:\n ### I think we want all the charges, right?\n #if not 'b_q_use_charge' in atom.props or \\\n # atom.props['b_q_use_charge']:\n if atom.atomic_num > 0:\n charges_list.append(atom.partial_charge)\n com_filename = os.path.splitext(name_gau_chk)[0] + '.ESP.q2mm.com'\n inps[com_filename].charge_list = charges_list\n inps[com_filename].write_com()\n inps[com_filename].run_gaussian()\n name_gauss_log = inps[com_filename].name_log\n gauss = check_outs(name_gauss_log, outs, filetypes.GaussLog, direc)\n esp_rms = gauss.esp_rms\n if esp_rms < 0.0:\n raise Exception('A negative RMS was obtained for the ESP fitting '\n 'which indicates an error occured. 
Look at the '\n 'following file: {}'.format(name_gauss_log))\n data.append(datatypes.Datum(\n val=esp_rms,\n com='mgESP',\n typ='esp',\n src_1= name_mae,\n src_2='gaussian',\n idx_1 = 1))\n # MACROMODEL+JAGUAR ESP \n ## This does not work, I still need to write code to support Jaguaer. -TR\n filenames = chain.from_iterable(coms['mjESP'])\n for comma_filenames in filenames:\n charges_list = []\n name_mae, name_jag_chk = comma_filenames.split(',')\n mae = check_outs(name_mae, outs, filetypes.Mae, direc)\n structures = filetypes.select_structures(\n mae.structures, inps[name_mae]._index_output_mae, 'pre')\n for idx_1, structure in structures:\n for atom in structure.atoms:\n if not 'b_q_use_charge' in atom.props or \\\n atom.props['b_q_use_charge']:\n charges_list.append(atom.partial_charge)\n ###Filler for ESP calculations####\n ### This is what is used in anna's code\n current_RMS = run_ChelpG_inp.run_JCHelpG(charges_list,name_jag_chk)\n \n ### End of filler\n if current_RMS < 0:\n sys.exit(\"Error while computing RMS. Exiting\")\n data.append(datatypes.Datum(\n val=current_RMS,\n com='mjESP',\n typ='esp',\n src_1=name_mae,\n idx_1=1))\n # JAGUAR CHARGES EXCLUDING ALIPHATIC HYDROGENS\n filenames = chain.from_iterable(coms['jqh'])\n for filename in filenames:\n mae = check_outs(filename, outs, filetypes.Mae, direc)\n for idx_1, structure in enumerate(mae.structures):\n aliph_hyds = structure.get_aliph_hyds()\n for atom in structure.atoms:\n # If it doesn't have the property b_q_use_charge,\n # use it.\n # If b_q_use_charge is 1, use it. If it's 0, don't\n # use it.\n if (not 'b_q_use_charge' in atom.props or \\\n atom.props['b_q_use_charge']) and \\\n not atom in aliph_hyds:\n charge = atom.partial_charge\n if atom.atom_type == 3:\n for bonded_atom_index in atom.bonded_atom_indices:\n bonded_atom = structure.atoms[bonded_atom_index - 1]\n if bonded_atom in aliph_hyds:\n charge += bonded_atom.partial_charge\n data.append(datatypes.Datum(\n val=charge,\n com='jqh',\n typ='qh',\n src_1=filename,\n idx_1=idx_1 + 1,\n atm_1=atom.index))\n # MACROMODEL CHARGES EXCLUDING ALIPHATIC HYDROGENS\n filenames = chain.from_iterable(coms['mqh'])\n for filename in filenames:\n name_mae = inps[filename].name_mae\n mae = check_outs(name_mae, outs, filetypes.Mae, direc)\n # Pick out the right structures. 
Sometimes our .com files\n # generate many structures in a .mae, not all of which\n # apply to this command.\n structures = filetypes.select_structures(\n mae.structures, inps[filename]._index_output_mae, 'pre')\n for idx_1, structure in structures:\n aliph_hyds = structure.get_aliph_hyds()\n for atom in structure.atoms:\n if (not 'b_q_use_charge' in atom.props or \\\n atom.props['b_q_use_charge']) and \\\n atom not in aliph_hyds:\n\n # Since the charge is always zero AS FAR AS I KNOW, this\n # whole recalculation of the charge is totally unnecessary.\n # However, I want users to be aware that if a situation\n # arises that goes beyond something I experienced,\n # uncommenting this section, thereby making it more like the\n # code for -jqh, should solve the problem.\n\n # charge = atom.partial_charge\n # if atom.atom_type == 3:\n # for bonded_atom_index in atom.bonded_atom_indices:\n # bonded_atom = structure.atoms[bonded_atom_index - 1]\n # if bonded_atom in aliph_hyds:\n # charge += bonded_atom.partial_charge\n\n data.append(datatypes.Datum(\n # val=charge,\n val=atom.partial_charge,\n com='mqh',\n typ='qh',\n src_1=filename,\n idx_1=idx_1 + 1,\n atm_1=atom.index))\n # JAGUAR CHARGES EXCLUDING ALL SINGLE BONDED HYDROGENS\n filenames = chain.from_iterable(coms['jqa'])\n for filename in filenames:\n mae = check_outs(filename, outs, filetypes.Mae, direc)\n for idx_1, structure in enumerate(mae.structures):\n hyds = structure.get_hyds()\n for atom in structure.atoms:\n # Check if we want to use this charge and ensure it's not a\n # hydrogen.\n if (not 'b_q_use_charge' in atom.props or \\\n atom.props['b_q_use_charge']) and \\\n atom not in hyds:\n charge = atom.partial_charge\n # Check if it's bonded to a hydrogen.\n for bonded_atom_index in atom.bonded_atom_indices:\n bonded_atom = structure.atoms[bonded_atom_index - 1]\n if bonded_atom in hyds:\n if len(bonded_atom.bonded_atom_indices) < 2:\n charge += bonded_atom.partial_charge\n data.append(datatypes.Datum(\n val=charge,\n com='jqa',\n typ='qa',\n src_1=filename,\n idx_1=idx_1 + 1,\n atm_1=atom.index))\n # MACROMODEL CHARGES EXCLUDING ALL SINGLE BONDED HYDROGENS\n filenames = chain.from_iterable(coms['mqa'])\n for filename in filenames:\n name_mae = inps[filename].name_mae\n mae = check_outs(name_mae, outs, filetypes.Mae, direc)\n # Pick out the right structures. 
Sometimes our .com files\n # generate many structures in a .mae, not all of which\n # apply to this command.\n structures = filetypes.select_structures(\n mae.structures, inps[filename]._index_output_mae, 'pre')\n for idx_1, structure in structures:\n hyds = structure.get_hyds()\n for atom in structure.atoms:\n if (not 'b_q_use_charge' in atom.props or \\\n atom.props['b_q_use_charge']) and \\\n atom not in hyds:\n charge = atom.partial_charge\n for bonded_atom_index in atom.bonded_atom_indices:\n bonded_atom = structure.atoms[bonded_atom_index - 1]\n if bonded_atom in hyds:\n if len(bonded_atom.bonded_atom_indices) < 2:\n charge += bonded_atom.partial_charge\n data.append(datatypes.Datum(\n val=charge,\n com='mqa',\n typ='qa',\n src_1=filename,\n idx_1=idx_1 + 1,\n atm_1=atom.index))\n # JAGUAR HESSIAN\n filenames = chain.from_iterable(coms['jh'])\n for filename in filenames:\n jin = check_outs(filename, outs, filetypes.JaguarIn, direc)\n hess = jin.hessian\n datatypes.mass_weight_hessian(hess, jin.structures[0].atoms)\n if invert:\n evals, evecs = np.linalg.eigh(hess)\n datatypes.replace_minimum(evals, value=invert)\n hess = evecs.dot(np.diag(evals).dot(evecs.T))\n low_tri_idx = np.tril_indices_from(hess)\n low_tri = hess[low_tri_idx]\n data.extend([datatypes.Datum(\n val=e,\n com='jh',\n typ='h',\n src_1=jin.filename,\n idx_1=x + 1,\n idx_2=y + 1)\n for e, x, y in zip(\n low_tri, low_tri_idx[0], low_tri_idx[1])])\n # GAUSSIAN HESSIAN\n filenames = chain.from_iterable(coms['gh'])\n for filename in filenames:\n log = check_outs(filename, outs, filetypes.GaussLog, direc)\n log.read_archive()\n # For now, the Hessian is stored on the structures inside the filetype.\n hess = log.structures[0].hess\n datatypes.mass_weight_hessian(hess, log.structures[0].atoms)\n if invert:\n # Faster to use scipy.linalg.eig or scipy.linalg.eigsh (even\n # faster).\n evals, evecs = np.linalg.eigh(hess)\n # Returns True.\n # print(np.allclose(evecs.dot(np.diag(evals).dot(evecs.T)), hess))\n datatypes.replace_minimum(evals, value=invert)\n hess = evecs.dot(np.diag(evals).dot(evecs.T))\n # Oh crap, just realized this probably needs to be mass weighted.\n # WARNING: This option may need to be mass weighted!\n low_tri_idx = np.tril_indices_from(hess)\n low_tri = hess[low_tri_idx]\n data.extend([datatypes.Datum(\n val=e,\n com='gh',\n typ='h',\n src_1=log.filename,\n idx_1=x + 1,\n idx_2=y + 1)\n for e, x, y in zip(\n low_tri, low_tri_idx[0], low_tri_idx[1])])\n # MACROMODEL HESSIAN\n filenames = chain.from_iterable(coms['mh'])\n for filename in filenames:\n # Get the .log for the .mae.\n name_log = inps[filename].name_log\n # Used to get dummy atoms.\n mae = check_outs(filename, outs, filetypes.Mae, direc)\n # Used to get the Hessian.\n log = check_outs(name_log, outs, filetypes.MacroModelLog, direc)\n hess = log.hessian\n dummies = mae.structures[0].get_dummy_atom_indices()\n hess_dummies = datatypes.get_dummy_hessian_indices(dummies)\n hess = datatypes.check_mm_dummy(hess, hess_dummies)\n low_tri_idx = np.tril_indices_from(hess)\n low_tri = hess[low_tri_idx]\n data.extend([datatypes.Datum(\n val=e,\n com='mh',\n typ='h',\n src_1=mae.filename,\n idx_1=x + 1,\n idx_2=y + 1)\n for e, x, y in zip(\n low_tri, low_tri_idx[0], low_tri_idx[1])])\n # JAGUAR EIGENMATRIX\n filenames = chain.from_iterable(coms['jeigz'])\n for comma_sep_filenames in filenames:\n name_in, name_out = comma_sep_filenames.split(',')\n jin = check_outs(name_in, outs, filetypes.JaguarIn, direc)\n out = check_outs(name_out, outs, 
filetypes.JaguarOut, direc)\n hess = jin.hessian\n evec = out.eigenvectors\n datatypes.mass_weight_hessian(hess, jin.structures[0].atoms)\n datatypes.mass_weight_eigenvectors(evec, out.structures[0].atoms)\n try:\n eigenmatrix = np.dot(np.dot(evec, hess), evec.T)\n except ValueError:\n logger.warning('Matrices not aligned!')\n logger.warning('Hessian retrieved from {}: {}'.format(\n name_in, hess.shape))\n logger.warning('Eigenvectors retrieved from {}: {}'.format(\n name_out, evec.shape))\n raise\n\n # Funny way to make off-diagonal elements zero.\n # eigenmatrix = np.diag(np.diag(eigenmatrix))\n\n # Take diagonal into one dimensional array.\n eigenmatrix = np.diag(eigenmatrix)\n if invert:\n datatypes.replace_minimum(eigenmatrix, value=invert)\n # Turn back into a full matrix.\n eigenmatrix = np.diag(eigenmatrix)\n low_tri_idx = np.tril_indices_from(eigenmatrix)\n low_tri = eigenmatrix[low_tri_idx]\n data.extend([datatypes.Datum(\n val=e,\n com='jeigz',\n typ='eig',\n src_1=jin.filename,\n src_2=out.filename,\n idx_1=x + 1,\n idx_2=y + 1)\n for e, x, y in zip(\n low_tri, low_tri_idx[0], low_tri_idx[1])])\n # GAUSSIAN EIGENMATRIX\n filenames = chain.from_iterable(coms['geigz'])\n for filename in filenames:\n log = check_outs(filename, outs, filetypes.GaussLog, direc)\n evals = log.evals * co.HESSIAN_CONVERSION\n if invert:\n datatypes.replace_minimum(evals, value=invert)\n eigenmatrix = np.diag(evals)\n low_tri_idx = np.tril_indices_from(eigenmatrix)\n low_tri = eigenmatrix[low_tri_idx]\n data.extend([datatypes.Datum(\n val=e,\n com='geigz',\n typ='eig',\n src_1=log.filename,\n idx_1=x + 1,\n idx_2=y + 1)\n for e, x, y in zip(\n low_tri, low_tri_idx[0], low_tri_idx[1])])\n # MACROMODEL EIGENMATRIX USING JAGUAR EIGENVECTORS\n filenames = chain.from_iterable(coms['mjeig'])\n for comma_sep_filenames in filenames:\n name_mae, name_out = comma_sep_filenames.split(',')\n name_log = inps[name_mae].name_log\n mae = check_outs(name_mae, outs, filetypes.Mae, direc)\n log = check_outs(name_log, outs, filetypes.MacroModelLog, direc)\n out = check_outs(name_out, outs, filetypes.JaguarOut, direc)\n hess = log.hessian\n dummies = mae.structures[0].get_dummy_atom_indices()\n hess_dummies = datatypes.get_dummy_hessian_indices(dummies)\n hess = datatypes.check_mm_dummy(hess, hess_dummies)\n evec = out.eigenvectors\n datatypes.mass_weight_eigenvectors(evec, out.structures[0].atoms)\n try:\n eigenmatrix = np.dot(np.dot(evec, hess), evec.T)\n except ValueError:\n logger.warning('Matrices not aligned!')\n logger.warning('Hessian retrieved from {}: {}'.format(\n log.filename, hess.shape))\n logger.warning('Eigenvectors retrieved from {}: {}'.format(\n name_out, evec.shape))\n raise\n low_tri_idx = np.tril_indices_from(eigenmatrix)\n low_tri = eigenmatrix[low_tri_idx]\n data.extend([datatypes.Datum(\n val=e,\n com='mjeig',\n typ='eig',\n src_1=mae.filename,\n src_2=out.filename,\n idx_1=x + 1,\n idx_2=y + 1)\n for e, x, y in zip(\n low_tri, low_tri_idx[0], low_tri_idx[1])])\n # MACROMODEL EIGENMATRIX USING GAUSSIAN EIGENVECTORS\n filenames = chain.from_iterable(coms['mgeig'])\n for comma_filenames in filenames:\n name_mae, name_gau_log = comma_filenames.split(',')\n name_mae_log = inps[name_mae].name_log\n mae = check_outs(name_mae, outs, filetypes.Mae, direc)\n mae_log = check_outs(name_mae_log, outs, filetypes.MacroModelLog, direc)\n gau_log = check_outs(name_gau_log, outs, filetypes.GaussLog, direc)\n hess = mae_log.hessian\n dummies = mae.structures[0].get_dummy_atom_indices()\n hess_dummies = 
datatypes.get_dummy_hessian_indices(dummies)\n hess = datatypes.check_mm_dummy(hess, hess_dummies)\n evec = gau_log.evecs\n try:\n eigenmatrix = np.dot(np.dot(evec, hess), evec.T)\n except ValueError:\n logger.warning('Matrices not aligned!')\n logger.warning('Hessian retrieved from {}: {}'.format(\n name_mae_log, hess.shape))\n logger.warning('Eigenvectors retrieved from {}: {}'.format(\n name_gau_log, evec.shape))\n raise\n low_tri_idx = np.tril_indices_from(eigenmatrix)\n low_tri = eigenmatrix[low_tri_idx]\n data.extend([datatypes.Datum(\n val=e,\n com='mgeig',\n typ='eig',\n src_1=name_mae,\n src_2=name_gau_log,\n idx_1=x + 1,\n idx_2=y + 1)\n for e, x, y in zip(\n low_tri, low_tri_idx[0], low_tri_idx[1])])\n logger.log(15, 'TOTAL DATA POINTS: {}'.format(len(data)))\n return np.array(data, dtype=datatypes.Datum)\n\ndef collect_data_fake(coms, inps, direc='.', sub_names=['OPT']):\n \"\"\"\n Generates a random data set quickly.\n \"\"\"\n import random\n data = []\n filenames = flatten(coms.values())\n for idx_1, filename in enumerate(filenames):\n for idx_2 in range(5):\n data.append(datatypes.Datum(\n val=random.uniform(0, 10),\n com='rand',\n typ='a',\n src_1=filename,\n idx_1=idx_1 + 1,\n idx_2=idx_2 + 1))\n return np.array(data, dtype=datatypes.Datum)\n\ndef flatten(l):\n \"\"\"\n Simple means to flatten an irregular list of lists.\n\n http://stackoverflow.com/questions/2158395/\n flatten-an-irregular-list-of-lists-in-python\n\n This goes a bit further than chain.from_iterable in that it can deal with\n an arbitrary number of nested lists.\n \"\"\"\n # Move this?\n import collections\n for el in l:\n if isinstance(el, collections.Iterable) and \\\n not isinstance(el, str):\n for sub in flatten(el):\n yield sub\n else:\n yield el\n\ndef collect_structural_data_from_mae(\n name_mae, inps, outs, direc, sub_names, com, ind, typ):\n \"\"\"\n Repeated code used to extract structural data from .mae files (through\n the generation of .mmo files).\n\n Would be nice to reduce the number of arguments. The problem here is in\n carrying through data for the generation of the Datum object.\n\n Not going to write a pretty __doc__ for this since I want to make so\n many changes. 
These changes will likely go along with modifications\n to the classes inside filetypes.\n \"\"\"\n data = []\n name_mmo = inps[name_mae].name_mmo\n # The indices is jsut a list for the calculation done, 'pre' or 'opt'.\n indices = inps[name_mae]._index_output_mmo\n\n mmo = check_outs(name_mmo, outs, filetypes.MacroModel, direc)\n selected_structures = filetypes.select_structures(\n mmo.structures, indices, ind)\n for idx_1, structure in selected_structures:\n data.extend(structure.select_data(\n typ,\n com=com,\n com_match=sub_names,\n src_1=mmo.filename,\n idx_1=idx_1 + 1))\n return data\n\n# Added by Tony.\n# Probably want to use check_outs function at somepoint.\ndef collect_structural_data_from_tinker_log(\n name_xyz, inps, outs, direc, com, ind, typ, idx_1 = None):\n select_struct = {'pre':0, 'opt':1}\n data = []\n name_log = inps[name_xyz].name_log\n log = check_outs(name_log, outs, filetypes.TinkerLog, direc)\n log_structure = log.structures\n struct = log_structure[select_struct[ind]]\n # Stuff to try out hessian.\n # xyz_struct = xyz_structure[0]\n # num_atoms = xyz_struct.props['total atoms']\n # hes_structure = inps[name_xyz].hess\n # hes_structure.natoms = num_atoms\n # hessian = hes_structure.hessian()\n # Stuff to try out hessian.\n if com in ['te','teo','tea','teao']:\n energy = struct.props['energy']\n new_datum = (datatypes.Datum(\n val=energy,\n typ=typ,\n src_1=name_log,\n idx_1=idx_1 + 1))\n return(new_datum)\n else:\n data.extend(struct.select_data(\n typ,\n com=com,\n src_1=name_log))\n return(data)\n\ndef collect_structural_data_from_tinker_log_for_gaussian(\n name_xyz, inps, outs, direc, com, ind, typ, idx_1 = None):\n select_struct = {'pre':0, 'opt':1}\n data = []\n name_log = inps[name_xyz].name_log\n log = check_outs(name_log, outs, filetypes.TinkerLog, direc)\n log_structure = log.structures\n struct = log_structure[select_struct[ind]]\n\n data.extend(struct.select_data(\n typ,\n com=com,\n src_1=name_log))\n return(data)\n\ndef collect_structural_data_from_amber_geo(\n name_xyz, inps, outs, direc, com, ind, typ, idx_1 = None):\n select_struct = {'pre':0, 'opt':1}\n data = []\n name_geo = inps[name_xyz].name_geo\n log = check_outs(name_geo, outs, filetypes.AmberGeo, direc) # returns classtype\n log_structure = log.structures\n struct = None\n if len(inps) == 1:\n struct = log_structure[0]\n else:\n struct = log_structure[select_struct[ind]]\n data.extend(struct.select_data(\n typ,\n com=com,\n src_1=name_geo))\n return(data)\ndef collect_structural_data_from_amber_ene(\n name_xyz, inps, outs, direc, com, ind, typ, idx_1 = None):\n # Problem with input only 1 file\n select_struct = {'pre':0, 'opt':1}\n data = []\n name_ene = inps[name_xyz].name_ene\n log = check_outs(name_ene, outs, filetypes.AmberEne, direc) # returns classtype\n log_structure = log.structures\n struct = None\n if len(inps) == 1:\n struct = log_structure[0]\n else:\n struct = log_structure[select_struct[ind]]\n if com in ['ae','aeo','aea','aeao','ae1','ae1o']:\n energy = struct.props['energy']\n new_datum = (datatypes.Datum(\n val=energy,\n typ=typ,\n src_1=name_ene,\n idx_1=idx_1 + 1))\n return(new_datum)\n else:\n data.extend(struct.select_data(\n typ,\n com=com,\n src_1=name_ene))\n return(data)\n\n\n\ndef sort_commands_by_filename(commands):\n '''\n Takes a dictionary of commands like...\n\n {'me': [['a1.01.mae', 'a2.01.mae', 'a3.01.mae'],\n ['b1.01.mae', 'b2.01.mae']],\n 'mb': [['a1.01.mae'], ['b1.01.mae']],\n 'jeig': [['a1.01.in,a1.out', 'b1.01.in,b1.out']]\n }\n\n ... 
and turn it into a dictionary that looks like...\n\n {'a1.01.mae': ['me', 'mb'],\n 'a1.01.in': ['jeig'],\n 'a1.out': ['jeig'],\n 'a2.01.mae': ['me'],\n 'a3.01.mae': ['me'],\n 'b1.01.mae': ['me', 'mb'],\n 'b1.01.in': ['jeig'],\n 'b1.out': ['jeig'],\n 'b2.01.mae': ['me']\n }\n\n Arguments\n ---------\n commands : dic\n\n Returns\n -------\n dictionary of the sorted commands\n '''\n sorted_commands = {}\n for command, groups_filenames in commands.items():\n for comma_separated in chain.from_iterable(groups_filenames):\n for filename in comma_separated.split(','):\n if filename in sorted_commands:\n sorted_commands[filename].append(command)\n else:\n sorted_commands[filename] = [command]\n return sorted_commands\n\n# Will also have to be updated. Maybe the Datum class too and how it responds\n# to assigning labels. \n## Why is this here? Is this deprecated? -Tony\ndef read_reference(filename):\n data = []\n with open(filename, 'r') as f:\n for line in f:\n # Skip certain lines.\n if line.startswith('-'):\n continue\n # Remove everything following a # in a line.\n line = line.partition('#')[0]\n cols = line.split()\n # There should always be 3 columns.\n if len(cols) == 3:\n lbl, wht, val = cols\n datum = datatypes.Datum(lbl=lbl, wht=float(wht), val=float(val))\n lbl_to_data_attrs(datum, lbl)\n data.append(datum)\n data = data.sort(key=datatypes.datum_sort_key)\n return np.array(data)\n\n\n## This is also part of the read_reference function above, but I think these \n## labels and attributes are important for handleing data.\n# Shouldn't be necessary anymore.\n# This should be based by the datum type and not the length of the parts list.\ndef lbl_to_data_attrs(datum, lbl):\n parts = lbl.split('_')\n datum.typ = parts[0]\n # if len(parts) == 3:\n if datum.typ in ['e','eo','ea','eao','eig','h','q','qh','qa']:\n idxs = parts[-1]\n # if len(parts) == 4:\n if datum.typ in ['b','t','a']:\n idxs = parts[-2]\n atm_nums = parts[-1]\n atm_nums = atm_nums.split('-')\n for i, atm_num in enumerate(atm_nums):\n setattr(datum, 'atm_{}'.format(i+1), int(atm_num))\n if datum.typ in ['p']:\n datum.src_1 = parts[1]\n idxs = parts[-1]\n if datum.typ in ['esp']:\n datum.src_1 = parts[1]\n idxs = parts[-1]\n idxs = idxs.split('-')\n datum.idx_1 = int(idxs[0])\n if len(idxs) == 2:\n datum.idx_2 == int(idxs[1])\n\n# Right now, this only looks good if the logger doesn't append each log\n# message with something (module, date/time, etc.).\n# It would be great if this output looked good regardless of the settings\n# used for the logger.\n# That goes for all of these pretty output functions that use TextWrapper.\ndef pretty_commands_for_files(commands_for_files, log_level=5):\n \"\"\"\n Logs the .mae commands dictionary, or the all of the commands\n used on a particular file.\n\n Arguments\n ---------\n commands_for_files : dic\n log_level : int\n \"\"\"\n if logger.getEffectiveLevel() <= log_level:\n foobar = TextWrapper(\n width=48, subsequent_indent=' '*26)\n logger.log(\n log_level,\n '--' + ' FILENAME '.center(22, '-') +\n '--' + ' COMMANDS '.center(22, '-') +\n '--')\n for filename, commands in commands_for_files.items():\n foobar.initial_indent = ' {:22s} '.format(filename)\n logger.log(log_level, foobar.fill(' '.join(commands)))\n logger.log(log_level, '-'*50)\n\ndef pretty_all_commands(commands, log_level=5):\n \"\"\"\n Logs the arguments/commands given to calculate that are used\n to request particular datatypes from particular files.\n\n Arguments\n ---------\n commands : dic\n log_level : int\n \"\"\"\n if 
logger.getEffectiveLevel() <= log_level:\n foobar = TextWrapper(width=48, subsequent_indent=' '*24)\n logger.log(log_level, '')\n logger.log(\n log_level,\n '--' + ' COMMAND '.center(9, '-') +\n '--' + ' GROUP # '.center(9, '-') +\n '--' + ' FILENAMES '.center(24, '-') +\n '--')\n for command, groups_filenames in commands.items():\n for i, filenames in enumerate(groups_filenames):\n if i == 0:\n foobar.initial_indent = \\\n ' {:9s} {:^9d} '.format(command, i+1)\n else:\n foobar.initial_indent = \\\n ' ' + ' '*9 + ' ' + '{:^9d} '.format(i+1)\n logger.log(log_level, foobar.fill(' '.join(filenames)))\n logger.log(log_level, '-'*50)\n\ndef pretty_data(data, log_level=20):\n \"\"\"\n Logs data as a table.\n\n Arguments\n ---------\n data : list of Datum\n log_level : int\n \"\"\"\n # Really, this should check every data point instead of only the 1st.\n if not data[0].wht:\n compare.import_weights(data)\n if log_level:\n string = ('--' + ' LABEL '.center(22, '-') +\n '--' + ' WEIGHT '.center(22, '-') +\n '--' + ' VALUE '.center(22, '-') +\n '--')\n logger.log(log_level, string)\n for d in data:\n if d.wht or d.wht == 0:\n string = (' ' + '{:22s}'.format(d.lbl) +\n ' ' + '{:22.4f}'.format(d.wht) +\n ' ' + '{:22.4f}'.format(d.val))\n else:\n string = (' ' + '{:22s}'.format(d.lbl) +\n ' ' + '{:22.4f}'.format(d.val))\n if log_level:\n logger.log(log_level, string)\n else:\n print(string)\n if log_level:\n logger.log(log_level, '-' * 50)\n\nif __name__ == '__main__':\n logging.config.dictConfig(co.LOG_SETTINGS)\n main(sys.argv[1:])\n"
] | [
[
"numpy.array",
"numpy.count_nonzero",
"numpy.dot",
"numpy.linalg.eigh",
"numpy.load",
"numpy.tril_indices_from",
"numpy.diag"
]
] |
lkusch/Kratos | [
"e8072d8e24ab6f312765185b19d439f01ab7b27b"
] | [
"applications/RomApplication/tests/test_empirical_cubature_method.py"
] | [
"#import python class test\nimport KratosMultiphysics.KratosUnittest as KratosUnittest\nimport KratosMultiphysics.kratos_utilities as kratos_utilities\n\n#import python packages\ntry:\n import numpy as np\n from KratosMultiphysics.RomApplication.empirical_cubature_method import EmpiricalCubatureMethod\n numpy_available = True\nexcept:\n numpy_available = False\n\ndef synthetic_matrix(degree, rows = 100):\n TestMatrix = np.zeros((rows,degree+1))\n x = np.linspace(0,1,rows)\n for i in range(degree+1):\n TestMatrix[:,i] = np.power(x,i)\n return TestMatrix\n\nclass TestEmpiricalCubatureMethod(KratosUnittest.TestCase):\n\n @KratosUnittest.skipUnless(numpy_available, \"numpy is required for RomApplication\")\n def test_empirical_cubature_method(self):\n\n for degree in range(5,10):\n TestMatrix = synthetic_matrix(degree) #Get a synthetic matrix (During the training of a ROM model, this is a matrix of residuals projected onto a basis)\n\n #Pass the matrix to the ECM and obtain a set of elements(points) and weights (these steps are contained in the Run method of the ElementSelector base class)\n ElementSelector = EmpiricalCubatureMethod(SVD_tolerance = 0, ECM_tolerance = 0)\n ElementSelector.SetUp(TestMatrix, 'test_number_of_elements', 'test_model_part_name')\n ElementSelector.Initialize()\n ElementSelector.Calculate()\n\n self.assertEqual( len(ElementSelector.z) , degree + 1 ) #for a polynomial of degree n, n+1 points are to be selected\n # Cleaning\n kratos_utilities.DeleteDirectoryIfExisting(\"__pycache__\")\n\nif __name__=='__main__':\n KratosUnittest.main()"
] | [
[
"numpy.linspace",
"numpy.power",
"numpy.zeros"
]
] |
FeiLi5/git-github.com-yt-project-yt | [
"0c6cf75351b91e4da80f6a0207ebbcb73dd72a59"
] | [
"yt/tests/test_testing.py"
] | [
"\"\"\"\nTests for yt.testing\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2018, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nimport matplotlib\n\nfrom yt.testing import assert_equal, requires_backend\n\n\ndef test_requires_backend():\n backend = matplotlib.get_backend().lower()\n other_backends = {\"gtkagg\", \"macosx\", \"wx\", \"tkagg\"} - {backend}\n\n @requires_backend(other_backends.pop())\n def plot_a():\n return True\n\n @requires_backend(backend)\n def plot_b():\n return True\n\n assert_equal(plot_a(), None)\n assert_equal(plot_b(), True)\n"
] | [
[
"matplotlib.get_backend"
]
] |
avramidis/fastseriation | [
"bddc79f21c8172c9af339d058190ab9255d3f66d"
] | [
"tests/test_basic.py"
] | [
"import unittest\nimport sys\nimport numpy\n\nsys.path.append('..')\nimport fastseriation.seriate\n\n\nclass TestFastSeriationMethods(unittest.TestCase):\n\n def test_fastseriation_1(self):\n\n nrows = 10\n ncols = 2\n\n scores = numpy.zeros((nrows, ncols))\n for i in range(nrows):\n for j in range(ncols):\n scores[i, j] = abs(i - j)\n\n per = numpy.random.permutation(nrows)\n scores_pertutated = scores[per, :]\n seriated_indexes = fastseriation.seriate.seriate(scores_pertutated)\n\n result = False\n\n if numpy.array_equiv(scores, scores_pertutated[seriated_indexes, :]):\n result = True\n\n if numpy.array_equiv(scores, numpy.flipud(scores_pertutated[seriated_indexes, :])):\n result = True\n\n self.assertTrue(result)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.flipud",
"numpy.array_equiv",
"numpy.random.permutation",
"numpy.zeros"
]
] |
CollinJ0/antibodies | [
"7bfe86e9848f7194eefde1e0ea2765181278fb59"
] | [
"antibodies/plot.py"
] | [
"#!/usr/bin/env python\r\n# filename: plot.py\r\n\r\nimport itertools\r\nimport os\r\nimport sys\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\nfrom abutils.utils import mongodb\r\nfrom abutils.utils import color\r\n\r\nimport pymongo\r\nfrom tqdm import tqdm\r\n\r\nfrom natsort import natsorted\r\n\r\nfrom .df import get_freq_df\r\n\r\ndef df_freq_barplot(df, cols=None):\r\n\r\n #get the columns\r\n if not cols:\r\n cols = list(df.columns)\r\n\r\n #melt the df\r\n df = df.melt(id_vars='index', value_vars=cols)\r\n\r\n #plot\r\n sns.barplot(data = df, x='index', y='value', hue='variable')\r\n \r\n \r\ndef QuickDataCheck(db, collections=None, index=False, values=None, match=None):\r\n # This function will quickly allow you to check the sequencing data of a database\r\n # database\r\n if type(db) == pymongo.database.Database:\r\n DB = db\r\n elif type(db) == str:\r\n DB = mongodb.get_db(db)\r\n else:\r\n print ('Database not correct')\r\n return\r\n \r\n if collections is None:\r\n colls = mongodb.get_collections(DB)\r\n else:\r\n colls = collections\r\n \r\n #index the collections if applicable \r\n if index:\r\n print('Indexing Collections...')\r\n for collection in tqdm(colls):\r\n DB[collection].create_index([('chain', 1),\r\n ('prod', 1), \r\n ('v_gene.gene', 1), \r\n ('cdr3_len', 1)], \r\n name='productive heavychain cdr3_len', \r\n default_language='english')\r\n #if there is a set values, then use those\r\n if values:\r\n print('Getting data...')\r\n dfs = [get_freq_df(DB, colls, value, normalize=True, match=match) for value in values]\r\n\r\n else:\r\n print('Getting data...')\r\n values = ['v_gene.gene', 'cdr3_len']\r\n dfs = [get_freq_df(DB, colls, value, normalize=True, match=match) for value in values]\r\n \r\n #now plot the figures for each value\r\n for df, value in zip(dfs, values):\r\n print('-----------')\r\n print(value)\r\n print('-----------')\r\n for collection in df.columns:\r\n print(collection)\r\n #Try to plot the value unless the requested value is invalid\r\n try:\r\n df2 = pd.DataFrame(df[collection]).reset_index().melt(id_vars='index', value_vars=df.columns)\r\n try:\r\n fam = [d.split('-')[0] for d in df2['index']]\r\n df2['fam'] = fam\r\n except AttributeError:\r\n None\r\n\r\n plt.figure(figsize=[12,4])\r\n try:\r\n g = sns.barplot(data = df2, x='index', y='value', hue='fam', dodge=False) \r\n except ValueError:\r\n g = sns.barplot(data = df2, x='index', y='value', dodge=False)\r\n try:\r\n g.get_legend().remove()\r\n except AttributeError:\r\n None\r\n plt.xticks(rotation=90)\r\n plt.tight_layout()\r\n plt.show()\r\n print(' ')\r\n except ValueError:\r\n print('The value you requested is in valid')\r\n\r\n"
] | [
[
"pandas.DataFrame",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks"
]
] |
nju-websoft/AliNet | [
"feb34993f1181df1cc05cc1542bd6f3590fa1ce2"
] | [
"code/align/preprocess.py"
] | [
"import random\n\nimport numpy as np\nimport pandas as pd\n\n\ndef remove_unlinked_triples(triples, linked_ents):\n print(\"before removing unlinked triples:\", len(triples))\n new_triples = set()\n for h, r, t in triples:\n if h in linked_ents and t in linked_ents:\n new_triples.add((h, r, t))\n print(\"after removing unlinked triples:\", len(new_triples))\n return list(new_triples)\n\n\ndef enhance_triples(kg1, kg2, ents1, ents2):\n assert len(ents1) == len(ents2)\n print(\"before enhanced:\", len(kg1.triples), len(kg2.triples))\n enhanced_triples1, enhanced_triples2 = set(), set()\n links1 = dict(zip(ents1, ents2))\n links2 = dict(zip(ents2, ents1))\n for h1, r1, t1 in kg1.triples:\n h2 = links1.get(h1, None)\n t2 = links1.get(t1, None)\n if h2 is not None and t2 is not None and t2 not in kg2.out_related_ents_dict.get(h2, set()):\n enhanced_triples2.add((h2, r1, t2))\n for h2, r2, t2 in kg2.triples:\n h1 = links2.get(h2, None)\n t1 = links2.get(t2, None)\n if h1 is not None and t1 is not None and t1 not in kg1.out_related_ents_dict.get(h1, set()):\n enhanced_triples1.add((h1, r2, t1))\n print(\"after enhanced:\", len(enhanced_triples1), len(enhanced_triples2))\n return enhanced_triples1, enhanced_triples2\n\n\ndef generate_3hop_triples(kg, two_hop_triples, linked_ents=None):\n\n two_triple_df = np.array([[tr[0], tr[1], tr[2]] for tr in two_hop_triples])\n two_triple_df = pd.DataFrame(two_triple_df, columns=['h', 'r', 't'])\n\n triples = kg.triples\n if linked_ents is not None:\n triples = remove_unlinked_triples(triples, linked_ents)\n triple_df = np.array([[tr[0], tr[1], tr[2]] for tr in triples])\n triple_df = pd.DataFrame(triple_df, columns=['h', 'r', 't'])\n # print(triple_df)\n two_hop_triple_df = pd.merge(two_triple_df, triple_df, left_on='t', right_on='h')\n # print(two_hop_triple_df)\n two_step_quadruples = set()\n relation_patterns = dict()\n for index, row in two_hop_triple_df.iterrows():\n head = row[\"h_x\"]\n tail = row[\"t_y\"]\n r_x = row[\"r_x\"]\n r_y = row['r_y']\n if tail not in kg.out_related_ents_dict.get(head, set()) and \\\n head not in kg.in_related_ents_dict.get(tail, set()):\n relation_patterns[(r_x, r_y)] = relation_patterns.get((r_x, r_y), 0) + 1\n two_step_quadruples.add((head, r_x, r_y, tail))\n print(\"total 3-hop neighbors:\", len(two_step_quadruples))\n print(\"total 3-hop relation patterns:\", len(relation_patterns))\n relation_patterns = sorted(relation_patterns.items(), key=lambda x: x[1], reverse=True)\n p = 0.05\n num = int(p * len(relation_patterns))\n selected_patterns = set()\n # for i in range(20, num):\n for i in range(5, len(relation_patterns)):\n pattern = relation_patterns[i][0]\n selected_patterns.add(pattern)\n print(\"selected relation patterns:\", len(selected_patterns))\n two_step_triples = set()\n for head, rx, ry, tail in two_step_quadruples:\n if (rx, ry) in selected_patterns:\n two_step_triples.add((head, 0, head))\n two_step_triples.add((head, rx + ry, tail))\n print(\"selected 3-hop neighbors:\", len(two_step_triples))\n return two_step_triples\n\n\ndef generate_2hop_triples(kg, linked_ents=None):\n triples = kg.triples\n if linked_ents is not None:\n triples = remove_unlinked_triples(triples, linked_ents)\n triple_df = np.array([[tr[0], tr[1], tr[2]] for tr in triples])\n triple_df = pd.DataFrame(triple_df, columns=['h', 'r', 't'])\n # print(triple_df)\n two_hop_triple_df = pd.merge(triple_df, triple_df, left_on='t', right_on='h')\n # print(two_hop_triple_df)\n two_step_quadruples = set()\n relation_patterns = dict()\n for index, 
row in two_hop_triple_df.iterrows():\n head = row[\"h_x\"]\n tail = row[\"t_y\"]\n r_x = row[\"r_x\"]\n r_y = row['r_y']\n if tail not in kg.out_related_ents_dict.get(head, set()) and \\\n head not in kg.in_related_ents_dict.get(tail, set()):\n relation_patterns[(r_x, r_y)] = relation_patterns.get((r_x, r_y), 0) + 1\n two_step_quadruples.add((head, r_x, r_y, tail))\n print(\"total 2-hop neighbors:\", len(two_step_quadruples))\n print(\"total 2-hop relation patterns:\", len(relation_patterns))\n relation_patterns = sorted(relation_patterns.items(), key=lambda x: x[1], reverse=True)\n p = 0.05\n num = int(p * len(relation_patterns))\n selected_patterns = set()\n # for i in range(20, num):\n for i in range(5, len(relation_patterns)):\n pattern = relation_patterns[i][0]\n selected_patterns.add(pattern)\n print(\"selected relation patterns:\", len(selected_patterns))\n two_step_triples = set()\n for head, rx, ry, tail in two_step_quadruples:\n if (rx, ry) in selected_patterns:\n two_step_triples.add((head, 0, head))\n two_step_triples.add((head, rx + ry, tail))\n print(\"selected 2-hop neighbors:\", len(two_step_triples))\n return two_step_triples\n\n\ndef generate_2steps_path(triples):\n tr = np.array([[tr[0], tr[2], tr[1]] for tr in triples])\n tr = pd.DataFrame(tr, columns=['h', 't', 'r'])\n \"\"\"\n h t r\n 0 21860 8837 18\n 1 2763 25362 42\n 2 158 22040 130\n \"\"\"\n sizes = tr.groupby(['h', 'r']).size()\n sizes.name = 'size'\n tr = tr.join(sizes, on=['h', 'r'])\n train_raw_df = tr[['h', 'r', 't', 'size']]\n two_step_df = pd.merge(train_raw_df, train_raw_df, left_on='t', right_on='h')\n print(\"total 2-hop triples:\", two_step_df.shape[0])\n \"\"\"\n h_x r_x t_x size_x h_y r_y t_y size_y\n 0 21860 18 8837 5 8837 18 1169 7\n 1 21860 18 8837 5 8837 18 24618 7\n 2 21860 18 8837 5 8837 216 1899 1\n 3 21860 18 8837 5 8837 18 523 7\n \"\"\"\n two_hop_relations = two_step_df[['r_x', 'r_y']]\n \"\"\"\n r_x r_y\n 0 18 18\n 1 18 18\n 2 18 216\n \"\"\"\n freq = two_hop_relations.groupby(['r_x', 'r_y']).size()\n freq.name = 'freq'\n freq_two_hop_relations = two_hop_relations.join(freq, on=['r_x', 'r_y']).drop_duplicates().dropna(axis=0)\n freq_two_hop_relations = freq_two_hop_relations.sort_values('freq', axis=0, ascending=False)\n \"\"\"\n r_x r_y freq\n 0 18 18 34163.0\n 90980 103 18 34163.0\n \"\"\"\n # print(freq_two_hop_relations)\n total_lines = freq_two_hop_relations.shape[0]\n print(\"total relation paths:\", total_lines)\n p = 0.1\n num = int(p * total_lines)\n print(\"choose top\", num)\n freq_two_hop_relations = freq_two_hop_relations.head(num)[['r_x', 'r_y']].values.tolist()\n freq_two_hop_relations = [(x, y) for x, y in freq_two_hop_relations]\n freq_two_hop_relations = set(freq_two_hop_relations)\n two_step_triples = set()\n for index, row in two_step_df.iterrows():\n head = row[\"h_x\"]\n tail = row[\"t_y\"]\n r_x = row[\"r_x\"]\n r_y = row['r_y']\n if (r_x, r_y) in freq_two_hop_relations:\n two_step_triples.add((head, r_x + r_y, tail))\n print(\"new two hop neighbors:\", len(two_step_triples))\n return set(two_step_triples)\n"
] | [
[
"pandas.DataFrame",
"numpy.array",
"pandas.merge"
]
] |
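The multi-hop utilities in the record above build k-hop neighbourhoods by self-joining a triple table on tail = head via pandas.merge. Below is a minimal sketch of that join pattern, using made-up entity and relation IDs rather than a real knowledge graph; each merged row carries the suffixed columns h_x, r_x, r_y, t_y that generate_2hop_triples reads when counting relation patterns.

import numpy as np
import pandas as pd

# Toy (head, relation, tail) triples; the IDs are illustrative only.
triples = [(0, 10, 1), (1, 11, 2), (2, 12, 3)]

df = pd.DataFrame(np.array(triples), columns=['h', 'r', 't'])

# Join tails of the first hop onto heads of the second hop to enumerate 2-hop paths.
two_hop = pd.merge(df, df, left_on='t', right_on='h')

# Each row describes the composed path h_x -r_x-> t_x -r_y-> t_y.
print(two_hop[['h_x', 'r_x', 'r_y', 't_y']])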
MarcusRainbow/TwoPlayerGame | [
"a10c0a5f949bcb6e03bdf8197c513511b3c29f98"
] | [
"connect4.py"
] | [
"from game import Game\r\nfrom board import Board\r\nimport numpy as np\r\nfrom typing import Tuple, List\r\n\r\nclass Connect4(Game):\r\n def __init__(self, starting_grid = np.zeros((6, 7), dtype=int)):\r\n self.starting_grid = starting_grid\r\n\r\n def clean_board(self) -> Board:\r\n return Board(np.copy(self.starting_grid), lambda grid: legal_moves(grid)) \r\n\r\n def tokens(self) -> Tuple[int, int]:\r\n return (-1, 1)\r\n\r\n def test_win(self, board: Board) -> int:\r\n \"\"\"\r\n A win in connect 4 is a line of 4 the same in any\r\n row column or diagonal\r\n \"\"\"\r\n if are_four_connected(-1, board.grid):\r\n return 0\r\n elif are_four_connected(1, board.grid):\r\n return 1\r\n else:\r\n return -1\r\n\r\ndef legal_moves(grid: np.ndarray) -> List[int]:\r\n \"\"\"\r\n You can legally place a token at the bottom of any column,\r\n above any other previous tokens\r\n \"\"\"\r\n cols = np.size(grid, 1)\r\n result = []\r\n\r\n for x, column in enumerate(np.transpose(grid)):\r\n # find the first zero element\r\n y = np.where(column == 0)\r\n if len(y[0]) > 0:\r\n pos = x + y[0][0] * cols\r\n result.append(pos)\r\n\r\n return result\r\n\r\ndef are_four_connected(player: int, grid: np.ndarray) -> bool:\r\n \"\"\"\r\n Algorithm from stack overflow. Converted from Java.\r\n \"\"\"\r\n cols = np.size(grid, 1)\r\n rows = np.size(grid, 0)\r\n\r\n # horizontalCheck \r\n for x in range(cols - 3):\r\n for y in range(rows):\r\n if (grid[y, x] == player and grid[y, x+1] == player\r\n and grid[y, x+2] == player and grid[y, x+3] == player):\r\n return True\r\n\r\n # verticalCheck\r\n for y in range(rows - 3):\r\n for x in range(cols):\r\n if (grid[y, x] == player and grid[y+1, x] == player\r\n and grid[y+2, x] == player and grid[y+3, x] == player):\r\n return True\r\n \r\n # ascendingDiagonalCheck \r\n for y in range(3, rows):\r\n for x in range(cols - 3):\r\n if (grid[y, x] == player and grid[y-1, x+1] == player\r\n and grid[y-2, x+2] == player and grid[y-3, x+3] == player):\r\n return True\r\n\r\n # descendingDiagonalCheck\r\n for y in range(3, rows):\r\n for x in range(3, cols):\r\n if (grid[y, x] == player and grid[y-1, x-1] == player\r\n and grid[y-2, x-2] == player and grid[y-3, x-3] == player):\r\n return True\r\n\r\n return False\r\n\r\nif __name__ == \"__main__\":\r\n from play import play_game\r\n from human_player import HumanPlayer\r\n from clever_player import CleverPlayer\r\n\r\n game = Connect4()\r\n play_game(HumanPlayer(), CleverPlayer(game, 1, 4), game)\r\n"
] | [
[
"numpy.zeros",
"numpy.copy",
"numpy.where",
"numpy.transpose",
"numpy.size"
]
] |
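The legal-move scan in connect4.py walks each column of the grid and takes the first empty cell it finds with numpy.where, encoding the move as the flattened index x + y * cols. A standalone sketch of that pattern on a toy board (the single pre-placed token is illustrative):

import numpy as np

grid = np.zeros((6, 7), dtype=int)  # empty Connect 4 board
grid[0, 3] = 1                      # one token already dropped in column 3

cols = np.size(grid, 1)
legal = []
for x, column in enumerate(np.transpose(grid)):
    empty = np.where(column == 0)[0]       # row indices of empty cells in this column
    if empty.size > 0:
        legal.append(x + empty[0] * cols)  # flatten (row, col) into one move index

print(legal)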
Borda/pyImSegm | [
"c52e709f9fcd90bcc6ab06e515685fd4c4e6c301"
] | [
"experiments_ovary_centres/run_center_clustering.py"
] | [
"\"\"\"\nThe clustering is already part of the center prediction scipt.\nThe path to the image and segmentation serves just for visualisation,\nfor the own clustering they are not needed.\n\nCopyright (C) 2017 Jiri Borovec <[email protected]>\n\"\"\"\n\nimport logging\nimport os\nimport sys\nfrom functools import partial\n\nimport matplotlib\nimport numpy as np\nimport pandas as pd\nfrom sklearn import cluster\n\nif os.environ.get('DISPLAY', '') == '' and matplotlib.rcParams['backend'] != 'agg':\n print('No display found. Using non-interactive Agg backend.')\n matplotlib.use('Agg')\n\nimport matplotlib.pylab as plt\n\nsys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root\nimport run_center_candidate_training as run_train\n\nimport imsegm.utilities.data_io as tl_data\nimport imsegm.utilities.drawing as tl_visu\nimport imsegm.utilities.experiments as tl_expt\n\n# import run_center_prediction as run_pred\n\n# Set experiment folders\nFOLDER_CENTER = 'centers'\nFOLDER_CLUSTER_VISUAL = 'centers_clustering'\nLIST_SUBDIRS = [FOLDER_CENTER, FOLDER_CLUSTER_VISUAL]\n\nIMAGE_EXTENSIONS = ['.png', '.jpg']\n# subfigure size for visualisations\nMAX_FIGURE_SIZE = 12\nFOLDER_EXPERIMENT = 'detect-centers-predict_%s'\nNAME_YAML_PARAMS = 'config_clustering.yaml'\n\n# The asumtion is that the max distance is about 3 * sampling distance\nCLUSTER_PARAMS = {\n 'DBSCAN_max_dist': 50,\n 'DBSCAN_min_samples': 1,\n}\nDEFAULT_PARAMS = run_train.CENTER_PARAMS\nDEFAULT_PARAMS.update(CLUSTER_PARAMS)\nDEFAULT_PARAMS.update({\n 'path_images': os.path.join(run_train.PATH_IMAGES, 'image', '*.jpg'),\n 'path_segms': os.path.join(run_train.PATH_IMAGES, 'segm', '*.png'),\n 'path_centers': os.path.join(\n DEFAULT_PARAMS['path_output'], FOLDER_EXPERIMENT % DEFAULT_PARAMS['name'], 'candidates', '*.csv'\n )\n})\n\n\ndef cluster_center_candidates(points, max_dist=100, min_samples=1):\n \"\"\" cluster center candidates by given density clustering\n\n :param list(list(float)) points: points\n :param float max_dist: maximal distance among points\n :param int min_samples: minimal number od samples\n :return tuple(ndarray, list(int)):\n \"\"\"\n points = np.array(points)\n if not list(points):\n return points, []\n dbscan = cluster.DBSCAN(eps=max_dist, min_samples=min_samples)\n dbscan.fit(points)\n labels = dbscan.labels_.copy()\n\n centers = []\n for i in range(max(labels) + 1):\n clust = points[labels == i]\n if len(clust) > 0:\n center = np.mean(clust, axis=0)\n centers.append(center)\n\n return np.array(centers), labels\n\n\ndef export_draw_image_centers_clusters(\n path_out,\n name,\n img,\n centres,\n points=None,\n clust_labels=None,\n segm=None,\n fig_suffix='',\n max_fig_size=MAX_FIGURE_SIZE,\n):\n \"\"\" draw visualisation of clustered center candidates and export it\n\n :param str path_out:\n :param str name:\n :param ndarray img:\n :param centres:\n :param list(list(float)) points:\n :param list(int) clust_labels:\n :param ndarray segm:\n :param str fig_suffix:\n :param int max_fig_size:\n \"\"\"\n # if the output dos nor exist, leave\n if not os.path.isdir(path_out):\n return\n\n size = None\n if img is not None:\n size = np.array(img.shape[:2][::-1], dtype=float)\n elif segm is not None:\n size = np.array(segm.shape[:2][::-1], dtype=float)\n\n if size is not None:\n fig_size = (size / size.max() * max_fig_size)\n else:\n fig_size = (max_fig_size, max_fig_size)\n\n fig, ax = plt.subplots(figsize=fig_size)\n if img.ndim == 3:\n img = img[:, :, 0]\n tl_visu.draw_image_clusters_centers(ax, img, centres, 
points, clust_labels, segm)\n\n fig.tight_layout(pad=0)\n fig.savefig(os.path.join(path_out, name + fig_suffix + '.png'))\n plt.close(fig)\n\n\ndef cluster_points_draw_export(dict_row, params, path_out=None):\n \"\"\" cluster points into centers and export visualisations\n\n :param dict dict_row:\n :param dict params:\n :param str path_out:\n :return dict:\n \"\"\"\n if not all(n in dict_row for n in ['path_points', 'path_image', 'path_segm']):\n raise ValueError('missing some required fields: %r' % dict_row)\n name = os.path.splitext(os.path.basename(dict_row['path_points']))[0]\n points = tl_data.load_landmarks_csv(dict_row['path_points'])\n if not list(points):\n logging.debug('no points to cluster for \"%s\"', name)\n points = tl_data.swap_coord_x_y(points)\n\n centres, clust_labels = cluster_center_candidates(\n points, max_dist=params['DBSCAN_max_dist'], min_samples=params['DBSCAN_min_samples']\n )\n path_csv = os.path.join(path_out, FOLDER_CENTER, name + '.csv')\n tl_data.save_landmarks_csv(path_csv, tl_data.swap_coord_x_y(centres))\n\n path_visu = os.path.join(path_out, FOLDER_CLUSTER_VISUAL)\n\n img, segm = None, None\n if dict_row['path_image'] is not None and os.path.isfile(dict_row['path_image']):\n img = tl_data.io_imread(dict_row['path_image'])\n if dict_row['path_segm'] is not None and os.path.isfile(dict_row['path_segm']):\n segm = tl_data.io_imread(dict_row['path_segm'])\n\n export_draw_image_centers_clusters(path_visu, name, img, centres, points, clust_labels, segm)\n dict_row.update({'image': name, 'path_centers': path_csv, 'nb_centres': len(centres)})\n return dict_row\n\n\n# def load_centers_images_segm(path_pattern_csv, path_images, path_segms):\n# list_csv = sorted(glob.glob(path_pattern_csv))\n# logging.info('found %i csv files', len(list_csv))\n# # filter only csv files win specific format\n# # list_csv = [p for p in list_csv\n# # if re.match(PATTERN_NAME_CSV_CENTERS, os.path.basename(p)) is not None]\n# # logging.info('filtered to %i center files', len(list_csv))\n#\n# def add_img_path(name, key, path_dir):\n# for im_ext in IMAGE_EXTENSIONS:\n# path_img = os.path.join(path_dir, name + im_ext)\n# if os.path.exists(path_img):\n# d[key] = path_img\n# break\n# else:\n# d[key] = None\n#\n# df_paths = pd.DataFrame()\n# for path_csv in list_csv:\n# d = {'path_points': path_csv}\n# name = os.path.splitext(os.path.basename(path_csv))[0]\n# add_img_path(name, 'path_image', os.path.dirname(path_images))\n# add_img_path(name, 'path_segm', os.path.dirname(path_segms))\n# df_paths = df_paths.append(d, ignore_index=True)\n# return df_paths\n\n\ndef main(params):\n \"\"\" PIPELINE candidate clustering\n\n :param dict(str,any) params:\n \"\"\"\n params['path_expt'] = os.path.join(params['path_output'], FOLDER_EXPERIMENT % params['name'])\n tl_expt.save_config_yaml(os.path.join(params['path_expt'], NAME_YAML_PARAMS), params)\n tl_expt.create_subfolders(params['path_expt'], LIST_SUBDIRS)\n\n list_paths = [params[k] for k in ['path_images', 'path_segms', 'path_centers']]\n df_paths = tl_data.find_files_match_names_across_dirs(list_paths)\n df_paths.columns = ['path_image', 'path_segm', 'path_points']\n df_paths.index = range(1, len(df_paths) + 1)\n path_cover = os.path.join(params['path_expt'], run_train.NAME_CSV_TRIPLES)\n df_paths.to_csv(path_cover)\n\n logging.info('run clustering...')\n df_paths_new = pd.DataFrame()\n _wrapper_clustering = partial(cluster_points_draw_export, params=params, path_out=params['path_expt'])\n rows = (dict(row) for idx, row in df_paths.iterrows())\n 
iterate = tl_expt.WrapExecuteSequence(_wrapper_clustering, rows, nb_workers=params['nb_workers'])\n for dict_center in iterate:\n df_paths_new = df_paths_new.append(dict_center, ignore_index=True)\n\n df_paths_new.set_index('image', inplace=True)\n df_paths_new.to_csv(path_cover)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n logging.info('running...')\n\n cli_params = run_train.arg_parse_params(DEFAULT_PARAMS)\n main(cli_params)\n\n logging.info('DONE')\n"
] | [
[
"matplotlib.use",
"numpy.array",
"pandas.DataFrame",
"matplotlib.pylab.close",
"numpy.mean",
"sklearn.cluster.DBSCAN",
"matplotlib.pylab.subplots"
]
] |
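cluster_center_candidates in the script above reduces to two steps: DBSCAN over the candidate points, then a per-cluster mean. The following is a minimal, self-contained sketch of those two steps with synthetic 2-D points; the coordinates and the eps value are illustrative, not the script's DBSCAN_max_dist / DBSCAN_min_samples defaults.

import numpy as np
from sklearn import cluster

# Synthetic center candidates (coordinates are made up).
points = np.array([[10., 10.], [11., 9.], [80., 82.], [79., 81.], [200., 5.]])

dbscan = cluster.DBSCAN(eps=5., min_samples=1)
dbscan.fit(points)
labels = dbscan.labels_

# Collapse each cluster to the mean of its members, mirroring cluster_center_candidates().
centers = np.array([np.mean(points[labels == i], axis=0) for i in range(labels.max() + 1)])
print(centers)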
KonstantinKlepikov/pykeen | [
"bbfd0effc4b541a83c365865456df230059c0c77"
] | [
"src/pykeen/models/unimodal/structured_embedding.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"Implementation of structured model (SE).\"\"\"\n\nimport functools\nfrom typing import Optional\n\nimport numpy as np\nimport torch\nimport torch.autograd\nfrom torch import nn\nfrom torch.nn import functional\n\nfrom ..base import EntityEmbeddingModel\nfrom ...constants import DEFAULT_EMBEDDING_HPO_EMBEDDING_DIM_RANGE\nfrom ...losses import Loss\nfrom ...nn import Embedding\nfrom ...nn.init import xavier_uniform_\nfrom ...regularizers import Regularizer\nfrom ...triples import TriplesFactory\nfrom ...typing import DeviceHint\nfrom ...utils import compose\n\n__all__ = [\n 'StructuredEmbedding',\n]\n\n\nclass StructuredEmbedding(EntityEmbeddingModel):\n r\"\"\"An implementation of the Structured Embedding (SE) published by [bordes2011]_.\n\n SE applies role- and relation-specific projection matrices\n $\\textbf{M}_{r}^{h}, \\textbf{M}_{r}^{t} \\in \\mathbb{R}^{d \\times d}$ to the head and tail\n entities' embeddings before computing their differences. Then, the $l_p$ norm is applied\n and the result is negated such that smaller differences are considered better.\n\n .. math::\n\n f(h, r, t) = - \\|\\textbf{M}_{r}^{h} \\textbf{e}_h - \\textbf{M}_{r}^{t} \\textbf{e}_t\\|_p\n\n By employing different projections for the embeddings of the head and tail entities, SE explicitly differentiates\n the role of an entity as either the subject or object.\n \"\"\"\n\n #: The default strategy for optimizing the model's hyper-parameters\n hpo_default = dict(\n embedding_dim=DEFAULT_EMBEDDING_HPO_EMBEDDING_DIM_RANGE,\n scoring_fct_norm=dict(type=int, low=1, high=2),\n )\n\n def __init__(\n self,\n triples_factory: TriplesFactory,\n embedding_dim: int = 50,\n scoring_fct_norm: int = 1,\n loss: Optional[Loss] = None,\n preferred_device: DeviceHint = None,\n random_seed: Optional[int] = None,\n regularizer: Optional[Regularizer] = None,\n ) -> None:\n r\"\"\"Initialize SE.\n\n :param embedding_dim: The entity embedding dimension $d$. Is usually $d \\in [50, 300]$.\n :param scoring_fct_norm: The $l_p$ norm. 
Usually 1 for SE.\n \"\"\"\n super().__init__(\n triples_factory=triples_factory,\n embedding_dim=embedding_dim,\n loss=loss,\n preferred_device=preferred_device,\n random_seed=random_seed,\n regularizer=regularizer,\n entity_initializer=xavier_uniform_,\n entity_constrainer=functional.normalize,\n )\n\n self.scoring_fct_norm = scoring_fct_norm\n\n # Embeddings\n init_bound = 6 / np.sqrt(self.embedding_dim)\n # Initialise relation embeddings to unit length\n initializer = compose(\n functools.partial(nn.init.uniform_, a=-init_bound, b=+init_bound),\n functional.normalize,\n )\n self.left_relation_embeddings = Embedding.init_with_device(\n num_embeddings=triples_factory.num_relations,\n embedding_dim=embedding_dim ** 2,\n device=self.device,\n initializer=initializer,\n )\n self.right_relation_embeddings = Embedding.init_with_device(\n num_embeddings=triples_factory.num_relations,\n embedding_dim=embedding_dim ** 2,\n device=self.device,\n initializer=initializer,\n )\n\n def _reset_parameters_(self): # noqa: D102\n super()._reset_parameters_()\n self.left_relation_embeddings.reset_parameters()\n self.right_relation_embeddings.reset_parameters()\n\n def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102\n # Get embeddings\n h = self.entity_embeddings(indices=hrt_batch[:, 0]).view(-1, self.embedding_dim, 1)\n rel_h = self.left_relation_embeddings(indices=hrt_batch[:, 1]).view(-1, self.embedding_dim, self.embedding_dim)\n rel_t = self.right_relation_embeddings(indices=hrt_batch[:, 1]).view(-1, self.embedding_dim, self.embedding_dim)\n t = self.entity_embeddings(indices=hrt_batch[:, 2]).view(-1, self.embedding_dim, 1)\n\n # Project entities\n proj_h = rel_h @ h\n proj_t = rel_t @ t\n\n scores = -torch.norm(proj_h - proj_t, dim=1, p=self.scoring_fct_norm)\n return scores\n\n def score_t(self, hr_batch: torch.LongTensor, slice_size: int = None) -> torch.FloatTensor: # noqa: D102\n # Get embeddings\n h = self.entity_embeddings(indices=hr_batch[:, 0]).view(-1, self.embedding_dim, 1)\n rel_h = self.left_relation_embeddings(indices=hr_batch[:, 1]).view(-1, self.embedding_dim, self.embedding_dim)\n rel_t = self.right_relation_embeddings(indices=hr_batch[:, 1])\n rel_t = rel_t.view(-1, 1, self.embedding_dim, self.embedding_dim)\n t_all = self.entity_embeddings(indices=None).view(1, -1, self.embedding_dim, 1)\n\n if slice_size is not None:\n proj_t_arr = []\n # Project entities\n proj_h = rel_h @ h\n\n for t in torch.split(t_all, slice_size, dim=1):\n # Project entities\n proj_t = rel_t @ t\n proj_t_arr.append(proj_t)\n\n proj_t = torch.cat(proj_t_arr, dim=1)\n\n else:\n # Project entities\n proj_h = rel_h @ h\n proj_t = rel_t @ t_all\n\n scores = -torch.norm(proj_h[:, None, :, 0] - proj_t[:, :, :, 0], dim=-1, p=self.scoring_fct_norm)\n\n return scores\n\n def score_h(self, rt_batch: torch.LongTensor, slice_size: int = None) -> torch.FloatTensor: # noqa: D102\n # Get embeddings\n h_all = self.entity_embeddings(indices=None).view(1, -1, self.embedding_dim, 1)\n rel_h = self.left_relation_embeddings(indices=rt_batch[:, 0])\n rel_h = rel_h.view(-1, 1, self.embedding_dim, self.embedding_dim)\n rel_t = self.right_relation_embeddings(indices=rt_batch[:, 0]).view(-1, self.embedding_dim, self.embedding_dim)\n t = self.entity_embeddings(indices=rt_batch[:, 1]).view(-1, self.embedding_dim, 1)\n\n if slice_size is not None:\n proj_h_arr = []\n\n # Project entities\n proj_t = rel_t @ t\n\n for h in torch.split(h_all, slice_size, dim=1):\n # Project entities\n proj_h = rel_h @ h\n 
proj_h_arr.append(proj_h)\n\n proj_h = torch.cat(proj_h_arr, dim=1)\n else:\n # Project entities\n proj_h = rel_h @ h_all\n proj_t = rel_t @ t\n\n scores = -torch.norm(proj_h[:, :, :, 0] - proj_t[:, None, :, 0], dim=-1, p=self.scoring_fct_norm)\n\n return scores\n"
] | [
[
"torch.norm",
"torch.split",
"torch.cat",
"numpy.sqrt"
]
] |
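The SE scoring function in the record is a pair of relation-specific projections followed by a negative l_p norm. The sketch below reproduces that computation outside the model class with random tensors (the batch size and dimension are arbitrary; p=1 matches the default scoring_fct_norm), and also shows the torch.split / torch.cat slicing used by score_t to bound memory.

import numpy as np
import torch

batch, dim = 4, 8
init_bound = 6 / np.sqrt(dim)  # same uniform bound used to initialise the relation matrices

e_h = torch.randn(batch, dim, 1)
e_t = torch.randn(batch, dim, 1)
m_h = torch.empty(batch, dim, dim).uniform_(-init_bound, init_bound)
m_t = torch.empty(batch, dim, dim).uniform_(-init_bound, init_bound)

# f(h, r, t) = -||M_r^h e_h - M_r^t e_t||_p, computed with a batched matmul.
scores = -torch.norm(m_h @ e_h - m_t @ e_t, dim=1, p=1)
print(scores.shape)  # torch.Size([4, 1])

# Memory-friendly variant: project candidate tails in slices, as in score_t's slice_size path.
t_all = torch.randn(1, 100, dim, 1)
chunks = [m_t[:, None] @ t for t in torch.split(t_all, 25, dim=1)]
proj_t = torch.cat(chunks, dim=1)  # (batch, 100, dim, 1)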
hernot/hickle | [
"3b5efbb0d10f13a434b6eed9046dec7fc7bbde85"
] | [
"hickle/tests/test_hickle.py"
] | [
"#! /usr/bin/env python\n# encoding: utf-8\n\"\"\"\n# test_hickle.py\n\nUnit tests for hickle module.\n\n\"\"\"\n\n\n# %% IMPORTS\n# Built-in imports\nfrom collections import OrderedDict as odict\nimport os\nfrom pprint import pprint\n\n# Package imports\nimport h5py\nimport numpy as np\nfrom py.path import local\nimport pytest\n\n# hickle imports\nfrom hickle import dump, helpers, hickle, load, loaders\n\n# Set current working directory to the temporary directory\nlocal.get_temproot().chdir()\n\n\n# %% GLOBALS\nNESTED_DICT = {\n \"level1_1\": {\n \"level2_1\": [1, 2, 3],\n \"level2_2\": [4, 5, 6]\n },\n \"level1_2\": {\n \"level2_1\": [1, 2, 3],\n \"level2_2\": [4, 5, 6]\n },\n \"level1_3\": {\n \"level2_1\": {\n \"level3_1\": [1, 2, 3],\n \"level3_2\": [4, 5, 6]\n },\n \"level2_2\": [4, 5, 6]\n }\n}\n\n\n# %% HELPER DEFINITIONS\n# Define a test function that must be serialized and unpacked again\ndef func(a, b, c=0):\n return(a, b, c)\n\n\n# Define a class that must always be pickled\nclass with_state(object):\n def __init__(self):\n self.a = 12\n self.b = {\n 'love': np.ones([12, 7]),\n 'hatred': np.zeros([4, 9])}\n\n def __getstate__(self):\n self.a *= 2\n return({\n 'a': self.a,\n 'b': self.b})\n\n def __setstate__(self, state):\n self.a = state['a']\n self.b = state['b']\n\n def __getitem__(self, index):\n if(index == 0):\n return(self.a)\n if(index < 2):\n return(self.b['hatred'])\n if(index > 2):\n raise ValueError(\"index unknown\")\n return(self.b['love'])\n\n\n# %% FUNCTION DEFINITIONS\ndef test_invalid_file():\n \"\"\" Test if trying to use a non-file object fails. \"\"\"\n\n with pytest.raises(hickle.FileError):\n dump('test', ())\n\n\ndef test_state_obj():\n \"\"\" Dumping and loading a class object with pickle states\n\n https://github.com/telegraphic/hickle/issues/125\"\"\"\n filename, mode = 'test.h5', 'w'\n obj = with_state()\n with pytest.warns(loaders.load_builtins.SerializedWarning):\n dump(obj, filename, mode)\n obj_hkl = load(filename)\n assert type(obj) == type(obj_hkl)\n assert np.allclose(obj[1], obj_hkl[1])\n\n\ndef test_local_func():\n \"\"\" Dumping and loading a local function\n\n https://github.com/telegraphic/hickle/issues/119\"\"\"\n filename, mode = 'test.h5', 'w'\n with pytest.warns(loaders.load_builtins.SerializedWarning):\n dump(func, filename, mode)\n func_hkl = load(filename)\n assert type(func) == type(func_hkl)\n assert func(1, 2) == func_hkl(1, 2)\n\n\ndef test_binary_file():\n \"\"\" Test if using a binary file works\n\n https://github.com/telegraphic/hickle/issues/123\"\"\"\n\n with open(\"test.hdf5\", \"w\") as f:\n hickle.dump(None, f)\n\n with open(\"test.hdf5\", \"wb\") as f:\n hickle.dump(None, f)\n\n\ndef test_non_empty_group():\n \"\"\" Test if attempting to dump to a group with data fails \"\"\"\n\n hickle.dump(None, 'test.hdf5')\n with pytest.raises(ValueError):\n dump(None, 'test.hdf5', 'r+')\n\n\ndef test_invalid_path():\n \"\"\" Test if attempting to load from an invalid path fails \"\"\"\n\n hickle.dump(None, 'test.hdf5')\n with pytest.raises(ValueError):\n hickle.load('test.hdf5', path='/test')\n\n\ndef test_string():\n \"\"\" Dumping and loading a string \"\"\"\n filename, mode = 'test.h5', 'w'\n string_obj = \"The quick brown fox jumps over the lazy dog\"\n dump(string_obj, filename, mode)\n string_hkl = load(filename)\n assert isinstance(string_hkl, str)\n assert string_obj == string_hkl\n\n\ndef test_65bit_int():\n \"\"\" Dumping and loading an integer with arbitrary precision\n\n 
https://github.com/telegraphic/hickle/issues/113\"\"\"\n i = 2**64\n dump(i, 'test.hdf5')\n i_hkl = load('test.hdf5')\n assert i == i_hkl\n\n j = -2**63-1\n dump(j, 'test.hdf5')\n j_hkl = load('test.hdf5')\n assert j == j_hkl\n\n\ndef test_list():\n \"\"\" Dumping and loading a list \"\"\"\n filename, mode = 'test_list.h5', 'w'\n list_obj = [1, 2, 3, 4, 5]\n dump(list_obj, filename, mode=mode)\n list_hkl = load(filename)\n try:\n assert isinstance(list_hkl, list)\n assert list_obj == list_hkl\n import h5py\n a = h5py.File(filename, 'r')\n a.close()\n\n except AssertionError:\n print(\"ERR:\", list_obj, list_hkl)\n import h5py\n\n raise\n\n\ndef test_set():\n \"\"\" Dumping and loading a list \"\"\"\n filename, mode = 'test_set.h5', 'w'\n list_obj = set([1, 0, 3, 4.5, 11.2])\n dump(list_obj, filename, mode)\n list_hkl = load(filename)\n try:\n assert isinstance(list_hkl, set)\n assert list_obj == list_hkl\n except AssertionError:\n print(type(list_obj))\n print(type(list_hkl))\n raise\n\n\ndef test_numpy():\n \"\"\" Dumping and loading numpy array \"\"\"\n filename, mode = 'test.h5', 'w'\n dtypes = ['float32', 'float64', 'complex64', 'complex128']\n\n for dt in dtypes:\n array_obj = np.ones(8, dtype=dt)\n dump(array_obj, filename, mode)\n array_hkl = load(filename)\n try:\n assert array_hkl.dtype == array_obj.dtype\n assert np.all((array_hkl, array_obj))\n except AssertionError:\n print(array_hkl)\n print(array_obj)\n raise\n\n\ndef test_masked():\n \"\"\" Test masked numpy array \"\"\"\n filename, mode = 'test.h5', 'w'\n a = np.ma.array([1, 2, 3, 4], dtype='float32', mask=[0, 1, 0, 0])\n\n dump(a, filename, mode)\n a_hkl = load(filename)\n\n try:\n assert a_hkl.dtype == a.dtype\n assert np.all((a_hkl, a))\n except AssertionError:\n print(a_hkl)\n print(a)\n raise\n\n\ndef test_object_numpy():\n \"\"\" Dumping and loading a NumPy array containing non-NumPy objects.\n\n https://github.com/telegraphic/hickle/issues/90\"\"\"\n\n arr = np.array([[NESTED_DICT], ('What is this?',), {1, 2, 3, 7, 1}],\n dtype=object)\n dump(arr, 'test.hdf5')\n arr_hkl = load('test.hdf5')\n assert np.all(arr == arr_hkl)\n\n arr2 = np.array(NESTED_DICT, dtype=object)\n dump(arr2, 'test.hdf5')\n arr_hkl2 = load('test.hdf5')\n assert np.all(arr2 == arr_hkl2)\n\n\ndef test_string_numpy():\n \"\"\" Dumping and loading NumPy arrays containing Python 3 strings. 
\"\"\"\n\n arr = np.array([\"1313e\", \"was\", \"maybe?\", \"here\"])\n dump(arr, 'test.hdf5')\n arr_hkl = load('test.hdf5')\n assert np.all(arr == arr_hkl)\n\n\ndef test_list_object_numpy():\n \"\"\" Dumping and loading a list of NumPy arrays with objects.\n\n https://github.com/telegraphic/hickle/issues/90\"\"\"\n\n lst = [np.array(NESTED_DICT, dtype=object),\n np.array([('What is this?',), {1, 2, 3, 7, 1}], dtype=object)]\n dump(lst, 'test.hdf5')\n lst_hkl = load('test.hdf5')\n assert np.all(lst[0] == lst_hkl[0])\n assert np.all(lst[1] == lst_hkl[1])\n\n\ndef test_dict():\n \"\"\" Test dictionary dumping and loading \"\"\"\n filename, mode = 'test.h5', 'w'\n\n dd = {\n 'name': b'Danny',\n 'age': 28,\n 'height': 6.1,\n 'dork': True,\n 'nums': [1, 2, 3],\n 'narr': np.array([1, 2, 3]),\n }\n\n dump(dd, filename, mode)\n dd_hkl = load(filename)\n\n for k in dd.keys():\n try:\n assert k in dd_hkl.keys()\n\n if isinstance(dd[k], np.ndarray):\n assert np.all((dd[k], dd_hkl[k]))\n else:\n pass\n assert isinstance(dd_hkl[k], dd[k].__class__)\n except AssertionError:\n print(k)\n print(dd_hkl[k])\n print(dd[k])\n print(type(dd_hkl[k]), type(dd[k]))\n raise\n\n\ndef test_odict():\n \"\"\" Test ordered dictionary dumping and loading\n\n https://github.com/telegraphic/hickle/issues/65\"\"\"\n filename, mode = 'test.hdf5', 'w'\n\n od = odict(((3, [3, 0.1]), (7, [5, 0.1]), (5, [3, 0.1])))\n dump(od, filename, mode)\n od_hkl = load(filename)\n\n assert od.keys() == od_hkl.keys()\n\n for od_item, od_hkl_item in zip(od.items(), od_hkl.items()):\n assert od_item == od_hkl_item\n\n\ndef test_empty_dict():\n \"\"\" Test empty dictionary dumping and loading\n\n https://github.com/telegraphic/hickle/issues/91\"\"\"\n filename, mode = 'test.h5', 'w'\n\n dump({}, filename, mode)\n assert load(filename) == {}\n\n\ndef test_compression():\n \"\"\" Test compression on datasets\"\"\"\n\n filename, mode = 'test.h5', 'w'\n dtypes = ['int32', 'float32', 'float64', 'complex64', 'complex128']\n\n comps = [None, 'gzip', 'lzf']\n\n for dt in dtypes:\n for cc in comps:\n array_obj = np.ones(32768, dtype=dt)\n dump(array_obj, filename, mode, compression=cc)\n print(cc, os.path.getsize(filename))\n array_hkl = load(filename)\n try:\n assert array_hkl.dtype == array_obj.dtype\n assert np.all((array_hkl, array_obj))\n except AssertionError:\n print(array_hkl)\n print(array_obj)\n raise\n\n\ndef test_dict_int_key():\n \"\"\" Test for dictionaries with integer keys \"\"\"\n filename, mode = 'test.h5', 'w'\n\n dd = {\n 0: \"test\",\n 1: \"test2\"\n }\n\n dump(dd, filename, mode)\n load(filename)\n\n\ndef test_dict_nested():\n \"\"\" Test for dictionaries with integer keys \"\"\"\n filename, mode = 'test.h5', 'w'\n\n dd = NESTED_DICT\n\n dump(dd, filename, mode)\n dd_hkl = load(filename)\n\n ll_hkl = dd_hkl[\"level1_3\"][\"level2_1\"][\"level3_1\"]\n ll = dd[\"level1_3\"][\"level2_1\"][\"level3_1\"]\n assert ll == ll_hkl\n\n\ndef test_masked_dict():\n \"\"\" Test dictionaries with masked arrays \"\"\"\n\n filename, mode = 'test.h5', 'w'\n\n dd = {\n \"data\": np.ma.array([1, 2, 3], mask=[True, False, False]),\n \"data2\": np.array([1, 2, 3, 4, 5])\n }\n\n dump(dd, filename, mode)\n dd_hkl = load(filename)\n\n for k in dd.keys():\n try:\n assert k in dd_hkl.keys()\n if isinstance(dd[k], np.ndarray):\n assert np.all((dd[k], dd_hkl[k]))\n elif isinstance(dd[k], np.ma.MaskedArray):\n print(dd[k].data)\n print(dd_hkl[k].data)\n assert np.allclose(dd[k].data, dd_hkl[k].data)\n assert np.allclose(dd[k].mask, dd_hkl[k].mask)\n\n assert 
isinstance(dd_hkl[k], dd[k].__class__)\n\n except AssertionError:\n print(k)\n print(dd_hkl[k])\n print(dd[k])\n print(type(dd_hkl[k]), type(dd[k]))\n raise\n\n\ndef test_np_float():\n \"\"\" Test for singular np dtypes \"\"\"\n filename, mode = 'np_float.h5', 'w'\n\n dtype_list = (np.float16, np.float32, np.float64,\n np.complex64, np.complex128,\n np.int8, np.int16, np.int32, np.int64,\n np.uint8, np.uint16, np.uint32, np.uint64)\n\n for dt in dtype_list:\n\n dd = dt(1)\n dump(dd, filename, mode)\n dd_hkl = load(filename)\n assert dd == dd_hkl\n assert dd.dtype == dd_hkl.dtype\n\n dd = {}\n for dt in dtype_list:\n dd[str(dt)] = dt(1.0)\n dump(dd, filename, mode)\n dd_hkl = load(filename)\n\n print(dd)\n for dt in dtype_list:\n assert dd[str(dt)] == dd_hkl[str(dt)]\n\n\ndef test_comp_kwargs():\n \"\"\" Test compression with some kwargs for shuffle and chunking \"\"\"\n\n filename, mode = 'test.h5', 'w'\n dtypes = ['int32', 'float32', 'float64', 'complex64', 'complex128']\n\n comps = [None, 'gzip', 'lzf']\n chunks = [(100, 100), (250, 250)]\n shuffles = [True, False]\n scaleoffsets = [0, 1, 2]\n\n for dt in dtypes:\n for cc in comps:\n for ch in chunks:\n for sh in shuffles:\n for so in scaleoffsets:\n kwargs = {\n 'compression': cc,\n 'dtype': dt,\n 'chunks': ch,\n 'shuffle': sh,\n 'scaleoffset': so\n }\n array_obj = NESTED_DICT\n dump(array_obj, filename, mode, compression=cc)\n print(kwargs, os.path.getsize(filename))\n load(filename)\n\n\ndef test_list_numpy():\n \"\"\" Test converting a list of numpy arrays \"\"\"\n\n filename, mode = 'test.h5', 'w'\n\n a = np.ones(1024)\n b = np.zeros(1000)\n c = [a, b]\n\n dump(c, filename, mode)\n dd_hkl = load(filename)\n\n print(dd_hkl)\n\n assert isinstance(dd_hkl, list)\n assert isinstance(dd_hkl[0], np.ndarray)\n\n\ndef test_tuple_numpy():\n \"\"\" Test converting a list of numpy arrays \"\"\"\n\n filename, mode = 'test.h5', 'w'\n\n a = np.ones(1024)\n b = np.zeros(1000)\n c = (a, b, a)\n\n dump(c, filename, mode)\n dd_hkl = load(filename)\n\n print(dd_hkl)\n\n assert isinstance(dd_hkl, tuple)\n assert isinstance(dd_hkl[0], np.ndarray)\n\n\ndef test_numpy_dtype():\n \"\"\" Dumping and loading a NumPy dtype \"\"\"\n\n dtype = np.dtype('float16')\n dump(dtype, 'test.hdf5')\n dtype_hkl = load('test.hdf5')\n assert dtype == dtype_hkl\n\n\ndef test_none():\n \"\"\" Test None type hickling \"\"\"\n\n filename, mode = 'test.h5', 'w'\n\n a = None\n\n dump(a, filename, mode)\n dd_hkl = load(filename)\n print(a)\n print(dd_hkl)\n\n assert isinstance(dd_hkl, type(None))\n\n\ndef test_file_open_close():\n \"\"\" https://github.com/telegraphic/hickle/issues/20 \"\"\"\n import h5py\n f = h5py.File('test.hdf', 'w')\n a = np.arange(5)\n\n dump(a, 'test.hkl')\n dump(a, 'test.hkl')\n\n dump(a, f, mode='w')\n f.close()\n try:\n dump(a, f, mode='w')\n except hickle.ClosedFileError:\n print(\"Tests: Closed file exception caught\")\n\n\ndef test_hdf5_group():\n import h5py\n file = h5py.File('test.hdf5', 'w')\n group = file.create_group('test_group')\n a = np.arange(5)\n dump(a, group)\n file.close()\n\n a_hkl = load('test.hdf5', path='/test_group')\n assert np.allclose(a_hkl, a)\n\n file = h5py.File('test.hdf5', 'r+')\n group = file.create_group('test_group2')\n b = np.arange(8)\n\n dump(b, group, path='deeper/and_deeper')\n file.close()\n\n b_hkl = load('test.hdf5', path='/test_group2/deeper/and_deeper')\n assert np.allclose(b_hkl, b)\n\n file = h5py.File('test.hdf5', 'r')\n b_hkl2 = load(file['test_group2'], path='deeper/and_deeper')\n assert np.allclose(b_hkl2, 
b)\n file.close()\n\n\ndef test_list_order():\n \"\"\" https://github.com/telegraphic/hickle/issues/26 \"\"\"\n d = [np.arange(n + 1) for n in range(20)]\n dump(d, 'test.h5')\n d_hkl = load('test.h5')\n\n try:\n for ii, xx in enumerate(d):\n assert d[ii].shape == d_hkl[ii].shape\n for ii, xx in enumerate(d):\n assert np.allclose(d[ii], d_hkl[ii])\n except AssertionError:\n print(d[ii], d_hkl[ii])\n raise\n\n\ndef test_embedded_array():\n \"\"\" See https://github.com/telegraphic/hickle/issues/24 \"\"\"\n\n d_orig = [[np.array([10., 20.]), np.array([10, 20, 30])],\n [np.array([10, 2]), np.array([1.])]]\n dump(d_orig, 'test.h5')\n d_hkl = load('test.h5')\n\n for ii, xx in enumerate(d_orig):\n for jj, yy in enumerate(xx):\n assert np.allclose(d_orig[ii][jj], d_hkl[ii][jj])\n\n print(d_hkl)\n print(d_orig)\n\n\n##############\n# NEW TESTS #\n###############\ndef generate_nested():\n a = [1, 2, 3]\n b = [a, a, a]\n c = [a, b, 's']\n d = [a, b, c, c, a]\n e = [d, d, d, d, 1]\n f = {'a': a, 'b': b, 'e': e}\n g = {'f': f, 'a': e, 'd': d}\n h = {'h': g, 'g': f}\n z = [f, a, b, c, d, e, f, g, h, g, h]\n a = np.array([1, 2, 3, 4])\n b = set([1, 2, 3, 4, 5])\n c = (1, 2, 3, 4, 5)\n d = np.ma.array([1, 2, 3, 4, 5, 6, 7, 8])\n z = {'a': a, 'b': b, 'c': c, 'd': d, 'z': z}\n return z\n\n\ndef test_is_iterable():\n a = [1, 2, 3]\n b = 1\n\n assert helpers.check_is_iterable(a)\n assert not helpers.check_is_iterable(b)\n\n\ndef test_check_iterable_item_type():\n a = [1, 2, 3]\n b = [a, a, a]\n c = [a, b, 's']\n\n type_a = helpers.check_iterable_item_type(a)\n type_b = helpers.check_iterable_item_type(b)\n type_c = helpers.check_iterable_item_type(c)\n\n assert type_a is int\n assert type_b is list\n assert not type_c\n\n\ndef test_dump_nested():\n \"\"\" Dump a complicated nested object to HDF5\n \"\"\"\n z = generate_nested()\n dump(z, 'test.hkl', mode='w')\n\n\ndef test_with_open_file():\n \"\"\"\n Testing dumping and loading to an open file\n\n https://github.com/telegraphic/hickle/issues/92\"\"\"\n\n lst = [1]\n tpl = (1,)\n dct = {1: 1}\n arr = np.array([1])\n\n with h5py.File('test.hkl', 'w') as file:\n dump(lst, file, path='/lst')\n dump(tpl, file, path='/tpl')\n dump(dct, file, path='/dct')\n dump(arr, file, path='/arr')\n\n with h5py.File('test.hkl', 'r') as file:\n assert load(file, '/lst') == lst\n assert load(file, '/tpl') == tpl\n assert load(file, '/dct') == dct\n assert load(file, '/arr') == arr\n\n\ndef test_load():\n a = set([1, 2, 3, 4])\n b = set([5, 6, 7, 8])\n c = set([9, 10, 11, 12])\n z = (a, b, c)\n z = [z, z]\n z = (z, z, z, z, z)\n\n print(\"Original:\")\n pprint(z)\n dump(z, 'test.hkl', mode='w')\n\n print(\"\\nReconstructed:\")\n z = load('test.hkl')\n pprint(z)\n\n\ndef test_sort_keys():\n keys = [b'data_0', b'data_1', b'data_2', b'data_3', b'data_10']\n keys_sorted = [b'data_0', b'data_1', b'data_2', b'data_3', b'data_10']\n\n print(keys)\n print(keys_sorted)\n assert helpers.sort_keys(keys) == keys_sorted\n\n\ndef test_ndarray():\n a = np.array([1, 2, 3])\n b = np.array([2, 3, 4])\n z = (a, b)\n\n print(\"Original:\")\n pprint(z)\n dump(z, 'test.hkl', mode='w')\n\n print(\"\\nReconstructed:\")\n z = load('test.hkl')\n pprint(z)\n\n\ndef test_ndarray_masked():\n a = np.ma.array([1, 2, 3])\n b = np.ma.array([2, 3, 4], mask=[True, False, True])\n z = (a, b)\n\n print(\"Original:\")\n pprint(z)\n dump(z, 'test.hkl', mode='w')\n\n print(\"\\nReconstructed:\")\n z = load('test.hkl')\n pprint(z)\n\n\ndef test_simple_dict():\n a = {'key1': 1, 'key2': 2}\n\n dump(a, 'test.hkl')\n z = 
load('test.hkl')\n\n pprint(a)\n pprint(z)\n\n\ndef test_complex_dict():\n a = {'akey': 1, 'akey2': 2}\n c = {'ckey': \"hello\", \"ckey2\": \"hi there\"}\n z = {'zkey1': a, 'zkey2': a, 'zkey3': c}\n\n print(\"Original:\")\n pprint(z)\n dump(z, 'test.hkl', mode='w')\n\n print(\"\\nReconstructed:\")\n z = load('test.hkl')\n pprint(z)\n\n\ndef test_multi_hickle():\n \"\"\" Dumping to and loading from the same file several times\n\n https://github.com/telegraphic/hickle/issues/20\"\"\"\n\n a = {'a': 123, 'b': [1, 2, 4]}\n\n if os.path.exists(\"test.hkl\"):\n os.remove(\"test.hkl\")\n dump(a, \"test.hkl\", path=\"/test\", mode=\"w\")\n dump(a, \"test.hkl\", path=\"/test2\", mode=\"r+\")\n dump(a, \"test.hkl\", path=\"/test3\", mode=\"r+\")\n dump(a, \"test.hkl\", path=\"/test4\", mode=\"r+\")\n\n load(\"test.hkl\", path=\"/test\")\n load(\"test.hkl\", path=\"/test2\")\n load(\"test.hkl\", path=\"/test3\")\n load(\"test.hkl\", path=\"/test4\")\n\n\ndef test_complex():\n \"\"\" Test complex value dtype is handled correctly\n\n https://github.com/telegraphic/hickle/issues/29 \"\"\"\n\n data = {\"A\": 1.5, \"B\": 1.5 + 1j, \"C\": np.linspace(0, 1, 4) + 2j}\n dump(data, \"test.hkl\")\n data2 = load(\"test.hkl\")\n for key in data.keys():\n assert isinstance(data[key], data2[key].__class__)\n\n\ndef test_nonstring_keys():\n \"\"\" Test that keys are reconstructed back to their original datatypes\n https://github.com/telegraphic/hickle/issues/36\n \"\"\"\n\n data = {\n u'test': 123,\n 'def': [b'test'],\n 'hik': np.array([1, 2, 3]),\n 0: 0,\n True: ['test'],\n 1.1: 'hey',\n 1j: 'complex_hashable',\n (1, 2): 'boo',\n ('A', 17.4, 42): [1, 7, 'A'],\n (): '1313e was here',\n '0': 0,\n None: None\n }\n\n print(data)\n dump(data, \"test.hkl\")\n data2 = load(\"test.hkl\")\n print(data2)\n\n for key in data.keys():\n assert key in data2.keys()\n\n print(data2)\n\n\ndef test_scalar_compression():\n \"\"\" Test bug where compression causes a crash on scalar datasets\n\n (Scalars are incompressible!)\n https://github.com/telegraphic/hickle/issues/37\n \"\"\"\n data = {'a': 0, 'b': np.float(2), 'c': True}\n\n dump(data, \"test.hkl\", compression='gzip')\n data2 = load(\"test.hkl\")\n\n print(data2)\n for key in data.keys():\n assert isinstance(data[key], data2[key].__class__)\n\n\ndef test_bytes():\n \"\"\" Dumping and loading a string. 
PYTHON3 ONLY \"\"\"\n\n filename, mode = 'test.h5', 'w'\n string_obj = b\"The quick brown fox jumps over the lazy dog\"\n dump(string_obj, filename, mode)\n string_hkl = load(filename)\n print(type(string_obj))\n print(type(string_hkl))\n assert isinstance(string_hkl, bytes)\n assert string_obj == string_hkl\n\n\ndef test_np_scalar():\n \"\"\" Numpy scalar datatype\n\n https://github.com/telegraphic/hickle/issues/50\n \"\"\"\n\n fid = 'test.h5py'\n r0 = {'test': np.float64(10.)}\n dump(r0, fid)\n r = load(fid)\n print(r)\n assert isinstance(r0['test'], r['test'].__class__)\n\n\ndef test_slash_dict_keys():\n \"\"\" Support for having slashes in dict keys\n\n https://github.com/telegraphic/hickle/issues/124\"\"\"\n dct = {'a/b': [1, '2'], 1.4: 3}\n\n dump(dct, 'test.hdf5', 'w')\n dct_hkl = load('test.hdf5')\n\n assert isinstance(dct_hkl, dict)\n for key, val in dct_hkl.items():\n assert val == dct.get(key)\n\n # Check that having backslashes in dict keys will serialize the dict\n dct2 = {'a\\\\b': [1, '2'], 1.4: 3}\n with pytest.warns(loaders.load_builtins.SerializedWarning):\n dump(dct2, 'test.hdf5')\n\n\n# %% MAIN SCRIPT\nif __name__ == '__main__':\n \"\"\" Some tests and examples \"\"\"\n test_sort_keys()\n\n test_np_scalar()\n test_scalar_compression()\n test_complex()\n test_file_open_close()\n test_hdf5_group()\n test_none()\n test_masked_dict()\n test_list()\n test_set()\n test_numpy()\n test_dict()\n test_odict()\n test_empty_dict()\n test_compression()\n test_masked()\n test_dict_nested()\n test_comp_kwargs()\n test_list_numpy()\n test_tuple_numpy()\n test_list_order()\n test_embedded_array()\n test_np_float()\n test_string()\n test_nonstring_keys()\n test_bytes()\n\n # NEW TESTS\n test_is_iterable()\n test_check_iterable_item_type()\n test_dump_nested()\n test_with_open_file()\n test_load()\n test_sort_keys()\n test_ndarray()\n test_ndarray_masked()\n test_simple_dict()\n test_complex_dict()\n test_multi_hickle()\n test_dict_int_key()\n test_local_func()\n test_binary_file()\n test_state_obj()\n test_slash_dict_keys()\n test_invalid_file()\n test_non_empty_group()\n test_numpy_dtype()\n test_object_numpy()\n test_string_numpy()\n test_list_object_numpy()\n\n # Cleanup\n print(\"ALL TESTS PASSED!\")\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.float",
"numpy.ones",
"numpy.float64",
"numpy.allclose",
"numpy.ma.array",
"numpy.arange",
"numpy.all",
"numpy.linspace",
"numpy.dtype"
]
] |
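Most of the tests in test_hickle.py follow the same dump/load round-trip pattern: serialise an object to HDF5, load it back, and compare. A condensed sketch of that pattern, assuming hickle is importable exactly as in the tests; the file name and sample values are arbitrary.

import numpy as np
from hickle import dump, load

data = {
    'narr': np.arange(5),
    'masked': np.ma.array([1., 2., 3.], mask=[False, True, False]),
}

dump(data, 'roundtrip.hkl', mode='w')
data_hkl = load('roundtrip.hkl')

assert np.allclose(data['narr'], data_hkl['narr'])
assert np.allclose(data['masked'].mask, data_hkl['masked'].mask)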
ssg-research/openfl | [
"b60cbfbdad595e653c94cee23fd35add993b94b0"
] | [
"openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/envoy/dogs_cats_shard_descriptor.py"
] | [
"# Copyright (C) 2020-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Cats and dogs shard descriptor.\"\"\"\n\nimport json\nimport os\nimport shutil\nfrom hashlib import md5\nfrom logging import getLogger\nfrom pathlib import Path\nfrom random import shuffle\nfrom typing import Optional\nfrom zipfile import ZipFile\n\nimport numpy as np\nfrom kaggle.api.kaggle_api_extended import KaggleApi\nfrom PIL import Image\n\nfrom openfl.interface.interactive_api.shard_descriptor import ShardDataset\nfrom openfl.interface.interactive_api.shard_descriptor import ShardDescriptor\n\nlogger = getLogger(__name__)\n\n\nclass DogsCatsShardDataset(ShardDataset):\n \"\"\"Dogs and cats Shard dataset class.\"\"\"\n\n def __init__(self, data_type: str, dataset_dir: Path,\n rank: int = 1, worldsize: int = 1, enforce_image_hw=None):\n \"\"\"Initialize DogsCatsShardDataset.\"\"\"\n self.rank = rank\n self.worldsize = worldsize\n self.dataset_dir = dataset_dir\n self.enforce_image_hw = enforce_image_hw\n self.img_path = self.dataset_dir / data_type\n\n self.img_names = [\n img.name\n for img in sorted(self.img_path.iterdir())\n if img.suffix == '.jpg'\n ]\n\n # Sharding\n self.img_names = self.img_names[self.rank - 1::self.worldsize]\n # Shuffling the results dataset after choose half pictures of each class\n shuffle(self.img_names)\n\n def __getitem__(self, index):\n \"\"\"Return a item by the index.\"\"\"\n name = self.img_names[index]\n # Reading data\n img = Image.open(self.img_path / name)\n img_class = 1 if name[:3] == 'dog' else 0\n assert name[:3] in {'cat', 'dog'}, 'Wrong object classification'\n\n if self.enforce_image_hw is not None:\n # If we need to resize data\n # PIL accepts (w,h) tuple, not (h,w)\n img = img.resize(self.enforce_image_hw[::-1])\n\n img = np.asarray(img)\n\n assert img.shape[2] == 3\n\n return img, np.asarray([img_class], dtype=np.uint8)\n\n def __len__(self):\n \"\"\"Return the len of the dataset.\"\"\"\n return len(self.img_names)\n\n\nclass DogsCatsShardDescriptor(ShardDescriptor):\n \"\"\"Shard descriptor class.\"\"\"\n\n def __init__(self, data_folder: str = 'data',\n rank_worldsize: str = '1,3',\n enforce_image_hw: Optional[str] = None) -> None:\n \"\"\"Initialize DogsCatsShardDescriptor.\"\"\"\n super().__init__()\n # Settings for sharding the dataset\n self.rank, self.worldsize = map(int, rank_worldsize.split(','))\n\n self.data_folder = Path.cwd() / data_folder\n self.download_dataset()\n\n # Settings for resizing data\n self.enforce_image_hw = None\n if enforce_image_hw is not None:\n self.enforce_image_hw = tuple(map(int, enforce_image_hw.split(',')))\n\n # Calculating data and target shapes\n ds = self.get_dataset()\n sample, target = ds[0]\n self._sample_shape = [str(dim) for dim in sample.shape]\n self._target_shape = [str(*target.shape)]\n\n assert self._target_shape[0] == '1', 'Target shape Error'\n\n def download_dataset(self):\n \"\"\"Download dataset from Kaggle.\"\"\"\n if not os.path.exists(self.data_folder):\n os.mkdir(self.data_folder)\n\n if not self.is_dataset_complete():\n logger.info('Your dataset is absent or damaged. Downloading ... 
')\n api = KaggleApi()\n api.authenticate()\n\n if os.path.exists('data/train'):\n shutil.rmtree('data/train')\n\n api.competition_download_file(\n 'dogs-vs-cats-redux-kernels-edition',\n 'train.zip', path=self.data_folder\n )\n\n with ZipFile(self.data_folder / 'train.zip', 'r') as zipobj:\n zipobj.extractall(self.data_folder)\n\n os.remove(self.data_folder / 'train.zip')\n\n self.save_all_md5()\n\n def get_dataset(self, dataset_type='train'):\n \"\"\"Return a shard dataset by type.\"\"\"\n return DogsCatsShardDataset(\n data_type=dataset_type,\n dataset_dir=self.data_folder,\n rank=self.rank,\n worldsize=self.worldsize,\n enforce_image_hw=self.enforce_image_hw\n )\n\n def calc_all_md5(self):\n \"\"\"Calculate hash of all dataset.\"\"\"\n md5_dict = {}\n for root, _, files in os.walk(self.data_folder):\n for file in files:\n if file == 'dataset.json':\n continue\n md5_calc = md5()\n rel_dir = os.path.relpath(root, self.data_folder)\n rel_file = os.path.join(rel_dir, file)\n\n with open(self.data_folder / rel_file, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b''):\n md5_calc.update(chunk)\n md5_dict[rel_file] = md5_calc.hexdigest()\n return md5_dict\n\n def save_all_md5(self):\n \"\"\"Save dataset hash.\"\"\"\n all_md5 = self.calc_all_md5()\n with open(os.path.join(self.data_folder, 'dataset.json'), 'w') as f:\n json.dump(all_md5, f)\n\n def is_dataset_complete(self):\n \"\"\"Check dataset integrity.\"\"\"\n new_md5 = self.calc_all_md5()\n try:\n with open(os.path.join(self.data_folder, 'dataset.json'), 'r') as f:\n old_md5 = json.load(f)\n except FileNotFoundError:\n return False\n\n return new_md5 == old_md5\n\n @property\n def sample_shape(self):\n \"\"\"Return the sample shape info.\"\"\"\n return self._sample_shape\n\n @property\n def target_shape(self):\n \"\"\"Return the target shape info.\"\"\"\n return self._target_shape\n\n @property\n def dataset_description(self) -> str:\n \"\"\"Return the dataset description.\"\"\"\n return (f'Dogs and Cats dataset, shard number {self.rank} '\n f'out of {self.worldsize}')\n"
] | [
[
"numpy.asarray"
]
] |
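DogsCatsShardDataset.__getitem__ ultimately returns an (H, W, 3) image array and a one-element uint8 label via numpy.asarray, resizing with a (w, h) tuple when enforce_image_hw is set. A standalone sketch of that conversion using a synthetic PIL image instead of the Kaggle files (image size and colour are arbitrary):

import numpy as np
from PIL import Image

# Synthetic 3-channel image standing in for one training JPEG.
img = Image.new('RGB', (32, 24), color=(128, 64, 32))

# Optional resize: PIL expects (w, h), i.e. enforce_image_hw reversed.
img = img.resize((16, 12))

sample = np.asarray(img)                  # (H, W, 3) uint8 array
target = np.asarray([1], dtype=np.uint8)  # 1 = dog, 0 = cat

assert sample.shape[2] == 3
print(sample.shape, target)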
0x00-pl/tvm | [
"f3abb3d88f3a1145a4454649b454fe0139f19bc9"
] | [
"tests/python/frontend/tflite/test_forward.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=import-self, invalid-name, unused-argument\n\"\"\"\nTFLite testcases\n================\nThis article is a test script to test TFLite operator with Relay.\n\"\"\"\nfrom __future__ import print_function\nfrom functools import partial\nimport numpy as np\nimport tvm\nfrom tvm import relay\nimport tensorflow as tf\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import variables\ntry:\n from tensorflow import lite as interpreter_wrapper\nexcept ImportError:\n from tensorflow.contrib import lite as interpreter_wrapper\n\nimport tvm.relay.testing.tf as tf_testing\nfrom packaging import version as package_version\n\n#######################################################################\n# Generic run functions for TVM & TFLite\n# --------------------------------------\ndef convert_to_list(x):\n if not isinstance(x, list):\n x = [x]\n return x\n\ndef run_tvm_graph(tflite_model_buf, input_data, input_node, num_output=1, target='llvm',\n out_names=None):\n \"\"\" Generic function to compile on relay and execute on tvm \"\"\"\n try:\n import tflite.Model\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n # get TFLite model from buffer\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)\n\n input_data = convert_to_list(input_data)\n input_node = convert_to_list(input_node)\n\n shape_dict = {}\n dtype_dict = {}\n for i, e in enumerate(input_node):\n shape_dict[e] = input_data[i].shape\n dtype_dict[e] = input_data[i].dtype.name\n\n mod, params = relay.frontend.from_tflite(tflite_model,\n shape_dict=shape_dict,\n dtype_dict=dtype_dict)\n with relay.build_config(opt_level=3):\n graph, lib, params = relay.build(mod, target, params=params)\n\n ctx = tvm.context(target, 0)\n from tvm.contrib import graph_runtime\n m = graph_runtime.create(graph, lib, ctx)\n # set inputs\n for i, e in enumerate(input_node):\n m.set_input(e, tvm.nd.array(input_data[i].astype(input_data[i].dtype)))\n\n m.set_input(**params)\n # execute\n m.run()\n # get outputs\n assert out_names is None or num_output == len(out_names), \"out_names: {} num_output: {}\".format(\n out_names, num_output)\n tvm_output_list = []\n for i in range(0, num_output):\n tvm_output = m.get_output(i)\n tvm_output_list.append(tvm_output.asnumpy())\n return tvm_output_list\n\n\ndef run_tflite_graph(tflite_model_buf, input_data):\n \"\"\" Generic function to execute TFLite \"\"\"\n input_data = convert_to_list(input_data)\n\n interpreter = 
interpreter_wrapper.Interpreter(model_content=tflite_model_buf)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n # set input\n assert len(input_data) == len(input_details)\n for i in range(len(input_details)):\n interpreter.set_tensor(input_details[i]['index'], input_data[i])\n\n # Run\n interpreter.invoke()\n\n # get output\n tflite_output = list()\n for i in range(len(output_details)):\n tflite_output.append(interpreter.get_tensor(output_details[i]['index']))\n\n return tflite_output\n\n\ndef compare_tflite_with_tvm(in_data, in_name, input_tensors,\n output_tensors, init_global_variables=False, out_names=None):\n \"\"\"Generic function to generate and compare TFLite and TVM output\"\"\"\n in_data = convert_to_list(in_data)\n in_name = convert_to_list(in_name)\n out_names = convert_to_list(out_names)\n in_node = [0] * len(in_name)\n for i in range(len(in_name)):\n in_node[i] = in_name[i].split(':')[0] if \":\" in in_name[i] else in_name[i]\n\n with tf.Session() as sess:\n if init_global_variables:\n sess.run(variables.global_variables_initializer())\n # convert to tflite model\n converter = interpreter_wrapper.TFLiteConverter.from_session(\n sess, input_tensors, output_tensors)\n tflite_model_buffer = converter.convert()\n tflite_output = run_tflite_graph(tflite_model_buffer, in_data)\n\n for device in [\"llvm\"]:\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n continue\n\n tvm_output = run_tvm_graph(tflite_model_buffer, in_data, in_node, target=device,\n num_output=len(out_names), out_names=out_names)\n for i in range(len(tflite_output)):\n tvm.testing.assert_allclose(tflite_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)\n\n\ndef with_fused_activation_function(input_tensor, fn_name):\n if fn_name is None or fn_name == \"NONE\":\n return input_tensor\n if fn_name == \"RELU\":\n return nn_ops.relu(input_tensor)\n if fn_name == \"RELU6\":\n return nn_ops.relu6(input_tensor)\n if fn_name == \"RELU_N1_TO_1\":\n return math_ops.maximum(-1, math_ops.minimum(input_tensor, 1))\n if fn_name == \"TANH\":\n return math_ops.tanh(input_tensor)\n raise AssertionError(\"Unknown fused_activation_function {}\".format(fn_name))\n\ndef _test_split(in_shape, axis, num_Splits, dtype):\n '''internal split tester taking as parameters in_shape, number of tensors to split into\n and dtype (data type)'''\n np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=in_shape, dtype=dtype)\n out = array_ops.split(in_data, num_Splits, axis=axis)\n out_names = ['out_' + str(n) + ':0' for n in range(num_Splits)]\n compare_tflite_with_tvm([np_data], ['Placeholder:0'], [in_data], out,\n out_names=out_names)\n\ndef test_forward_split():\n '''test split layer'''\n # rank 1\n _test_split((3,), 0, 1, 'float32')\n _test_split((3,), 0, 3, 'float32')\n _test_split((6,), 0, 3, 'float32')\n # rank 2\n _test_split((6, 2), 0, 3, 'float32')\n _test_split((2, 6), 1, 6, 'float32')\n # rank 3\n if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):\n _test_split((6, 2, 4), 0, 2, 'int32')\n\n _test_split((2, 6, 4), 1, 3, 'float32')\n _test_split((2, 4, 6), 2, 1, 'float32')\n # rank 4\n _test_split((6, 1, 3, 5), 0, 3, 'float32')\n _test_split((1, 6, 3, 5), 1, 3, 'float32')\n _test_split((1, 3, 6, 5), 2, 3, 'float32')\n _test_split((1, 3, 5, 6), 3, 3, 'float32')\n # split along negative axis\n 
_test_split((6, 1, 3, 5), -4, 3, 'float32')\n _test_split((1, 6, 3, 5), -3, 3, 'float32')\n _test_split((1, 3, 6, 5), -2, 3, 'float32')\n _test_split((1, 3, 5, 6), -1, 3, 'float32')\n\n#######################################################################\n# transpose\n# ---------\n\n\ndef _test_forward_transpose(ishape, axes=()):\n data = np.random.uniform(size=ishape).astype(np.float32)\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n\n if not axes:\n out = array_ops.transpose(in_data)\n else:\n out = array_ops.transpose(in_data, axes)\n\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_transpose():\n _test_forward_transpose((2, 2))\n _test_forward_transpose((2, 3, 4))\n _test_forward_transpose((7, 8, 8, 10))\n _test_forward_transpose((2, 3, 4), (1, 2, 0))\n _test_forward_transpose((2, 3, 4), (0, 1, 2))\n _test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))\n _test_forward_transpose((2, 3, 4, 5), ())\n\n#######################################################################\n# tile\n# ---------\n\n\ndef _test_forward_tile(in_shape, reps, dtype):\n data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n\n out = array_ops.tile(in_data, reps)\n\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_tile():\n _test_forward_tile((2, ), (3, ), \"int32\")\n _test_forward_tile((2, 2), (2, 3), \"float32\")\n\n\n#######################################################################\n# Pooling\n# -------\ndef _test_pooling_iteration(input_shape, **kwargs):\n \"\"\" One iteration of pool operation with given shapes and attributes \"\"\"\n\n x = -np.arange(\n np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=input_shape, dtype='float32')\n out = nn_ops.pool(in_data, **kwargs)\n\n compare_tflite_with_tvm(x,'Placeholder:0', [in_data], [out])\n\n\ndef _test_pooling(input_shape, **kwargs):\n _test_pooling_iteration(input_shape, **kwargs)\n\n\ndef test_forward_pooling():\n \"\"\" Pooling \"\"\"\n\n for pool_type in ['AVG', 'MAX']:\n _test_pooling(input_shape=[2, 9, 10, 2],\n window_shape=[1, 1],\n padding='SAME',\n pooling_type=pool_type,\n dilation_rate=[1, 1],\n strides=[1, 1])\n\n _test_pooling(input_shape=[2, 10, 9, 2],\n window_shape=[1, 1],\n padding='SAME',\n pooling_type=pool_type,\n dilation_rate=[1, 1],\n strides=[1, 1])\n\n _test_pooling(input_shape=[2, 9, 10, 2],\n window_shape=[2, 1],\n padding='SAME',\n pooling_type=pool_type,\n dilation_rate=[1, 1],\n strides=[1, 1])\n\n _test_pooling(input_shape=[2, 10, 9, 2],\n window_shape=[2, 3],\n padding='SAME',\n pooling_type=pool_type,\n dilation_rate=[1, 1],\n strides=[2, 1])\n\n\n#######################################################################\n# Convolution\n# -----------\n\ndef _test_convolution(tensor_in_sizes, filter_in_sizes,\n dilations, strides, padding, data_format,\n is_depthwise=False):\n \"\"\" One iteration of convolution with given shapes and attributes \"\"\"\n\n total_size_1 = 1\n total_size_2 = 1\n for s in tensor_in_sizes:\n total_size_1 *= s\n for s in filter_in_sizes:\n total_size_2 *= s\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]\n filter_array = [f * 1.0 for f in range(1, total_size_2 + 
1)]\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')\n in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype='float32')\n strides = [1] + strides + [1]\n dilations = [1] + dilations + [1]\n\n if is_depthwise:\n out = nn_ops.depthwise_conv2d_native(in_data,\n in_filter,\n strides=strides,\n padding=padding,\n data_format=data_format)\n else:\n out = nn_ops.conv2d(in_data,\n in_filter,\n strides=strides,\n padding=padding,\n data_format=data_format)\n data_array = np.reshape(data_array, tensor_in_sizes).astype('float32')\n compare_tflite_with_tvm(data_array, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_convolution():\n _test_convolution([4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], 'SAME', 'NHWC')\n _test_convolution([4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], 'VALID', 'NHWC')\n _test_convolution([4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], 'SAME', 'NHWC')\n _test_convolution([4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], 'VALID', 'NHWC')\n\n # depthwise convolution\n _test_convolution([4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], 'SAME', 'NHWC', True)\n _test_convolution([4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], 'VALID', 'NHWC', True)\n _test_convolution([4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], 'SAME', 'NHWC', True)\n _test_convolution([4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], 'VALID', 'NHWC', True)\n _test_convolution([4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], 'VALID', 'NHWC', True)\n\n\n#######################################################################\n# Reshape\n# -------\n\ndef _test_reshape(data, out_shape):\n \"\"\" One iteration of reshape operation with given data and out shape \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = array_ops.reshape(in_data, out_shape)\n\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_reshape():\n _test_reshape(np.arange(6.0, dtype=np.float32), [2, 3])\n _test_reshape(np.arange(6), [-1, 2])\n _test_reshape(np.arange(6), [3, -1])\n _test_reshape(np.arange(6), [-1])\n\n\n#######################################################################\n# Resize\n# ------\n\ndef _test_resize(tf_resize_op, data, align_corners):\n \"\"\" One iteration of Resize \"\"\"\n\n assert len(data) == 2\n\n # Test with tensor and constant\n with tf.Graph().as_default():\n images_tensor = array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name='in')\n size = ops.convert_to_tensor(data[1], dtype=data[1].dtype)\n out_tensor = tf_resize_op(images=images_tensor, size=size, align_corners=align_corners)\n compare_tflite_with_tvm([data[0]], ['in:0'], [images_tensor], [out_tensor])\n\n\ndef test_all_resize():\n \"\"\" Resize \"\"\"\n data = [np.random.rand(1, 16, 16, 3).astype(\"float32\"), np.array([8, 8], dtype=np.int32)]\n ### RESIZE_BILINEAR\n _test_resize(tf.image.resize_bilinear, data, align_corners=False)\n _test_resize(tf.image.resize_bilinear, data, align_corners=True)\n ### RESIZE_NEAREST_NEIGHBOR (was added in v1.13)\n # According to topi resize.h\n # Align corners not supported for nearest neighbour\n from tflite.BuiltinOperator import BuiltinOperator\n if 'RESIZE_NEAREST_NEIGHBOR' in dir(BuiltinOperator()):\n _test_resize(tf.image.resize_nearest_neighbor, data, align_corners=False)\n\n\n#######################################################################\n# Concatenation\n# -------------\n\ndef 
_test_concatenation(data, axis):\n \"\"\" One iteration of concatenation \"\"\"\n\n assert len(data) >= 1\n\n with tf.Graph().as_default():\n in_data = [\n array_ops.placeholder(shape=tensor.shape, dtype=tensor.dtype, name=\"in_{}\".format(idx))\n for idx, tensor in enumerate(data)]\n out = array_ops.concat(in_data, axis=axis)\n name = [\"in_{}:0\".format(idx) for idx in range(len(data))]\n\n compare_tflite_with_tvm(data, name, in_data, [out])\n\n\ndef test_forward_concatenation():\n\n _test_concatenation(\n [np.arange(6).reshape((1, 2, 1, 3)),\n np.arange(6).reshape((1, 2, 1, 3))], 1)\n\n _test_concatenation(\n [np.arange(6).reshape((3, 2)),\n np.arange(6).reshape((3, 2))], 1)\n\n _test_concatenation(\n [np.arange(6).reshape((2, 1, 1, 3)),\n np.arange(6).reshape((2, 1, 1, 3)),\n np.arange(6).reshape((2, 1, 1, 3))], 1)\n\n\n#######################################################################\n# Element-wise\n# ---\n\ndef _test_elemwise(math_op, data, fused_activation_function=None):\n \"\"\" One iteration of elemwise \"\"\"\n\n assert len(data) == 2\n\n # Test with two tensors\n with tf.Graph().as_default():\n in_data = [array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name='in_0'),\n array_ops.placeholder(shape=data[1].shape, dtype=data[1].dtype, name='in_1')]\n out = math_op(in_data[0], in_data[1])\n out = with_fused_activation_function(out, fused_activation_function)\n compare_tflite_with_tvm(data, ['in_0:0', 'in_1:0'], in_data, [out])\n\n # Test with tensor and constant\n with tf.Graph().as_default():\n in_data = [array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name='in')]\n out = math_op(in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype))\n out = with_fused_activation_function(out, fused_activation_function)\n compare_tflite_with_tvm([data[0]], ['in:0'], in_data, [out])\n\n\n#######################################################################\n# Add\n# ---\n\ndef _test_add(data, fused_activation_function=None):\n \"\"\" One iteration of add \"\"\"\n return _test_elemwise(math_ops.add, data, fused_activation_function)\n\n#######################################################################\n# Subtract\n# --------\n\ndef _test_sub(data, fused_activation_function=None):\n \"\"\" One iteration of subtract \"\"\"\n return _test_elemwise(math_ops.subtract, data, fused_activation_function)\n#######################################################################\n# Mul\n# ---\ndef _test_mul(data, fused_activation_function=None):\n \"\"\" One iteration of mul \"\"\"\n return _test_elemwise(math_ops.multiply, data, fused_activation_function)\n\n#######################################################################\n# Divide\n# ------\n\ndef _test_div(data, fused_activation_function=None):\n \"\"\" One iteration of divide \"\"\"\n return _test_elemwise(math_ops.divide, data, fused_activation_function)\n#######################################################################\n# Power\n# -----\n\ndef _test_pow(data):\n \"\"\" One iteration of power \"\"\"\n return _test_elemwise(math_ops.pow, data)\n#######################################################################\n# Maximum\n# -------\n\ndef _test_maximum(data):\n \"\"\" One iteration of maximum \"\"\"\n return _test_elemwise(math_ops.maximum, data)\n#######################################################################\n# Minimum\n# -------\n\ndef _test_minimum(data):\n \"\"\" One iteration of minimum \"\"\"\n return _test_elemwise(math_ops.minimum, data)\n\ndef 
_test_forward_elemwise(testop):\n \"\"\" Elewise\"\"\"\n testop([np.arange(6.0, dtype=np.float32).reshape((2, 1, 1, 3)),\n np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3))])\n testop([np.arange(6.0, dtype=np.float32).reshape((2, 1, 3)),\n np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3))])\n testop([np.arange(3.0, dtype=np.float32).reshape((1, 3)),\n np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3))])\n\ndef test_all_elemwise():\n _test_forward_elemwise(_test_add)\n _test_forward_elemwise(partial(_test_add, fused_activation_function=\"RELU\"))\n _test_forward_elemwise(partial(_test_add, fused_activation_function=\"RELU6\"))\n _test_forward_elemwise(_test_sub)\n _test_forward_elemwise(partial(_test_sub, fused_activation_function=\"RELU\"))\n _test_forward_elemwise(partial(_test_sub, fused_activation_function=\"RELU6\"))\n _test_forward_elemwise(_test_mul)\n _test_forward_elemwise(partial(_test_mul, fused_activation_function=\"RELU\"))\n _test_forward_elemwise(partial(_test_mul, fused_activation_function=\"RELU6\"))\n _test_forward_elemwise(_test_div)\n _test_forward_elemwise(partial(_test_div, fused_activation_function=\"RELU\"))\n _test_forward_elemwise(partial(_test_div, fused_activation_function=\"RELU6\"))\n _test_forward_elemwise(_test_pow)\n _test_forward_elemwise(_test_maximum)\n _test_forward_elemwise(_test_minimum)\n\n#######################################################################\n# Reduce\n# ------\n\ndef _test_reduce(math_op, data, keep_dims=None):\n \"\"\" One iteration of reduce \"\"\"\n\n assert len(data) == 2\n\n # Test with tensor and constant\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name='in')\n out = math_op(in_data, data[1], keep_dims)\n compare_tflite_with_tvm([data[0]], ['in:0'], [in_data], [out])\n\n\n#######################################################################\n# Reduce_min\n# ----------\n\ndef _test_reduce_min(data, keep_dims=None):\n \"\"\" One iteration of reduce_min \"\"\"\n return _test_reduce(math_ops.reduce_min, data, keep_dims)\n\n#######################################################################\n# Reduce_max\n# ----------\n\ndef _test_reduce_max(data, keep_dims=None):\n \"\"\" One iteration of reduce_max \"\"\"\n return _test_reduce(math_ops.reduce_max, data, keep_dims)\n\n#######################################################################\n# Reduce_mean\n# -----------\n\ndef _test_reduce_mean(data, keep_dims=None):\n \"\"\" One iteration of reduce_mean \"\"\"\n return _test_reduce(math_ops.reduce_mean, data, keep_dims)\n\n#######################################################################\n# Reduce_prod\n# -----------\n\ndef _test_reduce_prod(data, keep_dims=None):\n \"\"\" One iteration of reduce_prod \"\"\"\n return _test_reduce(math_ops.reduce_prod, data, keep_dims)\n\n\ndef _test_forward_reduce(testop):\n \"\"\" Reduce \"\"\"\n data0 = [np.random.rand(16, 16, 16, 16).astype(\"float32\"), None]\n data1 = [np.random.rand(16, 16, 16, 16).astype(\"float32\"), np.array([1, 2], dtype=np.int32)]\n testop(data0)\n testop(data0, keep_dims=False)\n testop(data0, keep_dims=True)\n testop(data1)\n testop(data1, keep_dims=False)\n testop(data1, keep_dims=True)\n\n\ndef test_all_reduce():\n _test_forward_reduce(_test_reduce_min)\n _test_forward_reduce(_test_reduce_max)\n _test_forward_reduce(_test_reduce_mean)\n _test_forward_reduce(_test_reduce_prod)\n\n\n#######################################################################\n# Squeeze\n# 
-------\n\ndef _test_squeeze(data, squeeze_dims=None):\n \"\"\" One iteration of squeeze \"\"\"\n\n if squeeze_dims is None:\n squeeze_dims = []\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n\n if squeeze_dims:\n out = array_ops.squeeze(in_data, squeeze_dims)\n else:\n out = array_ops.squeeze(in_data)\n\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_squeeze():\n \"\"\" Squeeze \"\"\"\n _test_squeeze(np.arange(6).reshape((1, 2, 1, 3)), [0, 2])\n _test_squeeze(np.arange(6).reshape((2, 1, 3, 1)), [1, 3])\n\n\n#######################################################################\n# Pad\n# ---\n\ndef _test_pad(data):\n \"\"\" One iteration of PAD \"\"\"\n\n assert len(data) == 2\n\n # Test with tensor and constant\n with tf.Graph().as_default():\n in_data = [array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name='in')]\n out = array_ops.pad(in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype))\n compare_tflite_with_tvm([data[0]], ['in:0'], in_data, [out])\n\n\ndef test_forward_pad():\n \"\"\" Pad \"\"\"\n _test_pad([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),\n np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32)])\n _test_pad([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),\n np.array([[2, 2], [1, 1], [1, 1]], dtype=np.int32)])\n _test_pad([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),\n np.array([[1, 1], [2, 2]], dtype=np.int32)])\n _test_pad([np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),\n np.array([[1, 1], [2, 2]], dtype=np.int32)])\n\n\n#######################################################################\n# Pack\n# -------------\n\ndef _test_pack(data, axis):\n \"\"\" One iteration of pack \"\"\"\n\n assert len(data) >= 1\n\n with tf.Graph().as_default():\n in_data = [\n array_ops.placeholder(shape=tensor.shape, dtype=tensor.dtype, name=\"in_{}\".format(idx))\n for idx, tensor in enumerate(data)]\n out = array_ops.pack(in_data, axis=axis)\n name = [\"in_{}:0\".format(idx) for idx in range(len(data))]\n\n compare_tflite_with_tvm(data, name, in_data, [out])\n\n\ndef test_forward_pack():\n \"\"\" Pack \"\"\"\n _test_pack(\n [np.arange(6).reshape((1, 2, 1, 3)),\n np.arange(6).reshape((1, 2, 1, 3))], 1)\n\n _test_pack(\n [np.arange(6).reshape((3, 2)),\n np.arange(6).reshape((3, 2))], 1)\n\n _test_pack(\n [np.arange(6).reshape((2, 1, 1, 3)),\n np.arange(6).reshape((2, 1, 1, 3)),\n np.arange(6).reshape((2, 1, 1, 3))], 1)\n\n\n#######################################################################\n# Logistic\n# --------\n\ndef _test_logistic(data):\n \"\"\" One iteration of LOGISTIC \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = math_ops.sigmoid(in_data)\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\ndef test_forward_logistic():\n \"\"\" LOGISTIC \"\"\"\n _test_logistic(np.arange(6.0, dtype=np.float32).reshape((1, 6)))\n\n\n#######################################################################\n# Softmax\n# -------\n\ndef _test_softmax(data):\n \"\"\" One iteration of softmax \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = nn_ops.softmax(in_data)\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\ndef test_forward_softmax():\n \"\"\" Softmax \"\"\"\n _test_softmax(np.arange(6.0, dtype=np.float32).reshape((1, 
6)))\n\n\n#######################################################################\n# Fully Connected\n# -------\n\ndef _test_fully_connected(tensor_in_sizes, filter_in_sizes, bias_in_size=None):\n \"\"\" One iteration of fully connected \"\"\"\n\n total_size_1 = 1\n total_size_2 = 1\n for s in tensor_in_sizes:\n total_size_1 *= s\n for s in filter_in_sizes:\n total_size_2 *= s\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]\n filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]\n assert int(total_size_1 / tensor_in_sizes[0]) == filter_in_sizes[0], \\\n \"input size and filter size are mismatched\"\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')\n in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype='float32')\n\n # reshape N H W C into N H*W*C\n in_data_reshape = array_ops.reshape(in_data, [tensor_in_sizes[0], -1])\n\n out = math_ops.mat_mul(in_data_reshape, in_filter)\n\n # if we have bias\n if bias_in_size:\n assert bias_in_size[0] == filter_in_sizes[1], \"bias and filter size are mismatched\"\n bias_array = [f * 1.0 for f in range(1, bias_in_size[0] + 1)]\n in_bias = constant_op.constant(bias_array, shape=bias_in_size, dtype='float32')\n out = nn_ops.bias_add(out, in_bias)\n\n data_array = np.reshape(data_array, tensor_in_sizes).astype('float32')\n compare_tflite_with_tvm(data_array, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_fully_connected():\n \"\"\" Fully Connected \"\"\"\n _test_fully_connected([1, 1, 1, 150], [150, 100])\n _test_fully_connected([1, 1, 1, 150], [150, 100], [100])\n _test_fully_connected([5, 1, 1, 150], [150, 100])\n _test_fully_connected([5, 1, 1, 150], [150, 100], [100])\n\n\n#######################################################################\n# Mobilenet\n# ---------\n\ndef test_forward_mobilenet_v1():\n \"\"\"Test the Mobilenet V1 TF Lite model.\"\"\"\n # MobilenetV1\n tflite_model_file = tf_testing.get_workload_official(\n \"http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz\",\n \"mobilenet_v1_1.0_224.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n data = np.random.uniform(size=(1, 224, 224, 3)).astype('float32')\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')\n tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),\n rtol=1e-5, atol=1e-5)\n\ndef test_forward_mobilenet_v2():\n \"\"\"Test the Mobilenet V2 TF Lite model.\"\"\"\n # MobilenetV2\n tflite_model_file = tf_testing.get_workload_official(\n \"http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz\",\n \"mobilenet_v2_1.0_224.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n data = np.random.uniform(size=(1, 224, 224, 3)).astype('float32')\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')\n tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),\n rtol=1e-5, atol=1e-5)\n\n#######################################################################\n# Inception\n# ------------\n\ndef test_forward_inception_v3_net():\n \"\"\"Test the Inception V3 TF Lite model.\"\"\"\n # InceptionV3\n tflite_model_file = tf_testing.get_workload_official(\n 
\"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz\",\n \"inception_v3.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n data = np.random.uniform(size=(1, 299, 299, 3)).astype('float32')\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')\n tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),\n rtol=1e-5, atol=1e-5)\n\ndef test_forward_inception_v4_net():\n \"\"\"Test the Inception V4 TF Lite model.\"\"\"\n # InceptionV4\n tflite_model_file = tf_testing.get_workload_official(\n \"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz\",\n \"inception_v4.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n data = np.random.uniform(size=(1, 299, 299, 3)).astype('float32')\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')\n tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),\n rtol=1e-5, atol=1e-5)\n\n#######################################################################\n# SSD Mobilenet\n# -------------\n\ndef test_forward_ssd_mobilenet_v1():\n \"\"\"Test the SSD Mobilenet V1 TF Lite model.\"\"\"\n # SSD MobilenetV1\n tflite_model_file = tf_testing.get_workload_official(\n \"https://raw.githubusercontent.com/dmlc/web-data/master/tensorflow/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28_nopp.tgz\",\n \"ssd_mobilenet_v1_coco_2018_01_28_nopp.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n data = np.random.uniform(size=(1, 300, 300, 3)).astype('float32')\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'normalized_input_image_tensor')\n tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),\n rtol=1e-5, atol=1e-5)\n\n#######################################################################\n# Main\n# ----\nif __name__ == '__main__':\n # Split\n test_forward_split()\n\n # Transpose\n test_forward_transpose()\n\n # Tile\n test_forward_tile()\n\n # Transforms\n test_forward_concatenation()\n test_forward_pad()\n test_forward_pack()\n test_forward_reshape()\n test_all_resize()\n test_forward_squeeze()\n\n # NN\n test_forward_convolution()\n test_forward_logistic()\n test_forward_pooling()\n test_forward_softmax()\n test_forward_fully_connected()\n\n # Elemwise\n test_all_elemwise()\n\n # Reduce\n test_all_reduce()\n\n # End to End\n test_forward_mobilenet_v1()\n test_forward_mobilenet_v2()\n test_forward_inception_v3_net()\n test_forward_inception_v4_net()\n test_forward_ssd_mobilenet_v1()\n"
] | [
[
"tensorflow.python.ops.math_ops.mat_mul",
"numpy.random.rand",
"tensorflow.python.ops.nn_ops.pool",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.math_ops.tanh",
"tensorflow.python.ops.math_ops.sigmoid",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.placeholder",
"numpy.arange",
"numpy.prod",
"tensorflow.python.ops.nn_ops.softmax",
"tensorflow.python.ops.array_ops.split",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.contrib.lite.Interpreter",
"numpy.array",
"numpy.reshape",
"tensorflow.python.ops.nn_ops.bias_add",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.contrib.lite.TFLiteConverter.from_session",
"tensorflow.Session",
"tensorflow.python.ops.array_ops.squeeze",
"numpy.squeeze",
"tensorflow.python.ops.nn_ops.relu6",
"tensorflow.Graph",
"tensorflow.python.ops.nn_ops.relu",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.transpose",
"numpy.random.uniform",
"tensorflow.python.ops.nn_ops.conv2d",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.array_ops.pack",
"tensorflow.python.ops.nn_ops.depthwise_conv2d_native"
]
] |
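A minimal, hedged sketch (not part of the dataset row above) of the comparison pattern the preceding test file implements: run a serialized TFLite flatbuffer through the TFLite interpreter, collect its outputs, and compare them element-wise against another runtime's outputs. It assumes a TensorFlow build that exposes tf.lite.Interpreter (the row itself reaches the interpreter through tensorflow.contrib.lite, per its apis list); the helper name run_tflite and the tolerances are illustrative only.

import numpy as np
import tensorflow as tf  # assumption: a TF build exposing tf.lite.Interpreter

def run_tflite(model_buf, inputs):
    # Load the flatbuffer, bind inputs in declaration order, and return all outputs.
    interpreter = tf.lite.Interpreter(model_content=model_buf)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    assert len(inputs) == len(input_details)
    for detail, value in zip(input_details, inputs):
        interpreter.set_tensor(detail['index'], value)
    interpreter.invoke()
    return [interpreter.get_tensor(d['index']) for d in output_details]

# Usage sketch: compare against a reference runtime's outputs with the same
# tolerances the test file passes to tvm.testing.assert_allclose.
# for got, want in zip(run_tflite(buf, [data]), reference_outputs):
#     np.testing.assert_allclose(got, want, rtol=1e-5, atol=1e-5)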
burntyellow/adelman_ci | [
"cca251a51b34843faed0275cce01d7a307829993"
] | [
"src/westpa/core/data_manager.py"
] | [
"\"\"\"\nHDF5 data manager for WEST.\n\nOriginal HDF5 implementation: Joseph W. Kaus\nCurrent implementation: Matthew C. Zwier\n\nWEST exclusively uses the cross-platform, self-describing file format HDF5\nfor data storage. This ensures that data is stored efficiently and portably\nin a manner that is relatively straightforward for other analysis tools\n(perhaps written in C/C++/Fortran) to access.\n\nThe data is laid out in HDF5 as follows:\n - summary -- overall summary data for the simulation\n - /iterations/ -- data for individual iterations, one group per iteration under /iterations\n - iter_00000001/ -- data for iteration 1\n - seg_index -- overall information about segments in the iteration, including weight\n - pcoord -- progress coordinate data organized as [seg_id][time][dimension]\n - wtg_parents -- data used to reconstruct the split/merge history of trajectories\n - recycling -- flux and event count for recycled particles, on a per-target-state basis\n - aux_data/ -- auxiliary datasets (data stored on the 'data' field of Segment objects)\n\nThe file root object has an integer attribute 'west_file_format_version' which can be used to\ndetermine how to access data even as the file format (i.e. organization of data within HDF5 file)\nevolves.\n\nVersion history:\n Version 7\n - Removed bin_assignments, bin_populations, and bin_rates from iteration group.\n - Added new_segments subgroup to iteration group\n Version 6\n - ???\n Version 5\n - moved iter_* groups into a top-level iterations/ group,\n - added in-HDF5 storage for basis states, target states, and generated states\n\"\"\"\n\nimport logging\nimport pickle\nimport posixpath\nimport sys\nimport threading\nimport time\nfrom operator import attrgetter\n\nimport h5py\nfrom h5py import h5s\nimport numpy as np\n\nfrom . import h5io\nfrom . segment import Segment\nfrom . states import BasisState, TargetState, InitialState\nfrom . 
we_driver import NewWeightEntry\n\nimport westpa\n\n\nlog = logging.getLogger(__name__)\n\nfile_format_version = 7\n\n\nclass flushing_lock:\n def __init__(self, lock, fileobj):\n self.lock = lock\n self.fileobj = fileobj\n\n def __enter__(self):\n self.lock.acquire()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.fileobj.flush()\n self.lock.release()\n\nclass expiring_flushing_lock:\n def __init__(self, lock, flush_method, nextsync):\n self.lock = lock\n self.flush_method = flush_method\n self.nextsync = nextsync\n\n def __enter__(self):\n self.lock.acquire()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if time.time() > self.nextsync:\n self.flush_method()\n self.lock.release()\n\n\n# Data types for use in the HDF5 file\nseg_id_dtype = np.int64 # Up to 9 quintillion segments per iteration; signed so that initial states can be stored negative\nn_iter_dtype = np.uint32 # Up to 4 billion iterations\nweight_dtype = np.float64 # about 15 digits of precision in weights\nutime_dtype = np.float64 # (\"u\" for Unix time) Up to ~10^300 cpu-seconds\nvstr_dtype = h5py.special_dtype(vlen=str)\nh5ref_dtype = h5py.special_dtype(ref=h5py.Reference)\nbinhash_dtype = np.dtype('|S64')\n\n#seg_status_dtype = h5py.special_dtype(enum=(np.uint8, Segment.statuses))\n#seg_initpoint_dtype = h5py.special_dtype(enum=(np.uint8, Segment.initpoint_types))\n#seg_endpoint_dtype = h5py.special_dtype(enum=(np.uint8, Segment.endpoint_types))\n#istate_type_dtype = h5py.special_dtype(enum=(np.uint8, InitialState.istate_types))\n#istate_status_dtype = h5py.special_dtype(enum=(np.uint8, InitialState.istate_statuses))\n\nseg_status_dtype = np.uint8\nseg_initpoint_dtype = np.uint8\nseg_endpoint_dtype = np.uint8\nistate_type_dtype = np.uint8\nistate_status_dtype = np.uint8\n\nsummary_table_dtype = np.dtype( [ ('n_particles', seg_id_dtype), # Number of live trajectories in this iteration\n ('norm', weight_dtype), # Norm of probability, to watch for errors or drift\n ('min_bin_prob', weight_dtype), # Per-bin minimum probability\n ('max_bin_prob', weight_dtype), # Per-bin maximum probability\n ('min_seg_prob', weight_dtype), # Per-segment minimum probability\n ('max_seg_prob', weight_dtype), # Per-segment maximum probability\n ('cputime', utime_dtype), # Total CPU time for this iteration\n ('walltime', utime_dtype),# Total wallclock time for this iteration\n ('binhash', binhash_dtype),\n ] )\n\n\n# The HDF5 file tracks two distinct, but related, histories:\n# (1) the evolution of the trajectory, which requires only an identifier\n# of where a segment's initial state comes from (the \"history graph\");\n# this is stored as the parent_id field of the seg index\n# (2) the flow of probability due to splits, merges, and recycling events,\n# which can be thought of as an adjacency list (the \"weight graph\")\n# segment ID is implied by the row in the index table, and so is not stored\n# initpoint_type remains implicitly stored as negative IDs (if parent_id < 0, then init_state_id = -(parent_id+1)\nseg_index_dtype = np.dtype( [ ('weight', weight_dtype), # Statistical weight of this segment\n ('parent_id', seg_id_dtype), # ID of parent (for trajectory history)\n ('wtg_n_parents', np.uint), # number of parents this segment has in the weight transfer graph\n ('wtg_offset', np.uint), # offset into the weight transfer graph dataset\n ('cputime', utime_dtype), # CPU time used in propagating this segment\n ('walltime', utime_dtype), # Wallclock time used in propagating this segment\n ('endpoint_type', seg_endpoint_dtype), # 
Endpoint type (will continue, merged, or recycled)\n ('status', seg_status_dtype), # Status of propagation of this segment\n ] )\n\n# Index to basis/initial states\nibstate_index_dtype = np.dtype([('iter_valid', np.uint),\n ('n_bstates', np.uint),\n ('group_ref', h5ref_dtype)])\n\n# Basis state index type\nbstate_dtype = np.dtype( [ ('label', vstr_dtype), # An optional descriptive label\n ('probability', weight_dtype), # Probability that this state will be selected\n ('auxref', vstr_dtype), # An optional auxiliar data reference\n ])\n\n# Even when initial state generation is off and basis states are passed through directly, an initial state entry\n# is created, as that allows precise tracing of the history of a given state in the most complex case of\n# a new initial state for every new trajectory.\nistate_dtype = np.dtype( [('iter_created', np.uint), # Iteration during which this state was generated (0 for at w_init)\n ('iter_used', np.uint), # When this state was used to start a new trajectory\n ('basis_state_id', seg_id_dtype), # Which basis state this state was generated from\n ('istate_type', istate_type_dtype), # What type this initial state is (generated or basis)\n ('istate_status', istate_status_dtype), # Whether this initial state is ready to go\n ])\n\ntstate_index_dtype = np.dtype([('iter_valid', np.uint), # Iteration when this state list is valid\n ('n_states', np.uint),\n ('group_ref', h5ref_dtype)]) # Reference to a group containing further data; this will be the\n # null reference if there is no target state for that timeframe.\ntstate_dtype = np.dtype( [('label', vstr_dtype),]) # An optional descriptive label for this state\n\n# Support for west.we_driver.NewWeightEntry\nnw_source_dtype = np.uint8\nnw_index_dtype = np.dtype([('source_type', nw_source_dtype),\n ('weight', weight_dtype),\n ('prev_seg_id', seg_id_dtype),\n ('target_state_id', seg_id_dtype),\n ('initial_state_id', seg_id_dtype)])\n\n# Storage of bin identities\nbinning_index_dtype = np.dtype([('hash', binhash_dtype),\n ('pickle_len', np.uint32)])\n\nclass WESTDataManager:\n \"\"\"Data manager for assisiting the reading and writing of WEST data from/to HDF5 files.\"\"\"\n\n # defaults for various options\n default_iter_prec = 8\n default_we_h5filename = 'west.h5'\n default_we_h5file_driver = None\n default_flush_period = 60\n\n # Compress any auxiliary dataset whose total size (across all segments) is more than 1MB\n default_aux_compression_threshold = 1048576\n\n # Bin data horizontal (second dimension) chunk size\n binning_hchunksize = 4096\n\n # Number of rows to retrieve during a table scan\n table_scan_chunksize = 1024\n\n def flushing_lock(self):\n return flushing_lock(self.lock, self.we_h5file)\n\n def expiring_flushing_lock(self):\n next_flush = self.last_flush + self.flush_period\n return expiring_flushing_lock(self.lock, self.flush_backing, next_flush)\n\n def process_config(self):\n config = self.rc.config\n\n for (entry, type_) in [('iter_prec', int)]:\n config.require_type_if_present(['west', 'data', entry], type_)\n\n self.we_h5filename = config.get_path(['west', 'data', 'west_data_file'], default=self.default_we_h5filename)\n self.we_h5file_driver = config.get_choice(['west', 'data', 'west_data_file_driver'], [None, 'sec2', 'family'],\n default=self.default_we_h5file_driver,\n value_transform=(lambda x: x.lower() if x else None))\n self.iter_prec = config.get(['west', 'data', 'iter_prec'], self.default_iter_prec)\n self.aux_compression_threshold = 
config.get(['west','data','aux_compression_threshold'],\n self.default_aux_compression_threshold)\n self.flush_period = config.get(['west','data','flush_period'], self.default_flush_period)\n\n # Process dataset options\n dsopts_list = config.get(['west','data','datasets']) or []\n for dsopts in dsopts_list:\n dsopts = normalize_dataset_options(dsopts, path_prefix='auxdata' if dsopts['name']!='pcoord' else '')\n try:\n self.dataset_options[dsopts['name']].update(dsopts)\n except KeyError:\n self.dataset_options[dsopts['name']] = dsopts\n\n if 'pcoord' in self.dataset_options:\n if self.dataset_options['pcoord']['h5path'] != 'pcoord':\n raise ValueError('cannot override pcoord storage location')\n\n def __init__(self, rc=None):\n\n self.rc = rc or westpa.rc\n\n self.we_h5filename = self.default_we_h5filename\n self.we_h5file_driver = self.default_we_h5file_driver\n self.we_h5file_version = None\n self.h5_access_mode = 'r+'\n self.iter_prec = self.default_iter_prec\n self.aux_compression_threshold = self.default_aux_compression_threshold\n\n self.we_h5file = None\n\n self.lock = threading.RLock()\n self.flush_period = None\n self.last_flush = 0\n\n self._system = None\n\n self.dataset_options = {}\n self.process_config()\n\n @property\n def system(self):\n if self._system is None:\n self._system = self.rc.get_system_driver()\n return self._system\n\n @system.setter\n def system(self, system):\n self._system = system\n\n @property\n def closed(self):\n return (self.we_h5file is None)\n\n def iter_group_name(self, n_iter, absolute=True):\n if absolute:\n return '/iterations/iter_{:0{prec}d}'.format(int(n_iter), prec=self.iter_prec)\n else:\n return 'iter_{:0{prec}d}'.format(int(n_iter), prec=self.iter_prec)\n\n def require_iter_group(self, n_iter):\n '''Get the group associated with n_iter, creating it if necessary.'''\n with self.lock:\n iter_group = self.we_h5file.require_group('/iterations/iter_{:0{prec}d}'.format(int(n_iter), prec=self.iter_prec))\n iter_group.attrs['n_iter'] = n_iter\n return iter_group\n\n def del_iter_group(self, n_iter):\n with self.lock:\n del self.we_h5file['/iterations/iter_{:0{prec}d}'.format(int(n_iter), prec=self.iter_prec)]\n\n def get_iter_group(self, n_iter):\n with self.lock:\n try:\n return self.we_h5file['/iterations/iter_{:0{prec}d}'.format(int(n_iter), prec=self.iter_prec)]\n except KeyError:\n return self.we_h5file['/iter_{:0{prec}d}'.format(int(n_iter),prec=self.iter_prec)]\n\n def get_seg_index(self, n_iter):\n with self.lock:\n seg_index = self.get_iter_group(n_iter)['seg_index']\n return seg_index\n\n @property\n def current_iteration(self):\n with self.lock:\n h5file_attrs = self.we_h5file['/'].attrs\n h5file_attr_keys = list(h5file_attrs.keys())\n\n if 'west_current_iteration' in h5file_attr_keys:\n return int(self.we_h5file['/'].attrs['west_current_iteration'])\n else:\n return int(self.we_h5file['/'].attrs['wemd_current_iteration'])\n\n @current_iteration.setter\n def current_iteration(self, n_iter):\n with self.lock:\n self.we_h5file['/'].attrs['west_current_iteration'] = n_iter\n\n def open_backing(self, mode=None):\n '''Open the (already-created) HDF5 file named in self.west_h5filename.'''\n mode = mode or self.h5_access_mode\n if not self.we_h5file:\n log.debug('attempting to open {} with mode {}'.format(self.we_h5filename, mode))\n self.we_h5file = h5io.WESTPAH5File(self.we_h5filename, mode, driver=self.we_h5file_driver)\n\n h5file_attrs = self.we_h5file['/'].attrs\n h5file_attr_keys = list(h5file_attrs.keys())\n\n if 'west_iter_prec' in 
h5file_attr_keys:\n self.iter_prec = int(h5file_attrs['west_iter_prec'])\n elif 'wemd_iter_prec' in h5file_attr_keys:\n self.iter_prec = int(h5file_attrs['wemd_iter_prec'])\n else:\n log.info('iteration precision not stored in HDF5; using {:d}'.format(self.iter_prec))\n\n if 'west_file_format_version' in h5file_attr_keys:\n self.we_h5file_version = h5file_attrs['west_file_format_version']\n elif 'wemd_file_format_version' in h5file_attr_keys:\n self.we_h5file_version = h5file_attrs['wemd_file_format_version']\n else:\n log.info('WEST HDF5 file format version not stored, assuming 0')\n self.we_h5file_version = 0\n\n log.debug('opened WEST HDF5 file version {:d}'.format(self.we_h5file_version))\n\n def prepare_backing(self): #istates):\n '''Create new HDF5 file'''\n self.we_h5file = h5py.File(self.we_h5filename, 'w', driver=self.we_h5file_driver)\n\n with self.flushing_lock():\n self.we_h5file['/'].attrs['west_file_format_version'] = file_format_version\n self.we_h5file['/'].attrs['west_iter_prec'] = self.iter_prec\n self.current_iteration = 0\n self.we_h5file['/'].create_dataset('summary',\n shape=(1,),\n dtype=summary_table_dtype,\n maxshape=(None,))\n self.we_h5file.create_group('/iterations')\n\n def close_backing(self):\n if self.we_h5file is not None:\n with self.lock:\n self.we_h5file.close()\n self.we_h5file = None\n\n def flush_backing(self):\n if self.we_h5file is not None:\n with self.lock:\n self.we_h5file.flush()\n self.last_flush = time.time()\n\n def save_target_states(self, tstates, n_iter=None):\n '''Save the given target states in the HDF5 file; they will be used for the next iteration to\n be propagated. A complete set is required, even if nominally appending to an existing set,\n which simplifies the mapping of IDs to the table.'''\n\n system = westpa.rc.get_system_driver()\n\n n_iter = n_iter or self.current_iteration\n\n # Assemble all the important data before we start to modify the HDF5 file\n tstates = list(tstates)\n if tstates:\n state_table = np.empty((len(tstates),), dtype=tstate_dtype)\n state_pcoords = np.empty((len(tstates),system.pcoord_ndim), dtype=system.pcoord_dtype)\n for i, state in enumerate(tstates):\n state.state_id = i\n state_table[i]['label'] = state.label\n state_pcoords[i] = state.pcoord\n else:\n state_table = None\n state_pcoords = None\n\n # Commit changes to HDF5\n with self.lock:\n master_group = self.we_h5file.require_group('tstates')\n\n try:\n master_index = master_group['index']\n except KeyError:\n master_index = master_group.create_dataset('index', shape=(1,), maxshape=(None,),\n dtype=tstate_index_dtype)\n n_sets = 1\n else:\n n_sets = len(master_index) + 1\n master_index.resize((n_sets,))\n\n set_id = n_sets-1\n master_index_row = master_index[set_id]\n master_index_row['iter_valid'] = n_iter\n master_index_row['n_states'] = len(tstates)\n\n if tstates:\n state_group = master_group.create_group(str(set_id))\n master_index_row['group_ref'] = state_group.ref\n state_group['index'] = state_table\n state_group['pcoord'] = state_pcoords\n else:\n master_index_row['group_ref'] = None\n\n master_index[set_id] = master_index_row\n\n def _find_multi_iter_group(self, n_iter, master_group_name):\n with self.lock:\n master_group = self.we_h5file[master_group_name]\n master_index = master_group['index'][...]\n set_id = np.digitize([n_iter], master_index['iter_valid']) - 1\n group_ref = master_index[set_id]['group_ref']\n\n # Check if reference is Null\n if not bool(group_ref):\n return None\n\n # This extra [0] is to work around a bug in h5py\n 
try:\n group = self.we_h5file[group_ref]\n except AttributeError:\n group = self.we_h5file[group_ref[0]]\n else:\n log.debug('h5py fixed; remove alternate code path')\n log.debug('reference {!r} points to group {!r}'.format(group_ref, group))\n return group\n\n def find_tstate_group(self, n_iter):\n return self._find_multi_iter_group(n_iter, 'tstates')\n\n def find_ibstate_group(self, n_iter):\n return self._find_multi_iter_group(n_iter, 'ibstates')\n\n def get_target_states(self, n_iter):\n '''Return a list of Target objects representing the target (sink) states that are in use for iteration n_iter.\n Future iterations are assumed to continue from the most recent set of states.'''\n\n with self.lock:\n tstate_group = self.find_tstate_group(n_iter)\n\n if tstate_group is not None:\n tstate_index = tstate_group['index'][...]\n tstate_pcoords = tstate_group['pcoord'][...]\n\n tstates = [TargetState(state_id=i, label=str(row['label']), pcoord=pcoord.copy())\n for (i, (row, pcoord)) in enumerate(zip(tstate_index, tstate_pcoords))]\n else:\n tstates = []\n\n return tstates\n\n def create_ibstate_group(self, basis_states, n_iter=None):\n '''Create the group used to store basis states and initial states (whose definitions are always\n coupled). This group is hard-linked into all iteration groups that use these basis and\n initial states.'''\n\n with self.lock:\n n_iter = n_iter or self.current_iteration\n master_group = self.we_h5file.require_group('ibstates')\n\n try:\n master_index = master_group['index']\n except KeyError:\n master_index = master_group.create_dataset('index', dtype=ibstate_index_dtype,\n shape=(1,), maxshape=(None,))\n n_sets = 1\n else:\n n_sets = len(master_index)+1\n master_index.resize((n_sets,))\n\n set_id = n_sets - 1\n master_index_row = master_index[set_id]\n master_index_row['iter_valid'] = n_iter\n master_index_row['n_bstates'] = len(basis_states)\n state_group = master_group.create_group(str(set_id))\n master_index_row['group_ref'] = state_group.ref\n\n\n if basis_states:\n system = westpa.rc.get_system_driver()\n state_table = np.empty((len(basis_states),), dtype=bstate_dtype)\n state_pcoords = np.empty((len(basis_states),system.pcoord_ndim), dtype=system.pcoord_dtype)\n for i, state in enumerate(basis_states):\n state.state_id = i\n state_table[i]['label'] = state.label\n state_table[i]['probability'] = state.probability\n state_table[i]['auxref'] = state.auxref or ''\n state_pcoords[i] = state.pcoord\n\n state_group['bstate_index'] = state_table\n state_group['bstate_pcoord'] = state_pcoords\n\n master_index[set_id] = master_index_row\n return state_group\n\n\n def get_basis_states(self, n_iter=None):\n '''Return a list of BasisState objects representing the basis states that are in use for iteration n_iter.'''\n\n with self.lock:\n n_iter = n_iter or self.current_iteration\n ibstate_group = self.find_ibstate_group(n_iter)\n try:\n bstate_index = ibstate_group['bstate_index'][...]\n except KeyError:\n return []\n bstate_pcoords = ibstate_group['bstate_pcoord'][...]\n bstates = [BasisState(state_id=i, label=row['label'], probability=row['probability'],\n auxref = str(row['auxref']) or None, pcoord=pcoord.copy())\n for (i, (row, pcoord)) in enumerate(zip(bstate_index, bstate_pcoords))]\n return bstates\n\n\n def create_initial_states(self, n_states, n_iter=None):\n '''Create storage for ``n_states`` initial states associated with iteration ``n_iter``, and\n return bare InitialState objects with only state_id set.'''\n\n system = westpa.rc.get_system_driver()\n with 
self.lock:\n n_iter = n_iter or self.current_iteration\n ibstate_group = self.find_ibstate_group(n_iter)\n\n try:\n istate_index = ibstate_group['istate_index']\n except KeyError:\n istate_index = ibstate_group.create_dataset('istate_index', dtype=istate_dtype,\n shape=(n_states,), maxshape=(None,))\n istate_pcoords = ibstate_group.create_dataset('istate_pcoord', dtype=system.pcoord_dtype,\n shape=(n_states,system.pcoord_ndim),\n maxshape=(None,system.pcoord_ndim))\n len_index = len(istate_index)\n first_id = 0\n else:\n first_id = len(istate_index)\n len_index = len(istate_index) + n_states\n istate_index.resize((len_index,))\n istate_pcoords = ibstate_group['istate_pcoord']\n istate_pcoords.resize((len_index,system.pcoord_ndim))\n\n\n index_entries = istate_index[first_id:len_index]\n new_istates = []\n for irow, row in enumerate(index_entries):\n row['iter_created'] = n_iter\n row['istate_status'] = InitialState.ISTATE_STATUS_PENDING\n new_istates.append(InitialState(state_id=first_id+irow, basis_state_id=None,\n iter_created=n_iter, istate_status=InitialState.ISTATE_STATUS_PENDING))\n istate_index[first_id:len_index] = index_entries\n return new_istates\n\n def update_initial_states(self, initial_states, n_iter = None):\n '''Save the given initial states in the HDF5 file'''\n\n system = westpa.rc.get_system_driver()\n initial_states = sorted(initial_states,key=attrgetter('state_id'))\n if not initial_states:\n return\n\n with self.lock:\n n_iter = n_iter or self.current_iteration\n ibstate_group = self.find_ibstate_group(n_iter)\n state_ids = [state.state_id for state in initial_states]\n index_entries = ibstate_group['istate_index'][state_ids]\n pcoord_vals = np.empty((len(initial_states), system.pcoord_ndim), dtype=system.pcoord_dtype)\n for i, initial_state in enumerate(initial_states):\n index_entries[i]['iter_created'] = initial_state.iter_created\n index_entries[i]['iter_used'] = initial_state.iter_used or InitialState.ISTATE_UNUSED\n index_entries[i]['basis_state_id'] = initial_state.basis_state_id if initial_state.basis_state_id is not None else -1\n index_entries[i]['istate_type'] = initial_state.istate_type or InitialState.ISTATE_TYPE_UNSET\n index_entries[i]['istate_status'] = initial_state.istate_status or InitialState.ISTATE_STATUS_PENDING\n pcoord_vals[i] = initial_state.pcoord\n\n ibstate_group['istate_index'][state_ids] = index_entries\n ibstate_group['istate_pcoord'][state_ids] = pcoord_vals\n\n def get_initial_states(self, n_iter=None):\n states = []\n with self.lock:\n n_iter = n_iter or self.current_iteration\n ibstate_group = self.find_ibstate_group(n_iter)\n try:\n istate_index = ibstate_group['istate_index'][...]\n except KeyError:\n return []\n istate_pcoords = ibstate_group['pcoord'][...]\n\n for state_id, (state, pcoord) in enumerate(zip(istate_index, istate_pcoords)):\n states.append(InitialState(state_id=state_id, basis_state_id=int(state['basis_state_id']),\n iter_created=int(state['iter_created']), iter_used=int(state['iter_used']),\n istate_type=int(state['istate_type']), pcoord=pcoord.copy()))\n return states\n\n def get_segment_initial_states(self, segments, n_iter=None):\n '''Retrieve all initial states referenced by the given segments.'''\n\n with self.lock:\n n_iter = n_iter or self.current_iteration\n ibstate_group = self.get_iter_group(n_iter)['ibstates']\n\n istate_ids = {-int(segment.parent_id+1) for segment in segments if segment.parent_id < 0}\n sorted_istate_ids = sorted(istate_ids)\n if not sorted_istate_ids:\n return []\n\n istate_rows = 
ibstate_group['istate_index'][sorted_istate_ids][...]\n istate_pcoords = ibstate_group['istate_pcoord'][sorted_istate_ids][...]\n istates = []\n\n for state_id, state, pcoord in zip(sorted_istate_ids, istate_rows, istate_pcoords):\n istate = InitialState(state_id=state_id, basis_state_id=int(state['basis_state_id']),\n iter_created=int(state['iter_created']), iter_used=int(state['iter_used']),\n istate_type=int(state['istate_type']), pcoord=pcoord.copy())\n istates.append(istate)\n return istates\n\n def get_unused_initial_states(self, n_states = None, n_iter = None):\n '''Retrieve any prepared but unused initial states applicable to the given iteration.\n Up to ``n_states`` states are returned; if ``n_states`` is None, then all unused states\n are returned.'''\n\n n_states = n_states or sys.maxsize\n ISTATE_UNUSED = InitialState.ISTATE_UNUSED\n ISTATE_STATUS_PREPARED = InitialState.ISTATE_STATUS_PREPARED\n with self.lock:\n n_iter = n_iter or self.current_iteration\n ibstate_group = self.find_ibstate_group(n_iter)\n istate_index = ibstate_group['istate_index']\n istate_pcoords = ibstate_group['istate_pcoord']\n n_index_entries = istate_index.len()\n chunksize = self.table_scan_chunksize\n\n states = []\n istart = 0\n while istart < n_index_entries and len(states) < n_states:\n istop = min(istart+chunksize, n_index_entries)\n istate_chunk = istate_index[istart:istop]\n pcoord_chunk = istate_pcoords[istart:istop]\n #state_ids = np.arange(istart,istop,dtype=np.uint)\n\n for ci in range(len(istate_chunk)):\n row = istate_chunk[ci]\n pcoord = pcoord_chunk[ci]\n state_id = istart+ci\n if row['iter_used'] == ISTATE_UNUSED and row['istate_status'] == ISTATE_STATUS_PREPARED:\n istate = InitialState(state_id = state_id,\n basis_state_id=int(row['basis_state_id']),\n iter_created = int(row['iter_created']), iter_used=0,\n istate_type = int(row['istate_type']),\n pcoord=pcoord.copy(),\n istate_status=ISTATE_STATUS_PREPARED)\n states.append(istate)\n del row, pcoord, state_id\n istart += chunksize\n del istate_chunk, pcoord_chunk #, state_ids, unused, ids_of_unused\n log.debug('found {:d} unused states'.format(len(states)))\n return states[:n_states]\n\n def prepare_iteration(self, n_iter, segments):\n \"\"\"Prepare for a new iteration by creating space to store the new iteration's data.\n The number of segments, their IDs, and their lineage must be determined and included\n in the set of segments passed in.\"\"\"\n\n log.debug('preparing HDF5 group for iteration %d (%d segments)' % (n_iter, len(segments)))\n\n # Ensure we have a list for guaranteed ordering\n segments = list(segments)\n n_particles = len(segments)\n system = self.system\n pcoord_ndim = system.pcoord_ndim\n pcoord_len = system.pcoord_len\n pcoord_dtype = system.pcoord_dtype\n\n with self.lock:\n # Create a table of summary information about each iteration\n summary_table = self.we_h5file['summary']\n if len(summary_table) < n_iter:\n summary_table.resize((n_iter+1,))\n\n iter_group = self.require_iter_group(n_iter)\n\n for linkname in ('seg_index', 'pcoord', 'wtgraph'):\n try:\n del iter_group[linkname]\n except KeyError:\n pass\n\n # everything indexed by [particle] goes in an index table\n seg_index_table_ds = iter_group.create_dataset('seg_index', shape=(n_particles,),\n dtype=seg_index_dtype)\n # unfortunately, h5py doesn't like in-place modification of individual fields; it expects\n # tuples. 
So, construct everything in a numpy array and then dump the whole thing into hdf5\n # In fact, this appears to be an h5py best practice (collect as much in ram as possible and then dump)\n seg_index_table = seg_index_table_ds[...]\n\n summary_row = np.zeros((1,), dtype=summary_table_dtype)\n summary_row['n_particles'] = n_particles\n summary_row['norm'] = np.add.reduce(list(map(attrgetter('weight'), segments)))\n summary_table[n_iter-1] = summary_row\n\n # pcoord is indexed as [particle, time, dimension]\n pcoord_opts = self.dataset_options.get('pcoord',{'name': 'pcoord',\n 'h5path': 'pcoord',\n 'compression': False})\n shape = (n_particles, pcoord_len, pcoord_ndim)\n pcoord_ds = create_dataset_from_dsopts(iter_group, pcoord_opts, shape, pcoord_dtype)\n pcoord = np.empty((n_particles, pcoord_len, pcoord_ndim), pcoord_dtype)\n\n\n total_parents = 0\n for (seg_id, segment) in enumerate(segments):\n if segment.seg_id is not None:\n assert segment.seg_id == seg_id\n else:\n segment.seg_id = seg_id\n # Parent must be set, though what it means depends on initpoint_type\n assert segment.parent_id is not None\n segment.seg_id = seg_id\n seg_index_table[seg_id]['status'] = segment.status\n seg_index_table[seg_id]['weight'] = segment.weight\n seg_index_table[seg_id]['parent_id'] = segment.parent_id\n seg_index_table[seg_id]['wtg_n_parents'] = len(segment.wtg_parent_ids)\n seg_index_table[seg_id]['wtg_offset'] = total_parents\n total_parents += len(segment.wtg_parent_ids)\n\n # Assign progress coordinate if any exists\n if segment.pcoord is not None:\n if len(segment.pcoord) == 1:\n # Initial pcoord\n pcoord[seg_id,0,:] = segment.pcoord[0,:]\n elif segment.pcoord.shape != pcoord.shape[1:]:\n raise ValueError('segment pcoord shape [%r] does not match expected shape [%r]'\n % (segment.pcoord.shape, pcoord.shape[1:]))\n else:\n pcoord[seg_id,...] = segment.pcoord\n\n\n if total_parents > 0:\n wtgraph_ds = iter_group.create_dataset('wtgraph', (total_parents,), seg_id_dtype,\n compression='gzip', shuffle=True)\n parents = np.empty((total_parents,), seg_id_dtype)\n\n for (seg_id, segment) in enumerate(segments):\n offset = seg_index_table[seg_id]['wtg_offset']\n extent = seg_index_table[seg_id]['wtg_n_parents']\n parent_list = list(segment.wtg_parent_ids)\n parents[offset:offset+extent] = parent_list[:]\n\n assert set(parents[offset:offset+extent]) == set(segment.wtg_parent_ids)\n\n wtgraph_ds[:] = parents\n\n # Create convenient hard links\n self.update_iter_group_links(n_iter)\n\n\n # Since we accumulated many of these changes in RAM (and not directly in HDF5), propagate\n # the changes out to HDF5\n seg_index_table_ds[:] = seg_index_table\n pcoord_ds[...] = pcoord\n\n def update_iter_group_links(self, n_iter):\n '''Update the per-iteration hard links pointing to the tables of target and initial/basis states for the\n given iteration. 
These links are not used by this class, but are remarkably convenient for third-party\n analysis tools and hdfview.'''\n\n with self.lock:\n iter_group = self.require_iter_group(n_iter)\n\n for linkname in ('ibstates', 'tstates'):\n try:\n del iter_group[linkname]\n except KeyError:\n pass\n\n iter_group['ibstates'] = self.find_ibstate_group(n_iter)\n\n tstate_group = self.find_tstate_group(n_iter)\n if tstate_group is not None:\n iter_group['tstates'] = tstate_group\n\n def get_iter_summary(self,n_iter=None):\n n_iter = n_iter or self.current_iteration\n with self.lock:\n return self.we_h5file['summary'][n_iter-1]\n\n def update_iter_summary(self,summary,n_iter=None):\n n_iter = n_iter or self.current_iteration\n with self.lock:\n self.we_h5file['summary'][n_iter-1] = summary\n\n def del_iter_summary(self, min_iter): #delete the iterations starting at min_iter\n with self.lock:\n self.we_h5file['summary'].resize((min_iter - 1,))\n\n def update_segments(self, n_iter, segments):\n '''Update segment information in the HDF5 file; all prior information for each\n ``segment`` is overwritten, except for parent and weight transfer information.'''\n\n segments = sorted(segments, key=attrgetter('seg_id'))\n\n with self.lock:\n iter_group = self.get_iter_group(n_iter)\n\n pc_dsid = iter_group['pcoord'].id\n si_dsid = iter_group['seg_index'].id\n\n seg_ids = [segment.seg_id for segment in segments]\n n_segments = len(segments)\n n_total_segments = si_dsid.shape[0]\n system = self.system\n pcoord_ndim = system.pcoord_ndim\n pcoord_len = system.pcoord_len\n pcoord_dtype = system.pcoord_dtype\n\n seg_index_entries = np.empty((n_segments,), dtype=seg_index_dtype)\n pcoord_entries = np.empty((n_segments,pcoord_len,pcoord_ndim), dtype=pcoord_dtype)\n\n pc_msel = h5s.create_simple(pcoord_entries.shape, (h5s.UNLIMITED,)*pcoord_entries.ndim)\n pc_msel.select_all()\n si_msel = h5s.create_simple(seg_index_entries.shape, (h5s.UNLIMITED,))\n si_msel.select_all()\n pc_fsel = pc_dsid.get_space()\n si_fsel = si_dsid.get_space()\n\n for iseg in range(n_segments):\n seg_id = seg_ids[iseg]\n op = h5s.SELECT_OR if iseg != 0 else h5s.SELECT_SET\n si_fsel.select_hyperslab((seg_id,), (1,), op=op)\n pc_fsel.select_hyperslab((seg_id,0,0), (1,pcoord_len,pcoord_ndim), op=op)\n\n # read summary data so that we have valud parent and weight transfer information\n si_dsid.read(si_msel, si_fsel, seg_index_entries)\n\n for (iseg, (segment, ientry)) in enumerate(zip(segments,seg_index_entries)):\n ientry['status'] = segment.status\n ientry['endpoint_type'] = segment.endpoint_type or Segment.SEG_ENDPOINT_UNSET\n ientry['cputime'] = segment.cputime\n ientry['walltime'] = segment.walltime\n ientry['weight'] = segment.weight\n\n pcoord_entries[iseg] = segment.pcoord\n\n # write progress coordinates and index using low level HDF5 functions for efficiency\n si_dsid.write(si_msel,si_fsel,seg_index_entries)\n pc_dsid.write(pc_msel,pc_fsel,pcoord_entries)\n\n # Now, to deal with auxiliary data\n # If any segment has any auxiliary data, then the aux dataset must spring into\n # existence. Each is named according to the name in segment.data, and has shape\n # (n_total_segs, ...) where the ... 
is the shape of the data in segment.data (and may be empty\n # in the case of scalar data) and dtype is taken from the data type of the data entry\n # compression is on by default for datasets that will be more than 1MiB\n\n # a mapping of data set name to (per-segment shape, data type) tuples\n dsets = {}\n\n # First we scan for presence, shape, and data type of auxiliary data sets\n for segment in segments:\n if segment.data:\n for dsname in segment.data:\n data = np.asarray(segment.data[dsname],order='C')\n segment.data[dsname] = data\n dsets[dsname] = (data.shape, data.dtype)\n\n # Then we iterate over data sets and store data\n if dsets:\n for (dsname, (shape, dtype)) in dsets.items():\n #dset = self._require_aux_dataset(iter_group, dsname, n_total_segments, shape, dtype)\n try:\n dsopts = self.dataset_options[dsname]\n except KeyError:\n dsopts = normalize_dataset_options({'name': dsname}, path_prefix='auxdata')\n\n shape = (n_total_segments,) + shape\n dset = require_dataset_from_dsopts(iter_group, dsopts, shape, dtype,\n autocompress_threshold=self.aux_compression_threshold, n_iter=n_iter)\n if dset is None:\n # storage is suppressed\n continue\n for segment in segments:\n try:\n auxdataset = segment.data[dsname]\n except KeyError:\n pass\n else:\n source_rank = len(auxdataset.shape)\n source_sel = h5s.create_simple(auxdataset.shape, (h5s.UNLIMITED,)*source_rank)\n source_sel.select_all()\n dest_sel = dset.id.get_space()\n dest_sel.select_hyperslab((segment.seg_id,)+(0,)*source_rank, (1,)+auxdataset.shape)\n dset.id.write(source_sel, dest_sel, auxdataset)\n if 'delram' in list(dsopts.keys()):\n del dsets[dsname]\n\n def get_segments(self, n_iter=None, seg_ids=None, load_pcoords = True):\n '''Return the given (or all) segments from a given iteration.\n\n If the optional parameter ``load_auxdata`` is true, then all auxiliary datasets\n available are loaded and mapped onto the ``data`` dictionary of each segment. If\n ``load_auxdata`` is None, then use the default ``self.auto_load_auxdata``, which can\n be set by the option ``load_auxdata`` in the ``[data]`` section of ``west.cfg``. 
This\n essentially requires as much RAM as there is per-iteration auxiliary data, so this\n behavior is not on by default.'''\n\n\n n_iter = n_iter or self.current_iteration\n file_version = self.we_h5file_version\n\n with self.lock:\n iter_group = self.get_iter_group(n_iter)\n seg_index_ds = iter_group['seg_index']\n\n if file_version < 5:\n all_parent_ids = iter_group['parents'][...]\n else:\n all_parent_ids = iter_group['wtgraph'][...]\n\n if seg_ids is not None:\n seg_ids = list(sorted(seg_ids))\n seg_index_entries = seg_index_ds[seg_ids]\n if load_pcoords:\n pcoord_entries = iter_group['pcoord'][seg_ids]\n else:\n seg_ids = list(range(len(seg_index_ds)))\n seg_index_entries = seg_index_ds[...]\n if load_pcoords:\n pcoord_entries = iter_group['pcoord'][...]\n\n segments = []\n\n for iseg, (seg_id, row) in enumerate(zip(seg_ids, seg_index_entries)):\n segment = Segment(seg_id = seg_id,\n n_iter = n_iter,\n status = int(row['status']),\n endpoint_type = int(row['endpoint_type']),\n walltime = float(row['walltime']),\n cputime = float(row['cputime']),\n weight = float(row['weight']))\n\n if load_pcoords:\n segment.pcoord = pcoord_entries[iseg]\n\n if file_version < 5:\n wtg_n_parents = row['n_parents']\n wtg_offset = row['parents_offset']\n wtg_parent_ids = all_parent_ids[wtg_offset:wtg_offset+wtg_n_parents]\n segment.parent_id = int(wtg_parent_ids[0])\n else:\n wtg_n_parents = row['wtg_n_parents']\n wtg_offset = row['wtg_offset']\n wtg_parent_ids = all_parent_ids[wtg_offset:wtg_offset+wtg_n_parents]\n segment.parent_id = int(row['parent_id'])\n segment.wtg_parent_ids = set(map(int,wtg_parent_ids))\n assert len(segment.wtg_parent_ids) == wtg_n_parents\n segments.append(segment)\n del all_parent_ids\n if load_pcoords:\n del pcoord_entries\n\n # If any other data sets are requested, load them as well\n for dsinfo in self.dataset_options.values():\n if dsinfo.get('load', False):\n dsname = dsinfo['name']\n ds = iter_group[dsinfo['h5path']]\n for (seg_id, segment) in enumerate(segments):\n segment.data[dsname] = ds[seg_id]\n\n return segments\n\n def get_all_parent_ids(self, n_iter):\n file_version = self.we_h5file_version\n with self.lock:\n iter_group = self.get_iter_group(n_iter)\n seg_index = iter_group['seg_index']\n\n if file_version < 5:\n offsets = seg_index['parents_offset']\n all_parents = iter_group['parents'][...]\n return all_parents.take(offsets)\n else:\n return seg_index['parent_id']\n\n def get_parent_ids(self, n_iter, seg_ids=None):\n '''Return a sequence of the parent IDs of the given seg_ids.'''\n\n file_version = self.we_h5file_version\n\n with self.lock:\n iter_group = self.get_iter_group(n_iter)\n seg_index = iter_group['seg_index']\n\n if seg_ids is None:\n seg_ids = range(len(seg_index))\n\n if file_version < 5:\n offsets = seg_index['parents_offset']\n all_parents = iter_group['parents'][...]\n return [all_parents[offsets[seg_id]] for seg_id in seg_ids]\n else:\n all_parents = seg_index['parent_id']\n return [all_parents[seg_id] for seg_id in seg_ids]\n\n def get_weights(self, n_iter, seg_ids):\n '''Return the weights associated with the given seg_ids'''\n\n unique_ids = sorted(set(seg_ids))\n if not unique_ids:\n return []\n with self.lock:\n iter_group = self.get_iter_group(n_iter)\n index_subset = iter_group['seg_index'][unique_ids]\n weight_map = dict(zip(unique_ids, index_subset['weight']))\n return [weight_map[seg_id] for seg_id in seg_ids]\n\n def get_child_ids(self, n_iter, seg_id):\n '''Return the seg_ids of segments who have the given segment as a 
parent.'''\n\n with self.lock:\n if n_iter == self.current_iteration: return []\n\n iter_group = self.get_iter_group(n_iter+1)\n seg_index = iter_group['seg_index']\n seg_ids = np.arange(len(seg_index), dtype=seg_id_dtype)\n\n if self.we_h5file_version < 5:\n offsets = seg_index['parents_offset']\n all_parent_ids = iter_group['parents'][...]\n parent_ids = np.array([all_parent_ids[offset] for offset in offsets])\n else:\n parent_ids = seg_index['parent_id']\n\n return seg_ids[parent_ids == seg_id]\n\n def get_children(self, segment):\n '''Return all segments which have the given segment as a parent'''\n\n if segment.n_iter == self.current_iteration: return []\n\n # Examine the segment index from the following iteration to see who has this segment\n # as a parent. We don't need to worry about the number of parents each segment\n # has, since each has at least one, and indexing on the offset into the parents array\n # gives the primary parent ID\n\n with self.lock:\n iter_group = self.get_iter_group(segment.n_iter+1)\n seg_index = iter_group['seg_index'][...]\n\n # This is one of the slowest pieces of code I've ever written...\n #seg_index = iter_group['seg_index'][...]\n #seg_ids = [seg_id for (seg_id,row) in enumerate(seg_index)\n # if all_parent_ids[row['parents_offset']] == segment.seg_id]\n #return self.get_segments_by_id(segment.n_iter+1, seg_ids)\n if self.we_h5file_version < 5:\n parents = iter_group['parents'][seg_index['parent_offsets']]\n else:\n parents = seg_index['parent_id']\n all_seg_ids = np.arange(seg_index.len(), dtype=np.uintp)\n seg_ids = all_seg_ids[parents == segment.seg_id]\n # the above will return a scalar if only one is found, so convert\n # to a list if necessary\n try:\n len(seg_ids)\n except TypeError:\n seg_ids = [seg_ids]\n\n return self.get_segments(segment.n_iter+1, seg_ids)\n\n # The following are dictated by the SimManager interface\n def prepare_run(self):\n self.open_backing()\n\n def finalize_run(self):\n self.flush_backing()\n self.close_backing()\n\n def save_new_weight_data(self, n_iter, new_weights):\n '''Save a set of NewWeightEntry objects to HDF5. Note that this should\n be called for the iteration in which the weights appear in their\n new locations (e.g. 
for recycled walkers, the iteration following\n recycling).'''\n\n if not new_weights:\n return\n\n system = westpa.rc.get_system_driver()\n\n index = np.empty(len(new_weights), dtype=nw_index_dtype)\n prev_init_pcoords = system.new_pcoord_array(len(new_weights))\n prev_final_pcoords = system.new_pcoord_array(len(new_weights))\n new_init_pcoords = system.new_pcoord_array(len(new_weights))\n\n for ientry, nwentry in enumerate(new_weights):\n row = index[ientry]\n row['source_type'] = nwentry.source_type\n row['weight'] = nwentry.weight\n row['prev_seg_id'] = nwentry.prev_seg_id\n # the following use -1 as a sentinel for a missing value\n row['target_state_id'] = nwentry.target_state_id if nwentry.target_state_id is not None else -1\n row['initial_state_id'] = nwentry.initial_state_id if nwentry.initial_state_id is not None else -1\n\n index[ientry] = row\n\n if nwentry.prev_init_pcoord is not None:\n prev_init_pcoords[ientry] = nwentry.prev_init_pcoord\n\n if nwentry.prev_final_pcoord is not None:\n prev_final_pcoords[ientry] = nwentry.prev_final_pcoord\n\n if nwentry.new_init_pcoord is not None:\n new_init_pcoords[ientry] = nwentry.new_init_pcoord\n\n with self.lock:\n iter_group = self.get_iter_group(n_iter)\n try:\n del iter_group['new_weights']\n except KeyError:\n pass\n\n nwgroup = iter_group.create_group('new_weights')\n nwgroup['index'] = index\n nwgroup['prev_init_pcoord'] = prev_init_pcoords\n nwgroup['prev_final_pcoord'] = prev_final_pcoords\n nwgroup['new_init_pcoord'] = new_init_pcoords\n\n def get_new_weight_data(self, n_iter):\n with self.lock:\n iter_group = self.get_iter_group(n_iter)\n\n try:\n nwgroup = iter_group['new_weights']\n except KeyError:\n return []\n\n try:\n index = nwgroup['index'][...]\n prev_init_pcoords = nwgroup['prev_init_pcoord'][...]\n prev_final_pcoords = nwgroup['prev_final_pcoord'][...]\n new_init_pcoords = nwgroup['new_init_pcoord'][...]\n except (KeyError,ValueError): #zero-length selections raise ValueError\n return []\n\n entries = []\n for i in range(len(index)):\n irow = index[i]\n\n prev_seg_id = irow['prev_seg_id']\n if prev_seg_id == -1: prev_seg_id = None\n\n initial_state_id = irow['initial_state_id']\n if initial_state_id == -1: initial_state_id = None\n\n target_state_id = irow['target_state_id']\n if target_state_id == -1: target_state_id = None\n\n entry = NewWeightEntry(source_type=irow['source_type'],\n weight=irow['weight'],\n prev_seg_id=prev_seg_id,\n prev_init_pcoord=prev_init_pcoords[i].copy(),\n prev_final_pcoord=prev_final_pcoords[i].copy(),\n new_init_pcoord=new_init_pcoords[i].copy(),\n target_state_id=target_state_id,\n initial_state_id=initial_state_id)\n\n entries.append(entry)\n return entries\n\n def find_bin_mapper(self, hashval):\n '''Check to see if the given has value is in the binning table. 
Returns the index in the\n bin data tables if found, or raises KeyError if not.'''\n\n try:\n hashval = hashval.hexdigest()\n except AttributeError:\n pass\n\n with self.lock:\n # these will raise KeyError if the group doesn't exist, which also means\n # that bin data is not available, so no special treatment here\n try:\n binning_group = self.we_h5file['/bin_topologies']\n index = binning_group['index']\n except KeyError:\n raise KeyError('hash {} not found'.format(hashval))\n\n n_entries = len(index)\n if n_entries == 0:\n raise KeyError('hash {} not found'.format(hashval))\n\n chunksize = self.table_scan_chunksize\n for istart in range(0,n_entries,chunksize):\n chunk = index[istart:min(istart+chunksize,n_entries)]\n for i in range(len(chunk)):\n if chunk[i]['hash'] == hashval:\n return istart+i\n\n raise KeyError('hash {} not found'.format(hashval))\n\n def get_bin_mapper(self, hashval):\n '''Look up the given hash value in the binning table, unpickling and returning the corresponding\n bin mapper if available, or raising KeyError if not.'''\n\n # Convert to a hex digest if we need to\n try:\n hashval = hashval.hexdigest()\n except AttributeError:\n pass\n\n with self.lock:\n # these will raise KeyError if the group doesn't exist, which also means\n # that bin data is not available, so no special treatment here\n try:\n binning_group = self.we_h5file['/bin_topologies']\n index = binning_group['index']\n pkl = binning_group['pickles']\n except KeyError:\n raise KeyError('hash {} not found. Could not retrieve binning group'.format(hashval))\n\n n_entries = len(index)\n if n_entries == 0:\n raise KeyError('hash {} not found. No entries in index'.format(hashval))\n\n chunksize = self.table_scan_chunksize\n\n for istart in range(0, n_entries, chunksize):\n chunk = index[istart:min(istart+chunksize, n_entries)]\n for i in range(len(chunk)):\n if chunk[i]['hash'] == hashval:\n pkldat = bytes(pkl[istart+i, 0:chunk[i]['pickle_len']].data)\n mapper = pickle.loads(pkldat)\n log.debug('loaded {!r} from {!r}'.format(mapper, binning_group))\n log.debug('hash value {!r}'.format(hashval))\n return mapper\n\n raise KeyError('hash {} not found'.format(hashval))\n\n def save_bin_mapper(self, hashval, pickle_data):\n '''Store the given mapper in the table of saved mappers. If the mapper cannot be stored,\n PickleError will be raised. 
Returns the index in the bin data tables where the mapper is stored.'''\n\n try:\n hashval = hashval.hexdigest()\n except AttributeError:\n pass\n pickle_data = bytes(pickle_data)\n\n # First, scan to see if the mapper already is in the HDF5 file\n try:\n return self.find_bin_mapper(hashval)\n except KeyError:\n pass\n\n # At this point, we have a valid pickle and know it's not stored\n with self.lock:\n binning_group = self.we_h5file.require_group('/bin_topologies')\n\n try:\n index = binning_group['index']\n pickle_ds = binning_group['pickles']\n except KeyError:\n index = binning_group.create_dataset('index', shape=(1,), maxshape=(None,), dtype=binning_index_dtype)\n pickle_ds = binning_group.create_dataset('pickles', dtype=np.uint8,\n shape=(1,len(pickle_data)), maxshape=(None,None), chunks=(1,4096),\n compression='gzip', compression_opts=9)\n n_entries = 1\n else:\n n_entries = len(index) + 1\n index.resize((n_entries,))\n new_hsize = max(pickle_ds.shape[1], len(pickle_data))\n pickle_ds.resize((n_entries,new_hsize))\n\n index_row = index[n_entries-1]\n index_row['hash'] = hashval\n index_row['pickle_len'] = len(pickle_data)\n index[n_entries-1] = index_row\n pickle_ds[n_entries-1,:len(pickle_data)] = memoryview(pickle_data)\n return n_entries-1\n\n def save_iter_binning(self, n_iter, hashval, pickled_mapper, target_counts):\n '''Save information about the binning used to generate segments for iteration n_iter.'''\n\n with self.lock:\n iter_group = self.get_iter_group(n_iter)\n\n try:\n del iter_group['bin_target_counts']\n except KeyError:\n pass\n\n iter_group['bin_target_counts'] = target_counts\n\n if hashval and pickled_mapper:\n self.save_bin_mapper(hashval, pickled_mapper)\n iter_group.attrs['binhash'] = hashval\n else:\n iter_group.attrs['binhash'] = ''\n\ndef normalize_dataset_options(dsopts, path_prefix='', n_iter=0):\n dsopts = dict(dsopts)\n\n ds_name = dsopts['name']\n if path_prefix:\n default_h5path = '{}/{}'.format(path_prefix,ds_name)\n else:\n default_h5path = ds_name\n\n dsopts.setdefault('h5path', default_h5path)\n dtype = dsopts.get('dtype')\n if dtype:\n if isinstance(dtype, str):\n dsopts['dtype'] = np.dtype(getattr(np, dtype))\n else:\n dsopts['dtype'] = np.dtype(dtype)\n\n dsopts['store'] = bool(dsopts['store']) if 'store' in dsopts else True\n dsopts['load'] = bool(dsopts['load']) if 'load' in dsopts else False\n\n return dsopts\n\ndef create_dataset_from_dsopts(group, dsopts, shape=None, dtype=None, data=None, autocompress_threshold=None, n_iter=None):\n #log.debug('create_dataset_from_dsopts(group={!r}, dsopts={!r}, shape={!r}, dtype={!r}, data={!r}, autocompress_threshold={!r})'\n # .format(group,dsopts,shape,dtype,data,autocompress_threshold))\n if not dsopts.get('store',True):\n return None\n\n if 'file' in list(dsopts.keys()):\n import h5py\n# dsopts['file'] = str(dsopts['file']).format(n_iter=n_iter)\n h5_auxfile = h5io.WESTPAH5File(dsopts['file'].format(n_iter=n_iter))\n h5group = group\n if not (\"iter_\" + str(n_iter).zfill(8)) in h5_auxfile:\n h5_auxfile.create_group(\"iter_\" + str(n_iter).zfill(8))\n group = h5_auxfile[('/' + \"iter_\" + str(n_iter).zfill(8))]\n\n h5path = dsopts['h5path']\n containing_group_name = posixpath.dirname(h5path)\n h5_dsname = posixpath.basename(h5path)\n\n # ensure arguments are sane\n if not shape and data is None:\n raise ValueError('either shape or data must be provided')\n elif data is None and (shape and dtype is None):\n raise ValueError('both shape and dtype must be provided when data is not provided')\n elif 
shape and data is not None and not data.shape == shape:\n raise ValueError('explicit shape {!r} does not match data shape {!r}'.format(shape, data.shape))\n\n if data is not None:\n shape = data.shape\n if dtype is None:\n dtype = data.dtype\n # end argument sanity checks\n\n # figure out where to store this data\n if containing_group_name:\n containing_group = group.require_group(containing_group_name)\n else:\n containing_group = group\n\n # has user requested an explicit data type?\n # the extra np.dtype is an idempotent operation on true dtype\n # objects, but ensures that things like np.float32, which are\n # actually NOT dtype objects, become dtype objects\n h5_dtype = np.dtype(dsopts.get('dtype', dtype))\n\n compression = None\n scaleoffset = None\n shuffle = False\n\n # compress if 1) explicitly requested, or 2) dataset size exceeds threshold and\n # compression not explicitly prohibited\n compression_directive = dsopts.get('compression')\n if compression_directive is None:\n # No directive\n nbytes = np.multiply.reduce(shape)*h5_dtype.itemsize\n if autocompress_threshold and nbytes > autocompress_threshold:\n compression = 9\n elif compression_directive == 0: # includes False\n # Compression prohibited\n compression = None\n else: # compression explicitly requested\n compression = compression_directive\n\n # Is scale/offset requested?\n scaleoffset = dsopts.get('scaleoffset', None)\n if scaleoffset is not None:\n scaleoffset = int(scaleoffset)\n\n # We always shuffle if we compress (losslessly)\n if compression:\n shuffle = True\n else:\n shuffle = False\n\n need_chunks = any([compression,scaleoffset is not None,shuffle])\n\n # We use user-provided chunks if available\n chunks_directive = dsopts.get('chunks')\n if chunks_directive is None:\n chunks = None\n elif chunks_directive is True:\n chunks = calc_chunksize(shape, h5_dtype)\n elif chunks_directive is False:\n chunks = None\n else:\n chunks = tuple(chunks_directive[i] if chunks_directive[i] <= shape[i] else shape[i] for i in range(len(shape)))\n\n if not chunks and need_chunks:\n chunks = calc_chunksize(shape, h5_dtype)\n\n opts = {'shape': shape,\n 'dtype': h5_dtype,\n 'compression': compression,\n 'shuffle': shuffle,\n 'chunks': chunks}\n\n try:\n import h5py._hl.filters\n h5py._hl.filters._COMP_FILTERS['scaleoffset']\n except (ImportError,KeyError,AttributeError):\n # filter not available, or an unexpected version of h5py\n # use lossless compression instead\n opts['compression'] = True\n else:\n opts['scaleoffset'] = scaleoffset\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug('requiring aux dataset {!r}, shape={!r}, opts={!r}'\n .format(h5_dsname, shape, opts))\n\n dset = containing_group.require_dataset(h5_dsname, **opts)\n\n if data is not None:\n dset[...] 
= data\n\n if 'file' in list(dsopts.keys()):\n import h5py\n if not dsopts['h5path'] in h5group:\n h5group[dsopts['h5path']] = h5py.ExternalLink(dsopts['file'].format(n_iter=n_iter), (\"/\" +\"iter_\" + str(n_iter).zfill(8) + \"/\" + dsopts['h5path']))\n\n return dset\n\n\ndef require_dataset_from_dsopts(group, dsopts, shape=None, dtype=None, data=None, autocompress_threshold=None, n_iter=None):\n if not dsopts.get('store',True):\n return None\n try:\n return group[dsopts['h5path']]\n except KeyError:\n return create_dataset_from_dsopts(group,dsopts,shape=shape,dtype=dtype,data=data,\n autocompress_threshold=autocompress_threshold, n_iter=n_iter)\n\n\n\n\ndef calc_chunksize(shape, dtype, max_chunksize=262144):\n '''Calculate a chunk size for HDF5 data, anticipating that access will slice\n along lower dimensions sooner than higher dimensions.'''\n\n chunk_shape = list(shape)\n for idim in range(len(shape)):\n chunk_nbytes = np.multiply.reduce(chunk_shape)*dtype.itemsize\n while chunk_shape[idim] > 1 and chunk_nbytes > max_chunksize:\n chunk_shape[idim] >>= 1 # divide by 2\n chunk_nbytes = np.multiply.reduce(chunk_shape)*dtype.itemsize\n\n if chunk_nbytes <= max_chunksize:\n break\n\n chunk_shape = tuple(chunk_shape)\n log.debug('selected chunk shape {} for data set of type {} shaped {} (chunk size = {} bytes)'\n .format(chunk_shape, dtype, shape, chunk_nbytes))\n return chunk_shape\n"
] | [
[
"numpy.array",
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"numpy.multiply.reduce",
"numpy.digitize",
"numpy.dtype"
]
] |
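A minimal standalone sketch of the chunk-sizing and compression options used by `create_dataset_from_dsopts`/`calc_chunksize` in the WESTPA record above. The file name, dataset path, and array shape are made-up illustrations; the halving heuristic and the `compression=9`/`shuffle=True` options mirror the snippet.

```python
import h5py
import numpy as np

def calc_chunksize(shape, dtype, max_chunksize=262144):
    # Halve leading dimensions until the chunk fits under the byte budget,
    # following the heuristic in the snippet above.
    chunk_shape = list(shape)
    for idim in range(len(shape)):
        nbytes = np.multiply.reduce(chunk_shape) * dtype.itemsize
        while chunk_shape[idim] > 1 and nbytes > max_chunksize:
            chunk_shape[idim] >>= 1
            nbytes = np.multiply.reduce(chunk_shape) * dtype.itemsize
        if nbytes <= max_chunksize:
            break
    return tuple(chunk_shape)

shape, dtype = (5000, 100), np.dtype(np.float32)
with h5py.File("example_aux.h5", "w") as f:
    dset = f.require_dataset("auxdata/energy", shape=shape, dtype=dtype,
                             compression=9, shuffle=True,
                             chunks=calc_chunksize(shape, dtype))  # (625, 100)
    dset[...] = np.zeros(shape, dtype=dtype)
```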
kartik4949/tinygrad | [
"ac229ea750371f7480c4e477d49949944361a001"
] | [
"tinygrad/tensor.py"
] | [
"# inspired by https://github.com/karpathy/micrograd/blob/master/micrograd/engine.py\nimport sys\nimport inspect\nimport functools\nimport os\nfrom collections import defaultdict\nimport numpy as np\n\n# **** profiler ****\n\nDEBUG = os.getenv(\"DEBUG\", None) is not None\nif DEBUG:\n import atexit, time\n debug_counts, debug_times = defaultdict(int), defaultdict(float)\n def print_debug_exit():\n for name, _ in sorted(debug_times.items(), key=lambda x: -x[1]):\n print(f\"{name:>20} : {debug_counts[name]:>6} {debug_times[name]:>10.2f} ms\")\n atexit.register(print_debug_exit)\n\nclass ProfileOp:\n def __init__(self, name, x, backward=False):\n self.name, self.x = f\"back_{name}\" if backward else name, x\n def __enter__(self):\n if DEBUG: self.st = time.time()\n def __exit__(self, *junk):\n if DEBUG:\n if cl_queue is not None:\n cl_queue.finish()\n et = (time.time()-self.st)*1000.\n debug_counts[self.name] += 1\n debug_times[self.name] += et\n print(f\"{self.name:>20} : {et:>7.2f} ms {[y.shape for y in self.x]}\")\n\n# **** GPU functions ****\n\ncl_ctx, cl_queue = None, None\ndef require_init_gpu():\n if not GPU: raise Exception(\"No GPU Support, install pyopencl\")\n global cl_ctx, cl_queue\n if cl_queue is None:\n devices = cl.get_platforms()[0].get_devices(device_type=cl.device_type.GPU)\n if len(devices) == 0:\n devices = cl.get_platforms()[0].get_devices(device_type=cl.device_type.CPU)\n cl_ctx = cl.Context(devices=devices)\n # this is an in-order command queue\n cl_queue = cl.CommandQueue(cl_ctx)\n\nclass GPUBuffer:\n def __init__(self, shape, hostbuf=None):\n self.shape, self.dtype = tuple(shape), np.float32\n self.cl = hostbuf.cl if isinstance(hostbuf, GPUBuffer) else \\\n cl.Buffer(cl_ctx, cl.mem_flags.READ_WRITE | (cl.mem_flags.COPY_HOST_PTR if hostbuf is not None else 0), 4*np.prod(shape),\n hostbuf=hostbuf.astype(np.float32).ravel() if hostbuf is not None else None)\n\n def __repr__(self):\n return f\"<GPUBuffer with shape {self.shape!r}>\"\n\n# **** ANE functions ****\n\nane = None\ndef require_init_ane():\n global ane\n if ane is None:\n import ane.lib.ane, tinygrad.ops_ane\n ane = ane.lib.ane.ANE()\n\n# **** start with two base classes, Tensor and Function ****\n\nclass Device: CPU, GPU, ANE = 0, 1, 2\n\nDEFAULT_DEVICE = Device.CPU if os.environ.get(\"GPU\", 0) != \"1\" else Device.GPU\n\nclass Tensor:\n did_float_warning = False\n training = True\n ops = defaultdict(dict)\n\n def __init__(self, data, device=DEFAULT_DEVICE, requires_grad=True):\n self.device, self.data = device, self._move_data(data, device)\n\n self.grad, self.requires_grad = None, requires_grad\n\n # internal variables used for autograd graph construction\n self._ctx = None\n\n def __repr__(self):\n return f\"<Tensor {self.data!r} with grad {(self.grad.data if self.grad else None)!r}>\"\n\n def assign(self, x):\n self.data = x.data\n\n @property\n def shape(self):\n return self.data.shape\n\n @property\n def dtype(self):\n return self.data.dtype\n\n # ***** creation helper functions *****\n\n @classmethod\n def zeros(cls, *shape, **kwargs):\n return cls(np.zeros(shape, dtype=np.float32), **kwargs)\n\n @classmethod\n def ones(cls, *shape, **kwargs):\n return cls(np.ones(shape, dtype=np.float32), **kwargs)\n\n @classmethod\n def randn(cls, *shape, **kwargs):\n return cls(np.random.randn(*shape).astype(np.float32), **kwargs)\n\n @classmethod\n def uniform(cls, *shape, **kwargs):\n return cls((np.random.uniform(-1., 1., size=shape)/np.sqrt(np.prod(shape))).astype(np.float32), **kwargs)\n\n @classmethod\n def 
eye(cls, dim, **kwargs):\n return cls(np.eye(dim).astype(np.float32), **kwargs)\n\n # ***** toposort and backward pass *****\n\n def deepwalk(self, visited: set, nodes: list):\n visited.add(self)\n if self._ctx:\n [i.deepwalk(visited, nodes) for i in self._ctx.parents if i not in visited]\n nodes.append(self)\n return nodes\n\n def backward(self):\n assert self.shape == (1,)\n\n # fill in the first grad with one\n # this is \"implicit gradient creation\"\n self.grad = Tensor(np.ones(self.shape, dtype=self.dtype), device=self.device, requires_grad=False)\n\n for t0 in reversed(self.deepwalk(set(), [])):\n assert (t0.grad is not None)\n with ProfileOp(t0._ctx.__class__.__name__, [t0.grad], backward=True):\n grads = t0._ctx.backward(t0._ctx, t0.grad.data)\n if len(t0._ctx.parents) == 1:\n grads = [grads]\n for t, g in zip(t0._ctx.parents, grads):\n if g is not None:\n assert g.shape == t.shape, \\\n f\"grad shape must match tensor shape in {self._ctx!r}, {g.shape!r} != {t.shape!r}\"\n gt = Tensor(g, device=self.device, requires_grad=False)\n t.grad = gt if t.grad is None else (t.grad + gt)\n\n # ***** tinygrad supports CPU and GPU *****\n\n @staticmethod\n def _move_data(data, device):\n if isinstance(data, GPUBuffer):\n if device == Device.GPU: return data\n old = data\n data = np.empty(old.shape, dtype=np.float32)\n with ProfileOp(\"toCPU\", [data]):\n cl.enqueue_copy(cl_queue, data, old.cl, is_blocking=True)\n\n elif \"ANETensor\" in str(type(data)):\n if device == Device.ANE: return data\n with ProfileOp(\"toCPU\", [data]):\n data = data.data().astype(np.float32)\n\n if not isinstance(data, np.ndarray):\n data = np.array(data, dtype=np.float32)\n\n if data.dtype != np.float32 and not Tensor.did_float_warning:\n # warning? float64 is actually needed for numerical jacobian\n print(f\"warning, {data.shape!r} isn't float32\")\n Tensor.did_float_warning = True\n\n if device == Device.GPU:\n require_init_gpu()\n with ProfileOp(\"toGPU\", [data]):\n return GPUBuffer(data.shape, data)\n\n elif device == Device.ANE:\n require_init_ane()\n with ProfileOp(\"toANE\", [data]):\n ndata = ane.tensor(data.shape)\n ndata.data()[:] = data\n return ndata\n return data\n\n def to_(self, device):\n self.data, self.device = self._move_data(self.data, device), device\n if self.grad: self.grad.to_(device)\n\n def to(self, device):\n ret = Tensor(self.data, device)\n if self.grad: ret.grad = self.grad.to(device)\n return ret\n\n def detach(self):\n return Tensor(self.data, device=self.device)\n\n # ***** non first class ops *****\n\n def __getitem__(self, val):\n arg = []\n for i,s in enumerate(val if type(val) in [list, tuple] else ([] if val is None else [val])):\n arg.append((s.start if s.start is not None else 0,\n (s.stop if s.stop >=0 else self.shape[i]+s.stop) if s.stop is not None else self.shape[i]))\n assert s.step is None or s.step == 1\n return self.slice(arg = arg+[(0,self.shape[i]) for i in range(len(arg), len(self.shape))])\n\n def pad2d(self, padding):\n return self[:, :, -padding[2]:self.shape[2]+padding[3], -padding[0]:self.shape[3]+padding[1]]\n\n def dot(self, w):\n return self.matmul(w)\n\n def mean(self, axis=None):\n out = self.sum(axis=axis)\n return out * (np.prod(out.shape)/np.prod(self.shape))\n\n def sqrt(self):\n return self.pow(0.5)\n\n def div(self, y):\n return self * (y ** -1.0)\n\n def sigmoid(self):\n e = self.exp()\n return e.div(1 + e)\n\n def swish(self):\n return self * self.sigmoid()\n\n def tanh(self):\n return 2.0 * ((2.0 * self).sigmoid()) - 1.0\n\n def leakyrelu(self, 
neg_slope=0.01):\n return self.relu() - (-neg_slope*self).relu()\n\n def softmax(self):\n ns = list(self.shape)[:-1]+[1]\n m = self.max(axis=len(self.shape)-1).reshape(shape=ns)\n e = (self - m).exp()\n ss = e.sum(axis=len(self.shape)-1).reshape(shape=ns)\n return e.div(ss)\n\n def logsoftmax(self):\n ns = list(self.shape)[:-1]+[1]\n m = self.max(axis=len(self.shape)-1).reshape(shape=ns)\n ss = m + (self-m).exp().sum(axis=len(self.shape)-1).reshape(shape=ns).log()\n return self - ss\n\n def dropout(self, p=0.5):\n # TODO: this needs a test\n if Tensor.training:\n _mask = np.asarray(np.random.binomial(1, 1.0-p, size=self.shape), dtype=self.dtype)\n return self * Tensor(_mask, requires_grad=False, device=self.device) * (1/(1.0 - p))\n else:\n return self\n\n def abs(self):\n return self.relu() + (-1.0*self).relu()\n\n def _pool2d(self, py, px):\n xup = self[:, :, :self.shape[2]-self.shape[2]%py, :self.shape[3]-self.shape[3]%px]\n return xup.reshape(shape=(xup.shape[0], xup.shape[1], xup.shape[2]//py, py, xup.shape[3]//px, px))\n\n def avg_pool2d(self, kernel_size=(2,2)):\n return self._pool2d(*kernel_size).mean(axis=(3,5))\n\n def max_pool2d(self, kernel_size=(2,2)):\n return self._pool2d(*kernel_size).max(axis=(3,5))\n\n# An instantiation of the Function is the Context\nclass Function:\n def __init__(self, *tensors):\n self.parents = tensors\n self.saved_tensors = []\n\n def save_for_backward(self, *x):\n self.saved_tensors.extend(x)\n\n def apply(self, *x, **kwargs):\n ctx = self(*x) # self - operation i.e 'add', 'sub', etc.\n # use default params\n params = inspect.signature(self.forward).parameters\n for p in params.values():\n if p.default is not p.empty:\n setattr(ctx, p.name, p.default)\n # overwrite with passed params\n for k, v in kwargs.items():\n setattr(ctx, k, v)\n with ProfileOp(ctx.__class__.__name__, x):\n ret = Tensor(self.forward(ctx, *[t.data for t in x], **kwargs),\n device=ctx.device, requires_grad=any([t.requires_grad for t in x]))\n if ret.requires_grad:\n ret._ctx = ctx\n return ret\n\ndef register(name, fxn, device=Device.CPU):\n Tensor.ops[device][name] = fxn\n def dispatch(*x, **kwargs):\n tt = [arg for arg in x if isinstance(arg, Tensor)][0]\n x = [Tensor(np.array([arg], dtype=tt.dtype), device=tt.device, requires_grad=False) if not isinstance(arg, Tensor) else arg for arg in x]\n f = Tensor.ops[tt.device][name]\n f.cl_ctx, f.cl_queue, f.ane, f.device = cl_ctx, cl_queue, ane, tt.device\n return f.apply(f, *x, **kwargs)\n setattr(Tensor, name, dispatch)\n # TODO: div is a second class op, so it doesn't work here\n if name in ['add', 'sub', 'mul', 'pow', 'matmul']:\n setattr(Tensor, f\"__{name}__\", dispatch)\n setattr(Tensor, f\"__i{name}__\", lambda self,x: self.assign(dispatch(self,x)))\n setattr(Tensor, f\"__r{name}__\", lambda self,x: dispatch(x,self))\n\nfor device in [device for device in Device.__dict__.keys() if device[0] != \"_\"]:\n setattr(Tensor, f\"{device.lower()}\", functools.partialmethod(Tensor.to, Device.__dict__[device]))\n setattr(Tensor, f\"{device.lower()}_\", functools.partialmethod(Tensor.to_, Device.__dict__[device]))\n\n# this registers all the operations\ndef _register_ops(namespace, device=Device.CPU):\n for name, cls in inspect.getmembers(namespace, inspect.isclass):\n if name[0] != \"_\": register(name.lower(), cls, device=device)\n\nfrom tinygrad import ops_cpu\n_register_ops(ops_cpu)\ntry:\n import pyopencl as cl\n # TODO: move this import to require_init_gpu?\n from tinygrad import ops_gpu\n _register_ops(ops_gpu, 
device=Device.GPU)\n GPU = True\nexcept ImportError:\n # no GPU support\n GPU = False\nANE = False\n"
] | [
[
"numpy.array",
"numpy.random.binomial",
"numpy.empty",
"numpy.zeros",
"numpy.ones",
"numpy.random.randn",
"numpy.eye",
"numpy.prod",
"numpy.random.uniform"
]
] |
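The `deepwalk`/`backward` pair in the tinygrad record above implements reverse-mode autodiff by topologically sorting the graph and applying the chain rule in reverse. A self-contained scalar sketch of the same pattern (not tinygrad code; `Value` and its two ops are illustrative stand-ins):

```python
class Value:
    def __init__(self, data, parents=()):
        self.data, self.parents, self.grad, self.grad_fn = data, parents, 0.0, None

    def __mul__(self, other):
        out = Value(self.data * other.data, (self, other))
        out.grad_fn = lambda g: ((g * other.data, self), (g * self.data, other))
        return out

    def __add__(self, other):
        out = Value(self.data + other.data, (self, other))
        out.grad_fn = lambda g: ((g, self), (g, other))
        return out

    def deepwalk(self, visited, nodes):
        # same shape as Tensor.deepwalk above: mark visited, recurse, append post-order
        visited.add(self)
        for p in self.parents:
            if p not in visited:
                p.deepwalk(visited, nodes)
        nodes.append(self)
        return nodes

    def backward(self):
        self.grad = 1.0                       # implicit gradient creation
        for node in reversed(self.deepwalk(set(), [])):
            if node.grad_fn is not None:
                for g, parent in node.grad_fn(node.grad):
                    parent.grad += g

x, y = Value(3.0), Value(4.0)
z = x * y + x
z.backward()
print(x.grad, y.grad)  # 5.0 3.0
```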
yulinliu101/DeepTP | [
"bc4f9adad6dda6c32e58026dda7863e0cb2a6072"
] | [
"src/evaluate_prediction.py"
] | [
"# -*- coding: utf-8 -*-\n# @Author: liuyulin\n# @Date: 2018-10-22 14:31:13\n# @Last Modified by: Yulin Liu\n# @Last Modified time: 2019-06-23 20:44:21\n\nimport numpy as np\nimport pandas as pd\nfrom visualize_samples import plot_fp_act\nimport pickle\nfrom scipy.interpolate import interp1d\nfrom utils import g\nimport matplotlib.pyplot as plt\n\npred, predicted_tracks_cov, buffer_total_logprob, buffer_pi_prob, predicted_matched_info = pickle.load(open('sample_results/all_lite_samp_mu_cov_test_s2_w80_batch0.pkl', 'rb'))\n\nclass evaluate_prediction:\n def __init__(self, \n pred_results_datapath_list,\n actual_track_datapath = '../../DATA/DeepTP/processed_flight_tracks.csv',\n flight_plan_datapath = '../../DATA/DeepTP/processed_flight_plans.csv',\n flight_plan_utilize_datapath = '../../DATA/DeepTP/IAH_BOS_Act_Flt_Trk_20130101_1231.CSV',\n feed_track_datapath = '../../DATA/DeepTP/test_flight_tracks_all.csv',\n feed_fp_datapath = '../../DATA/DeepTP/test_flight_plans_all.csv',\n n_mix = 3,\n search_pwr = 2,\n pred_dt = 120.\n ):\n self.pred_results_datapath_list = pred_results_datapath_list\n self.actual_track_datapath = actual_track_datapath\n self.flight_plan_datapath = flight_plan_datapath\n self.flight_plan_utilize_datapath = flight_plan_utilize_datapath\n self.feed_track_datapath = feed_track_datapath\n self.feed_fp_datapath = feed_fp_datapath\n\n self.n_mix = n_mix\n self.search_pwr = search_pwr\n self.pred_dt = pred_dt\n\n self.preds, \\\n self.pred_covs, \\\n self.pred_logprobs, \\\n self.act_track_data, \\\n self.FP_track, \\\n self.FP_utlize, \\\n self.feed_data, \\\n self.feed_fp = self._load_tracks()\n\n def _load_tracks(self):\n act_track_data = pd.read_csv(self.actual_track_datapath, header = 0)\n FP_track = pd.read_csv(self.flight_plan_datapath)\n FP_utlize = pd.read_csv(self.flight_plan_utilize_datapath, header = 0, usecols = [19,1])\n feed_data = pd.read_csv(self.feed_track_datapath, header = 0)\n feed_fp = pd.read_csv(self.feed_fp_datapath, header = 0)\n self.n_feed = feed_data.groupby('FID').FID.count().values[0] - 1\n\n act_track_data['cumDT'] = act_track_data.groupby('FID').DT.transform(pd.Series.cumsum)\n feed_data['cumDT'] = feed_data.groupby('FID').DT.transform(pd.Series.cumsum)\n\n preds = []\n pred_covs = []\n pred_logprobs = []\n for pfile in self.pred_results_datapath_list:\n with open(pfile, 'rb') as pfilein:\n pred, predicted_tracks_cov, buffer_total_logprob, _, _ = pickle.load(pfilein)\n preds.append(pred)\n pred_covs.append(predicted_tracks_cov)\n pred_logprobs.append(buffer_total_logprob)\n\n preds = np.concatenate(preds, axis = 0)\n pred_covs = np.concatenate(pred_covs, axis = 0)\n pred_logprobs = np.concatenate(pred_logprobs, axis = 0)\n\n return preds, pred_covs, pred_logprobs, act_track_data, FP_track, FP_utlize, feed_data, feed_fp\n\n\n def _best_sequence_idx(self,\n buffer_total_logprob,\n ):\n idx = self.n_mix**(self.search_pwr)\n n_predictions = buffer_total_logprob.shape[0]//idx\n best_seq_idx = []\n for i in range(n_predictions):\n best_seq_idx.append(np.argmax(buffer_total_logprob[i*idx:(i+1)*idx]) + i*idx)\n return best_seq_idx\n\n def _resample_interpolate_ground_truth(self):\n # resample ground truth to make it equal time interval as the predictions\n ground_truth = self.act_track_data.loc[self.act_track_data.FID.isin(self.feed_fp.FLT_PLAN_ID.unique())].reset_index(drop = True)\n ground_truth = ground_truth.drop(index = ground_truth.groupby('FID').head(self.n_feed).index)\n\n int_ground_truth_arr = self._interpolation(ground_truth)\n return 
int_ground_truth_arr\n\n def _interpolation(self,\n track_dataframe):\n new_series = []\n i = 0\n\n for idx, gp in track_dataframe.groupby('FID'):\n i += 1\n # Interpolated in terms of time\n # dold = gp.CumDist.values\n told = gp.cumDT.values\n xold = gp.Lon.values\n yold = gp.Lat.values\n zold = gp.Alt.values\n\n f1 = interp1d(told, xold, kind = 'linear')\n f2 = interp1d(told, yold, kind = 'linear')\n f3 = interp1d(told, zold, kind = 'linear')\n\n tnew = np.arange(told[0],told[-1], step = self.pred_dt)\n xnew = f1(tnew)\n ynew = f2(tnew)\n znew = f3(tnew)\n new_series.append(np.stack([ynew, xnew, znew], axis = 1))\n # new_series = np.array(new_series)\n\n return new_series\n\n def prediction_error(self, \n predictions,\n ground_truth = None,\n beam_search = True,\n resample_and_interpolation = True):\n if beam_search:\n best_seq_idx = self._best_sequence_idx(self.pred_logprobs)\n predictions = predictions[best_seq_idx, ] # shape of [n_seq, n_time, 6|--> lat lon alt cumT latspd lonspd]\n if ground_truth is not None:\n self.ground_truth = ground_truth.copy()\n else:\n if resample_and_interpolation:\n self.ground_truth = self._resample_interpolate_ground_truth() # list of arrays with shape of [n_time, 3]\n else:\n raise ValueError(\"No ground truth!\")\n \n avg_horizontal_err = []\n avg_vertical_err = []\n all_horizontal_err = []\n all_vertical_err = []\n for i in range(len(self.ground_truth)):\n n_pnt = min(self.ground_truth[i].shape[0], predictions[i].shape[0] - self.n_feed - 1)\n # print(n_pnt)\n _, _, dist = g.inv(self.ground_truth[i][:n_pnt, 1], \n self.ground_truth[i][:n_pnt, 0], \n predictions[i][self.n_feed:self.n_feed+n_pnt, 1], \n predictions[i][self.n_feed:self.n_feed+n_pnt, 0])\n\n alt_dist = 100*(self.ground_truth[i][:n_pnt, 2] - predictions[i][self.n_feed:self.n_feed+n_pnt, 2]) # ft.\n \n all_horizontal_err += list(dist/1852)\n all_vertical_err += list(alt_dist)\n\n avg_horizontal_err.append(np.mean(np.abs((dist/1852)))) # in nmi\n avg_vertical_err.append(np.mean(np.abs(alt_dist)))\n # avg_horizontal_err.append(np.sqrt(np.mean((dist/1852)**2))) # in nmi\n # avg_vertical_err.append(np.sqrt(np.mean(alt_dist**2)))\n \n return np.array(avg_horizontal_err), np.array(avg_vertical_err), np.array(all_horizontal_err), np.array(all_vertical_err)\n\n def prediction_coverage(self, \n n_std,\n predictions,\n prediction_cov,\n ground_truth = None,\n beam_search = True,\n resample_and_interpolation = True):\n if beam_search:\n best_seq_idx = self._best_sequence_idx(self.pred_logprobs)\n predictions = predictions[best_seq_idx, ] # shape of [n_seq, n_time, 6|--> lat lon alt cumT latspd lonspd]\n predictions_cov = np.sqrt(prediction_cov[best_seq_idx, ]) # shape of [n_seq, n_time - n_feed-1, 5,5|--> lat lon alt latspd lonspd]\n if ground_truth is not None:\n self.ground_truth = ground_truth.copy()\n else:\n if resample_and_interpolation:\n self.ground_truth = self._resample_interpolate_ground_truth() # list of arrays with shape of [n_time, 3]\n else:\n raise ValueError(\"No ground truth!\")\n \n n_horizotal_cover = []\n n_vertical_cover = []\n n_full_cover = []\n\n percentage_horizotal_cover = []\n percentage_vertical_cover = []\n percentage_full_cover = []\n\n total_pts = 0\n for i in range(len(self.ground_truth)):\n n_pnt = min(self.ground_truth[i].shape[0], predictions[i].shape[0] - self.n_feed - 1)\n\n _cond_lat_rhs = (self.ground_truth[i][:n_pnt, 0] <= (predictions[i][self.n_feed:self.n_feed+n_pnt, 0] + predictions_cov[i][:n_pnt, 0, 0] * n_std)) # lat\n _cond_lat_lhs = 
(self.ground_truth[i][:n_pnt, 0] >= (predictions[i][self.n_feed:self.n_feed+n_pnt, 0] - predictions_cov[i][:n_pnt, 0, 0] * n_std)) # lat\n _cond_lon_rhs = (self.ground_truth[i][:n_pnt, 1] <= (predictions[i][self.n_feed:self.n_feed+n_pnt, 1] + predictions_cov[i][:n_pnt, 1, 1] * n_std)) # lon\n _cond_lon_lhs = (self.ground_truth[i][:n_pnt, 1] >= (predictions[i][self.n_feed:self.n_feed+n_pnt, 1] - predictions_cov[i][:n_pnt, 1, 1] * n_std)) # lon\n _cond_alt_rhs = (self.ground_truth[i][:n_pnt, 2] <= (predictions[i][self.n_feed:self.n_feed+n_pnt, 2] + predictions_cov[i][:n_pnt, 2, 2] * n_std)) # alt\n _cond_alt_lhs = (self.ground_truth[i][:n_pnt, 2] >= (predictions[i][self.n_feed:self.n_feed+n_pnt, 2] - predictions_cov[i][:n_pnt, 2, 2] * n_std)) # alt\n\n _horizontal_cond = (_cond_lat_lhs & _cond_lat_rhs & _cond_lon_lhs & _cond_lon_rhs)\n _vertical_cond = (_cond_alt_rhs & _cond_alt_lhs)\n _full_cond = (_horizontal_cond & _vertical_cond)\n \n n_horizotal_cover.append(_horizontal_cond.sum())\n percentage_horizotal_cover.append(_horizontal_cond.sum()/n_pnt)\n\n n_vertical_cover.append(_vertical_cond.sum())\n percentage_vertical_cover.append(_vertical_cond.sum()/n_pnt)\n\n n_full_cover.append(_full_cond.sum())\n percentage_full_cover.append(_full_cond.sum()/n_pnt)\n\n total_pts += n_pnt\n \n return (np.array(percentage_horizotal_cover), \n np.array(percentage_vertical_cover), \n np.array(percentage_full_cover), \n sum(n_horizotal_cover)/total_pts, \n sum(n_vertical_cover)/total_pts,\n sum(n_full_cover)/total_pts)\n\n def plot_hist(self, \n all_hor_err,\n avg_horizontal_err,\n all_alt_err,\n avg_vertical_err):\n fig, axs = plt.subplots(2, 2, figsize=(10,6), facecolor='w', edgecolor='k')\n fig.subplots_adjust(wspace = 0.2, hspace = 0.35)\n axs = axs.ravel()\n _ = axs[0].hist(all_hor_err, 50, range = (0, 200), density = True)\n _ = axs[0].set_title('Horizontal Error (All)')\n _ = axs[0].set_xlabel('Distance/ nmi')\n _ = axs[1].hist(avg_horizontal_err, 50, range = (0, 200), density = True)\n _ = axs[1].set_title('Horizontal Error (Flight)')\n _ = axs[1].set_xlabel('Distance/ nmi')\n _ = axs[2].hist(all_alt_err, 25, range = (-150, 150), density = True)\n _ = axs[2].set_title('Vertical Error (All)')\n _ = axs[2].set_xlabel('Distance/ FL')\n _ = axs[3].hist(avg_vertical_err, 25, range = (0, 150), density = True)\n _ = axs[3].set_title('Vertical Error (Flight)')\n _ = axs[3].set_xlabel('Distance/ FL')\n return"
] | [
[
"numpy.concatenate",
"scipy.interpolate.interp1d",
"numpy.array",
"matplotlib.pyplot.subplots",
"numpy.stack",
"numpy.arange",
"numpy.argmax",
"numpy.sqrt",
"numpy.abs",
"pandas.read_csv"
]
] |
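`evaluate_prediction._interpolation` above re-samples each irregularly timed track onto a fixed `pred_dt` grid before computing errors. A small illustration with synthetic numbers (the track values below are invented):

```python
import numpy as np
from scipy.interpolate import interp1d

pred_dt = 120.0                                       # seconds, as in the class default
t_old = np.array([0.0, 90.0, 250.0, 400.0, 700.0])    # cumulative time stamps
lat   = np.array([29.9, 30.4, 31.2, 32.0, 33.5])
lon   = np.array([-95.3, -94.8, -93.9, -92.7, -90.2])
alt   = np.array([50.0, 180.0, 320.0, 360.0, 380.0])  # flight levels

t_new = np.arange(t_old[0], t_old[-1], step=pred_dt)  # 0, 120, ..., 600
track = np.stack([interp1d(t_old, lat, kind="linear")(t_new),
                  interp1d(t_old, lon, kind="linear")(t_new),
                  interp1d(t_old, alt, kind="linear")(t_new)], axis=1)
print(track.shape)                                    # (6, 3): lat, lon, alt per step
```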
alowet/iblapps | [
"9be936cd6806153dde0cbff1b6f2180191de3aeb"
] | [
"task_qc_viewer/ViewEphysQC.py"
] | [
"import logging\n\nfrom PyQt5 import QtCore, QtWidgets\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT\nimport pandas as pd\nimport numpy as np\n\nimport qt as qt\n\n_logger = logging.getLogger('ibllib')\n\n\nclass DataFrameModel(QtCore.QAbstractTableModel):\n DtypeRole = QtCore.Qt.UserRole + 1000\n ValueRole = QtCore.Qt.UserRole + 1001\n\n def __init__(self, df=pd.DataFrame(), parent=None):\n super(DataFrameModel, self).__init__(parent)\n self._dataframe = df\n\n def setDataFrame(self, dataframe):\n self.beginResetModel()\n self._dataframe = dataframe.copy()\n self.endResetModel()\n\n def dataFrame(self):\n return self._dataframe\n\n dataFrame = QtCore.pyqtProperty(pd.DataFrame, fget=dataFrame, fset=setDataFrame)\n\n @QtCore.pyqtSlot(int, QtCore.Qt.Orientation, result=str)\n def headerData(self, section: int, orientation: QtCore.Qt.Orientation,\n role: int = QtCore.Qt.DisplayRole):\n if role == QtCore.Qt.DisplayRole:\n if orientation == QtCore.Qt.Horizontal:\n return self._dataframe.columns[section]\n else:\n return str(self._dataframe.index[section])\n return QtCore.QVariant()\n\n def rowCount(self, parent=QtCore.QModelIndex()):\n if parent.isValid():\n return 0\n return len(self._dataframe.index)\n\n def columnCount(self, parent=QtCore.QModelIndex()):\n if parent.isValid():\n return 0\n return self._dataframe.columns.size\n\n def data(self, index, role=QtCore.Qt.DisplayRole):\n if (not index.isValid() or not (0 <= index.row() < self.rowCount() and\n 0 <= index.column() < self.columnCount())):\n return QtCore.QVariant()\n row = self._dataframe.index[index.row()]\n col = self._dataframe.columns[index.column()]\n dt = self._dataframe[col].dtype\n\n val = self._dataframe.iloc[row][col]\n if role == QtCore.Qt.DisplayRole:\n return str(val)\n elif role == DataFrameModel.ValueRole:\n return val\n if role == DataFrameModel.DtypeRole:\n return dt\n return QtCore.QVariant()\n\n def roleNames(self):\n roles = {\n QtCore.Qt.DisplayRole: b'display',\n DataFrameModel.DtypeRole: b'dtype',\n DataFrameModel.ValueRole: b'value'\n }\n return roles\n\n def sort(self, col, order):\n \"\"\"\n Sort table by given column number\n :param col: the column number selected (between 0 and self._dataframe.columns.size)\n :param order: the order to be sorted, 0 is descending; 1, ascending\n :return:\n \"\"\"\n self.layoutAboutToBeChanged.emit()\n col_name = self._dataframe.columns.values[col]\n # print('sorting by ' + col_name)\n self._dataframe.sort_values(by=col_name, ascending=not order, inplace=True)\n self._dataframe.reset_index(inplace=True, drop=True)\n self.layoutChanged.emit()\n\n\nclass PlotCanvas(FigureCanvasQTAgg):\n\n def __init__(self, parent=None, width=5, height=4, dpi=100, wheel=None):\n fig = Figure(figsize=(width, height), dpi=dpi)\n\n FigureCanvasQTAgg.__init__(self, fig)\n self.setParent(parent)\n\n FigureCanvasQTAgg.setSizePolicy(\n self,\n QtWidgets.QSizePolicy.Expanding,\n QtWidgets.QSizePolicy.Expanding)\n FigureCanvasQTAgg.updateGeometry(self)\n if wheel:\n self.ax, self.ax2 = fig.subplots(\n 2, 1, gridspec_kw={'height_ratios': [2, 1]}, sharex=True)\n else:\n self.ax = fig.add_subplot(111)\n self.draw()\n\n\nclass PlotWindow(QtWidgets.QWidget):\n def __init__(self, parent=None, wheel=None):\n QtWidgets.QWidget.__init__(self, parent=None)\n self.canvas = PlotCanvas(wheel=wheel)\n self.vbl = QtWidgets.QVBoxLayout() # Set box for plotting\n self.vbl.addWidget(self.canvas)\n self.setLayout(self.vbl)\n 
self.vbl.addWidget(NavigationToolbar2QT(self.canvas, self))\n\n\nclass GraphWindow(QtWidgets.QWidget):\n def __init__(self, parent=None, wheel=None):\n QtWidgets.QWidget.__init__(self, parent=None)\n vLayout = QtWidgets.QVBoxLayout(self)\n hLayout = QtWidgets.QHBoxLayout()\n self.pathLE = QtWidgets.QLineEdit(self)\n hLayout.addWidget(self.pathLE)\n self.loadBtn = QtWidgets.QPushButton(\"Select File\", self)\n hLayout.addWidget(self.loadBtn)\n vLayout.addLayout(hLayout)\n self.pandasTv = QtWidgets.QTableView(self)\n vLayout.addWidget(self.pandasTv)\n self.loadBtn.clicked.connect(self.loadFile)\n self.pandasTv.setSortingEnabled(True)\n self.pandasTv.doubleClicked.connect(self.tv_double_clicked)\n self.wplot = PlotWindow(wheel=wheel)\n self.wplot.show()\n self.wheel = wheel\n\n def loadFile(self):\n fileName, _ = QtWidgets.QFileDialog.getOpenFileName(self, \"Open File\", \"\",\n \"CSV Files (*.csv)\")\n self.pathLE.setText(fileName)\n df = pd.read_csv(fileName)\n self.update_df(df)\n\n def update_df(self, df):\n model = DataFrameModel(df)\n self.pandasTv.setModel(model)\n self.wplot.canvas.draw()\n\n def tv_double_clicked(self):\n df = self.pandasTv.model()._dataframe\n ind = self.pandasTv.currentIndex()\n start = df.loc[ind.row()]['intervals_0']\n finish = df.loc[ind.row()]['intervals_1']\n dt = finish - start\n if self.wheel:\n idx = np.searchsorted(self.wheel['re_ts'], np.array([start - dt / 10,\n finish + dt / 10]))\n max_val = np.max(self.wheel['re_pos'][idx[0]:idx[1]])\n min_val = np.min(self.wheel['re_pos'][idx[0]:idx[1]])\n self.wplot.canvas.ax2.set_ylim(min_val - 1, max_val + 1)\n self.wplot.canvas.ax2.set_xlim(start - dt / 10, finish + dt / 10)\n self.wplot.canvas.ax.set_xlim(start - dt / 10, finish + dt / 10)\n\n self.wplot.canvas.draw()\n\n\ndef viewqc(qc=None, title=None, wheel=None):\n qt.create_app()\n qcw = GraphWindow(wheel=wheel)\n qcw.setWindowTitle(title)\n if qc is not None:\n qcw.update_df(qc)\n qcw.show()\n return qcw\n"
] | [
[
"numpy.max",
"numpy.array",
"pandas.DataFrame",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__",
"numpy.min",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"matplotlib.figure.Figure",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry",
"pandas.read_csv",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.setSizePolicy"
]
] |
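In `tv_double_clicked` above, the wheel trace is windowed around the selected trial with `np.searchsorted`, and the slice sets the zoomed axis limits. A stripped-down version with synthetic data (the timestamps and positions are made up):

```python
import numpy as np

re_ts = np.linspace(0.0, 100.0, 1001)            # sorted wheel timestamps
re_pos = np.cumsum(np.random.randn(1001))        # wheel position trace

start, finish = 42.0, 45.5                       # trial interval from the table row
dt = finish - start
idx = np.searchsorted(re_ts, np.array([start - dt / 10, finish + dt / 10]))
window = re_pos[idx[0]:idx[1]]
print(window.min() - 1, window.max() + 1)        # y-limits used for the wheel axis
```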
Minsoo2022/Pose-Transfer | [
"10a60bb33d51a06e1200f5726f2367b5be4a6b79"
] | [
"models/inpainting.py"
] | [
"import numpy as np\nimport torch\nimport os\nfrom collections import OrderedDict\nfrom torch.autograd import Variable\nimport itertools\nimport util.util as util\nfrom util.image_pool import ImagePool\nfrom .base_model import BaseModel\nfrom . import networks\n# losses\nfrom losses.L1_plus_perceptualLoss import L1_plus_perceptualLoss\n\nfrom .src import dp, grid_sampler\n\nimport sys\nimport torch.nn.functional as F\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nimport torch.nn as nn\n\nclass DeformablePipe(BaseModel):\n def name(self):\n return 'DeformablePipe'\n\n def initialize(self, opt):\n BaseModel.initialize(self, opt)\n\n nb = opt.batchSize\n size = opt.fineSize\n self.input_P1_set = self.Tensor(nb, opt.P_input_nc, size, size)\n self.input_BP1_set = self.Tensor(nb, opt.BP_input_nc, size, size)\n self.input_P2_set = self.Tensor(nb, opt.P_input_nc, size, size)\n self.input_BP2_set = self.Tensor(nb, opt.BP_input_nc, size, size)\n\n input_nc = [opt.P_input_nc, opt.BP_input_nc+opt.BP_input_nc]\n\n self.netG = dp.GatedHourglass(32, 5, 2).cuda(self.gpu_ids[0])\n # self.refiner = dp.ResHourglassDeformableSkip(8, 10, 3, ngf=256).cuda(self.gpu_ids[0])\n self.sampler = grid_sampler.InvGridSamplerDecomposed(return_B=True, hole_fill_color=0.).cuda(self.gpu_ids[0])\n\n # self.netG = networks.define_G(input_nc, opt.P_input_nc,\n # opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids,\n # n_downsampling=opt.G_n_downsampling)\n\n # if self.isTrain:\n # use_sigmoid = opt.no_lsgan\n # if opt.with_D_PB:\n # self.netD_PB = networks.define_D(opt.P_input_nc+opt.BP_input_nc, opt.ndf,\n # opt.which_model_netD,\n # opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids,\n # not opt.no_dropout_D,\n # n_downsampling = opt.D_n_downsampling)\n #\n # if opt.with_D_PP:\n # self.netD_PP = networks.define_D(opt.P_input_nc+opt.P_input_nc, opt.ndf,\n # opt.which_model_netD,\n # opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids,\n # not opt.no_dropout_D,\n # n_downsampling = opt.D_n_downsampling)\n\n if not self.isTrain or opt.continue_train:\n which_epoch = opt.which_epoch\n self.load_network(self.netG, 'netG', which_epoch)\n # if self.isTrain:\n # if opt.with_D_PB:\n # self.load_network(self.netD_PB, 'netD_PB', which_epoch)\n # if opt.with_D_PP:\n # self.load_network(self.netD_PP, 'netD_PP', which_epoch)\n\n\n if self.isTrain:\n self.old_lr = opt.lr\n self.fake_PP_pool = ImagePool(opt.pool_size)\n self.fake_PB_pool = ImagePool(opt.pool_size)\n # define loss functions\n # self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor)\n\n if opt.L1_type == 'origin':\n self.criterionL1 = torch.nn.L1Loss()\n elif opt.L1_type == 'l1_plus_perL1':\n self.criterionL1 = L1_plus_perceptualLoss(opt.lambda_A, opt.lambda_B, opt.perceptual_layers, self.gpu_ids, opt.percep_is_l1)\n else:\n raise Excption('Unsurportted type of L1!')\n # initialize optimizers\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n # if opt.with_D_PB:\n # self.optimizer_D_PB = torch.optim.Adam(self.netD_PB.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n # if opt.with_D_PP:\n # self.optimizer_D_PP = torch.optim.Adam(self.netD_PP.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n\n self.optimizers = []\n self.schedulers = []\n self.optimizers.append(self.optimizer_G)\n # if opt.with_D_PB:\n # self.optimizers.append(self.optimizer_D_PB)\n # if opt.with_D_PP:\n # 
self.optimizers.append(self.optimizer_D_PP)\n for optimizer in self.optimizers:\n self.schedulers.append(networks.get_scheduler(optimizer, opt))\n\n print('---------- Networks initialized -------------')\n networks.print_network(self.netG)\n # if self.isTrain:\n # if opt.with_D_PB:\n # networks.print_network(self.netD_PB)\n # if opt.with_D_PP:\n # networks.print_network(self.netD_PP)\n # print('-----------------------------------------------')\n\n def set_input(self, input):\n input_P1, input_BP1 = input['P1'], input['BP1']\n input_P2, input_BP2 = input['P2'], input['BP2']\n\n self.input_P1_set.resize_(input_P1.size()).copy_(input_P1)\n self.input_BP1_set.resize_(input_BP1.size()).copy_(input_BP1)\n self.input_P2_set.resize_(input_P2.size()).copy_(input_P2)\n self.input_BP2_set.resize_(input_BP2.size()).copy_(input_BP2)\n\n self.image_paths = input['P1_path'][0] + '___' + input['P2_path'][0]\n\n\n def forward(self):\n self.input_P1 = Variable(self.input_P1_set)\n self.input_BP1 = Variable(self.input_BP1_set)\n\n self.input_P2 = Variable(self.input_P2_set)\n self.input_BP2 = Variable(self.input_BP2_set)\n\n G_input = [self.input_P1,\n torch.cat((self.input_BP1, self.input_BP2), 1)]\n self.fake_p2 = self.netG(G_input)\n\n\n def test(self):\n self.input_P1 = Variable(self.input_P1_set)\n self.input_BP1 = Variable(self.input_BP1_set)\n\n self.input_P2 = Variable(self.input_P2_set)\n self.input_BP2 = Variable(self.input_BP2_set)\n\n G_input = [self.input_P1,\n torch.cat((self.input_BP1, self.input_BP2), 1)]\n self.fake_p2 = self.netG(G_input)\n\n\n # get image paths\n def get_image_paths(self):\n return self.image_paths\n\n\n def backward_G(self):\n if self.opt.with_D_PB:\n pred_fake_PB = self.netD_PB(torch.cat((self.fake_p2, self.input_BP2), 1))\n self.loss_G_GAN_PB = self.criterionGAN(pred_fake_PB, True)\n\n if self.opt.with_D_PP:\n pred_fake_PP = self.netD_PP(torch.cat((self.fake_p2, self.input_P1), 1))\n self.loss_G_GAN_PP = self.criterionGAN(pred_fake_PP, True)\n\n # L1 loss\n if self.opt.L1_type == 'l1_plus_perL1' :\n losses = self.criterionL1(self.fake_p2, self.input_P2)\n self.loss_G_L1 = losses[0]\n self.loss_originL1 = losses[1].data[0]\n self.loss_perceptual = losses[2].data[0]\n else:\n self.loss_G_L1 = self.criterionL1(self.fake_p2, self.input_P2) * self.opt.lambda_A\n\n\n pair_L1loss = self.loss_G_L1\n if self.opt.with_D_PB:\n pair_GANloss = self.loss_G_GAN_PB * self.opt.lambda_GAN\n if self.opt.with_D_PP:\n pair_GANloss += self.loss_G_GAN_PP * self.opt.lambda_GAN\n pair_GANloss = pair_GANloss / 2\n else:\n if self.opt.with_D_PP:\n pair_GANloss = self.loss_G_GAN_PP * self.opt.lambda_GAN\n\n if self.opt.with_D_PB or self.opt.with_D_PP:\n pair_loss = pair_L1loss + pair_GANloss\n else:\n pair_loss = pair_L1loss\n\n pair_loss.backward()\n\n self.pair_L1loss = pair_L1loss.data[0]\n if self.opt.with_D_PB or self.opt.with_D_PP:\n self.pair_GANloss = pair_GANloss.data[0]\n\n\n def backward_D_basic(self, netD, real, fake):\n # Real\n pred_real = netD(real)\n loss_D_real = self.criterionGAN(pred_real, True) * self.opt.lambda_GAN\n # Fake\n pred_fake = netD(fake.detach())\n loss_D_fake = self.criterionGAN(pred_fake, False) * self.opt.lambda_GAN\n # Combined loss\n loss_D = (loss_D_real + loss_D_fake) * 0.5\n # backward\n loss_D.backward()\n return loss_D\n\n # D: take(P, B) as input\n def backward_D_PB(self):\n real_PB = torch.cat((self.input_P2, self.input_BP2), 1)\n # fake_PB = self.fake_PB_pool.query(torch.cat((self.fake_p2, self.input_BP2), 1))\n fake_PB = self.fake_PB_pool.query( 
torch.cat((self.fake_p2, self.input_BP2), 1).data )\n loss_D_PB = self.backward_D_basic(self.netD_PB, real_PB, fake_PB)\n self.loss_D_PB = loss_D_PB.data[0]\n\n # D: take(P, P') as input\n def backward_D_PP(self):\n real_PP = torch.cat((self.input_P2, self.input_P1), 1)\n # fake_PP = self.fake_PP_pool.query(torch.cat((self.fake_p2, self.input_P1), 1))\n fake_PP = self.fake_PP_pool.query( torch.cat((self.fake_p2, self.input_P1), 1).data )\n loss_D_PP = self.backward_D_basic(self.netD_PP, real_PP, fake_PP)\n self.loss_D_PP = loss_D_PP.data[0]\n\n\n def optimize_parameters(self):\n # forward\n self.forward()\n\n self.optimizer_G.zero_grad()\n self.backward_G()\n self.optimizer_G.step()\n\n # D_P\n if self.opt.with_D_PP:\n for i in range(self.opt.DG_ratio):\n self.optimizer_D_PP.zero_grad()\n self.backward_D_PP()\n self.optimizer_D_PP.step()\n\n # D_BP\n if self.opt.with_D_PB:\n for i in range(self.opt.DG_ratio):\n self.optimizer_D_PB.zero_grad()\n self.backward_D_PB()\n self.optimizer_D_PB.step()\n\n\n def get_current_errors(self):\n ret_errors = OrderedDict([ ('pair_L1loss', self.pair_L1loss)])\n if self.opt.with_D_PP:\n ret_errors['D_PP'] = self.loss_D_PP\n if self.opt.with_D_PB:\n ret_errors['D_PB'] = self.loss_D_PB\n if self.opt.with_D_PB or self.opt.with_D_PP:\n ret_errors['pair_GANloss'] = self.pair_GANloss\n\n if self.opt.L1_type == 'l1_plus_perL1':\n ret_errors['origin_L1'] = self.loss_originL1\n ret_errors['perceptual'] = self.loss_perceptual\n\n return ret_errors\n\n def get_current_visuals(self):\n height, width = self.input_P1.size(2), self.input_P1.size(3)\n input_P1 = util.tensor2im(self.input_P1.data)\n input_P2 = util.tensor2im(self.input_P2.data)\n\n input_BP1 = util.draw_pose_from_map(self.input_BP1.data)[0]\n input_BP2 = util.draw_pose_from_map(self.input_BP2.data)[0]\n\n fake_p2 = util.tensor2im(self.fake_p2.data)\n\n vis = np.zeros((height, width*5, 3)).astype(np.uint8) #h, w, c\n vis[:, :width, :] = input_P1\n vis[:, width:width*2, :] = input_BP1\n vis[:, width*2:width*3, :] = input_P2\n vis[:, width*3:width*4, :] = input_BP2\n vis[:, width*4:, :] = fake_p2\n\n ret_visuals = OrderedDict([('vis', vis)])\n\n return ret_visuals\n\n def save(self, label):\n self.save_network(self.netG, 'netG', label, self.gpu_ids)\n if self.opt.with_D_PB:\n self.save_network(self.netD_PB, 'netD_PB', label, self.gpu_ids)\n if self.opt.with_D_PP:\n self.save_network(self.netD_PP, 'netD_PP', label, self.gpu_ids)\n\n"
] | [
[
"torch.autograd.Variable",
"torch.cat",
"numpy.zeros",
"torch.nn.L1Loss"
]
] |
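`backward_D_basic` in the Pose-Transfer record scores a real pair and a detached fake pair and averages the two GAN losses. A generic sketch of that pattern with stand-in modules (the `Linear` discriminator and `BCEWithLogitsLoss` are placeholders, not the repo's `networks.GANLoss`):

```python
import torch
import torch.nn as nn

netD = nn.Sequential(nn.Linear(8, 1))            # stand-in discriminator
criterion = nn.BCEWithLogitsLoss()

real = torch.randn(4, 8)
fake = torch.randn(4, 8, requires_grad=True)     # pretend output of the generator

pred_real = netD(real)
pred_fake = netD(fake.detach())                  # detach: no gradient flows back to G
loss_D = 0.5 * (criterion(pred_real, torch.ones_like(pred_real)) +
                criterion(pred_fake, torch.zeros_like(pred_fake)))
loss_D.backward()
print(loss_D.item(), fake.grad)                  # fake.grad stays None
```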
p4vv37/pyradox | [
"cfc8c07d637a1cc189dd8d200f8a55d00405b81f"
] | [
"tests/test_res_next.py"
] | [
"import sys, os\n\nsys.path.append(os.path.dirname(os.getcwd()))\n\nfrom tensorflow import keras\nimport numpy as np\nfrom pyradox import convnets\n\n\ndef test():\n inputs = keras.Input(shape=(28, 28, 1))\n x = keras.layers.ZeroPadding2D(2)(\n inputs\n ) # padding to increase dimenstions to 32x32\n x = keras.layers.Conv2D(3, 1, padding=\"same\")(\n x\n ) # increasing the number of channels to 3\n x = convnets.ResNeXt([(32, 3), (64, 5), (32, 5), (64, 3)])(x)\n x = keras.layers.GlobalAvgPool2D()(x)\n outputs = keras.layers.Dense(10, activation=\"softmax\")(x)\n\n model = keras.models.Model(inputs=inputs, outputs=outputs)\n"
] | [
[
"tensorflow.keras.layers.GlobalAvgPool2D",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Input",
"tensorflow.keras.layers.ZeroPadding2D"
]
] |
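The pyradox test above pads 28x28x1 MNIST input to 32x32 and lifts it to 3 channels with a 1x1 convolution before the ResNeXt backbone. A quick shape check of just that adapter (assumes TensorFlow is installed; no pyradox needed):

```python
import numpy as np
from tensorflow import keras

inputs = keras.Input(shape=(28, 28, 1))
x = keras.layers.ZeroPadding2D(2)(inputs)            # 28x28 -> 32x32
x = keras.layers.Conv2D(3, 1, padding="same")(x)     # 1 channel -> 3 channels
probe = keras.models.Model(inputs, x)
print(probe(np.zeros((1, 28, 28, 1), dtype="float32")).shape)  # (1, 32, 32, 3)
```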
srinivas212/dlrm | [
"03a1534ff9fbe6a531609100a08d6f41d7f48d29"
] | [
"dlrm_s_pytorch.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n# Description: an implementation of a deep learning recommendation model (DLRM)\n# The model input consists of dense and sparse features. The former is a vector\n# of floating point values. The latter is a list of sparse indices into\n# embedding tables, which consist of vectors of floating point values.\n# The selected vectors are passed to mlp networks denoted by triangles,\n# in some cases the vectors are interacted through operators (Ops).\n#\n# output:\n# vector of values\n# model: |\n# /\\\n# /__\\\n# |\n# _____________________> Op <___________________\n# / | \\\n# /\\ /\\ /\\\n# /__\\ /__\\ ... /__\\\n# | | |\n# | Op Op\n# | ____/__\\_____ ____/__\\____\n# | |_Emb_|____|__| ... |_Emb_|__|___|\n# input:\n# [ dense features ] [sparse indices] , ..., [sparse indices]\n#\n# More precise definition of model layers:\n# 1) fully connected layers of an mlp\n# z = f(y)\n# y = Wx + b\n#\n# 2) embedding lookup (for a list of sparse indices p=[p1,...,pk])\n# z = Op(e1,...,ek)\n# obtain vectors e1=E[:,p1], ..., ek=E[:,pk]\n#\n# 3) Operator Op can be one of the following\n# Sum(e1,...,ek) = e1 + ... + ek\n# Dot(e1,...,ek) = [e1'e1, ..., e1'ek, ..., ek'e1, ..., ek'ek]\n# Cat(e1,...,ek) = [e1', ..., ek']'\n# where ' denotes transpose operation\n#\n# References:\n# [1] Maxim Naumov, Dheevatsa Mudigere, Hao-Jun Michael Shi, Jianyu Huang,\n# Narayanan Sundaram, Jongsoo Park, Xiaodong Wang, Udit Gupta, Carole-Jean Wu,\n# Alisson G. Azzolini, Dmytro Dzhulgakov, Andrey Mallevich, Ilia Cherniavskii,\n# Yinghai Lu, Raghuraman Krishnamoorthi, Ansha Yu, Volodymyr Kondratenko,\n# Stephanie Pereira, Xianjie Chen, Wenlin Chen, Vijay Rao, Bill Jia, Liang Xiong,\n# Misha Smelyanskiy, \"Deep Learning Recommendation Model for Personalization and\n# Recommendation Systems\", CoRR, arXiv:1906.00091, 2019\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n# miscellaneous\nimport builtins\nimport functools\n# import bisect\n# import shutil\nimport time\nimport json\n# data generation\nimport dlrm_data_pytorch as dp\n\n# numpy\nimport numpy as np\n\n# onnx\n# The onnx import causes deprecation warnings every time workers\n# are spawned during testing. 
So, we filter out those warnings.\nimport warnings\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nimport onnx\n\n# pytorch\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel.parallel_apply import parallel_apply\nfrom torch.nn.parallel.replicate import replicate\nfrom torch.nn.parallel.scatter_gather import gather, scatter\n# quotient-remainder trick\nfrom tricks.qr_embedding_bag import QREmbeddingBag\n# mixed-dimension trick\nfrom tricks.md_embedding_bag import PrEmbeddingBag, md_solver\n\nimport sklearn.metrics\n\n# from torchviz import make_dot\n# import torch.nn.functional as Functional\n# from torch.nn.parameter import Parameter\n\nexc = getattr(builtins, \"IOError\", \"FileNotFoundError\")\n\n\n### define dlrm in PyTorch ###\nclass DLRM_Net(nn.Module):\n def create_mlp(self, ln, sigmoid_layer):\n # build MLP layer by layer\n layers = nn.ModuleList()\n for i in range(0, ln.size - 1):\n n = ln[i]\n m = ln[i + 1]\n\n # construct fully connected operator\n LL = nn.Linear(int(n), int(m), bias=True)\n\n # initialize the weights\n # with torch.no_grad():\n # custom Xavier input, output or two-sided fill\n mean = 0.0 # std_dev = np.sqrt(variance)\n std_dev = np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n)\n W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32)\n std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1))\n bt = np.random.normal(mean, std_dev, size=m).astype(np.float32)\n # approach 1\n LL.weight.data = torch.tensor(W, requires_grad=True)\n LL.bias.data = torch.tensor(bt, requires_grad=True)\n # approach 2\n # LL.weight.data.copy_(torch.tensor(W))\n # LL.bias.data.copy_(torch.tensor(bt))\n # approach 3\n # LL.weight = Parameter(torch.tensor(W),requires_grad=True)\n # LL.bias = Parameter(torch.tensor(bt),requires_grad=True)\n layers.append(LL)\n\n # construct sigmoid or relu operator\n if i == sigmoid_layer:\n layers.append(nn.Sigmoid())\n else:\n layers.append(nn.ReLU())\n\n # approach 1: use ModuleList\n # return layers\n # approach 2: use Sequential container to wrap all layers\n return torch.nn.Sequential(*layers)\n\n def create_emb(self, m, ln):\n emb_l = nn.ModuleList()\n for i in range(0, ln.size):\n n = ln[i]\n # construct embedding operator\n if self.qr_flag and n > self.qr_threshold:\n EE = QREmbeddingBag(n, m, self.qr_collisions,\n operation=self.qr_operation, mode=\"sum\", sparse=True)\n elif self.md_flag and n > self.md_threshold:\n _m = m[i]\n base = max(m)\n EE = PrEmbeddingBag(n, _m, base)\n # use np initialization as below for consistency...\n W = np.random.uniform(\n low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, _m)\n ).astype(np.float32)\n EE.embs.weight.data = torch.tensor(W, requires_grad=True)\n\n else:\n EE = nn.EmbeddingBag(n, m, mode=\"sum\", sparse=True)\n\n # initialize embeddings\n # nn.init.uniform_(EE.weight, a=-np.sqrt(1 / n), b=np.sqrt(1 / n))\n W = np.random.uniform(\n low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, m)\n ).astype(np.float32)\n # approach 1\n EE.weight.data = torch.tensor(W, requires_grad=True)\n # approach 2\n # EE.weight.data.copy_(torch.tensor(W))\n # approach 3\n # EE.weight = Parameter(torch.tensor(W),requires_grad=True)\n\n emb_l.append(EE)\n\n return emb_l\n\n def __init__(\n self,\n m_spa=None,\n ln_emb=None,\n ln_bot=None,\n ln_top=None,\n arch_interaction_op=None,\n arch_interaction_itself=False,\n sigmoid_bot=-1,\n sigmoid_top=-1,\n sync_dense_params=True,\n loss_threshold=0.0,\n ndevices=-1,\n qr_flag=False,\n qr_operation=\"mult\",\n 
qr_collisions=0,\n qr_threshold=200,\n md_flag=False,\n md_threshold=200,\n ):\n super(DLRM_Net, self).__init__()\n\n if (\n (m_spa is not None)\n and (ln_emb is not None)\n and (ln_bot is not None)\n and (ln_top is not None)\n and (arch_interaction_op is not None)\n ):\n\n # save arguments\n self.ndevices = ndevices\n self.output_d = 0\n self.parallel_model_batch_size = -1\n self.parallel_model_is_not_prepared = True\n self.arch_interaction_op = arch_interaction_op\n self.arch_interaction_itself = arch_interaction_itself\n self.sync_dense_params = sync_dense_params\n self.loss_threshold = loss_threshold\n # create variables for QR embedding if applicable\n self.qr_flag = qr_flag\n if self.qr_flag:\n self.qr_collisions = qr_collisions\n self.qr_operation = qr_operation\n self.qr_threshold = qr_threshold\n # create variables for MD embedding if applicable\n self.md_flag = md_flag\n if self.md_flag:\n self.md_threshold = md_threshold\n # create operators\n if ndevices <= 1:\n self.emb_l = self.create_emb(m_spa, ln_emb)\n self.bot_l = self.create_mlp(ln_bot, sigmoid_bot)\n self.top_l = self.create_mlp(ln_top, sigmoid_top)\n\n def apply_mlp(self, x, layers):\n # approach 1: use ModuleList\n # for layer in layers:\n # x = layer(x)\n # return x\n # approach 2: use Sequential container to wrap all layers\n return layers(x)\n\n def apply_emb(self, lS_o, lS_i, emb_l):\n # WARNING: notice that we are processing the batch at once. We implicitly\n # assume that the data is laid out such that:\n # 1. each embedding is indexed with a group of sparse indices,\n # corresponding to a single lookup\n # 2. for each embedding the lookups are further organized into a batch\n # 3. for a list of embedding tables there is a list of batched lookups\n\n ly = []\n for k, sparse_index_group_batch in enumerate(lS_i):\n sparse_offset_group_batch = lS_o[k]\n\n # embedding lookup\n # We are using EmbeddingBag, which implicitly uses sum operator.\n # The embeddings are represented as tall matrices, with sum\n # happening vertically across 0 axis, resulting in a row vector\n E = emb_l[k]\n V = E(sparse_index_group_batch, sparse_offset_group_batch)\n\n ly.append(V)\n\n # print(ly)\n return ly\n\n def interact_features(self, x, ly):\n if self.arch_interaction_op == \"dot\":\n # concatenate dense and sparse features\n (batch_size, d) = x.shape\n T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d))\n # perform a dot product\n Z = torch.bmm(T, torch.transpose(T, 1, 2))\n # append dense feature with the interactions (into a row vector)\n # approach 1: all\n # Zflat = Z.view((batch_size, -1))\n # approach 2: unique\n _, ni, nj = Z.shape\n # approach 1: tril_indices\n # offset = 0 if self.arch_interaction_itself else -1\n # li, lj = torch.tril_indices(ni, nj, offset=offset)\n # approach 2: custom\n offset = 1 if self.arch_interaction_itself else 0\n li = torch.tensor([i for i in range(ni) for j in range(i + offset)])\n lj = torch.tensor([j for i in range(nj) for j in range(i + offset)])\n Zflat = Z[:, li, lj]\n # concatenate dense features and interactions\n R = torch.cat([x] + [Zflat], dim=1)\n elif self.arch_interaction_op == \"cat\":\n # concatenation features (into a row vector)\n R = torch.cat([x] + ly, dim=1)\n else:\n sys.exit(\n \"ERROR: --arch-interaction-op=\"\n + self.arch_interaction_op\n + \" is not supported\"\n )\n\n return R\n\n def forward(self, dense_x, lS_o, lS_i):\n if self.ndevices <= 1:\n return self.sequential_forward(dense_x, lS_o, lS_i)\n else:\n return self.parallel_forward(dense_x, lS_o, lS_i)\n\n 
def sequential_forward(self, dense_x, lS_o, lS_i):\n # process dense features (using bottom mlp), resulting in a row vector\n x = self.apply_mlp(dense_x, self.bot_l)\n # debug prints\n # print(\"intermediate\")\n # print(x.detach().cpu().numpy())\n\n # process sparse features(using embeddings), resulting in a list of row vectors\n ly = self.apply_emb(lS_o, lS_i, self.emb_l)\n # for y in ly:\n # print(y.detach().cpu().numpy())\n\n # interact features (dense and sparse)\n z = self.interact_features(x, ly)\n # print(z.detach().cpu().numpy())\n\n # obtain probability of a click (using top mlp)\n p = self.apply_mlp(z, self.top_l)\n\n # clamp output if needed\n if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:\n z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))\n else:\n z = p\n\n return z\n\n def parallel_forward(self, dense_x, lS_o, lS_i):\n ### prepare model (overwrite) ###\n # WARNING: # of devices must be >= batch size in parallel_forward call\n batch_size = dense_x.size()[0]\n ndevices = min(self.ndevices, batch_size, len(self.emb_l))\n device_ids = range(ndevices)\n # WARNING: must redistribute the model if mini-batch size changes(this is common\n # for last mini-batch, when # of elements in the dataset/batch size is not even\n if self.parallel_model_batch_size != batch_size:\n self.parallel_model_is_not_prepared = True\n\n if self.parallel_model_is_not_prepared or self.sync_dense_params:\n # replicate mlp (data parallelism)\n self.bot_l_replicas = replicate(self.bot_l, device_ids)\n self.top_l_replicas = replicate(self.top_l, device_ids)\n self.parallel_model_batch_size = batch_size\n\n if self.parallel_model_is_not_prepared:\n # distribute embeddings (model parallelism)\n t_list = []\n for k, emb in enumerate(self.emb_l):\n d = torch.device(\"cuda:\" + str(k % ndevices))\n emb.to(d)\n t_list.append(emb.to(d))\n self.emb_l = nn.ModuleList(t_list)\n self.parallel_model_is_not_prepared = False\n\n ### prepare input (overwrite) ###\n # scatter dense features (data parallelism)\n # print(dense_x.device)\n dense_x = scatter(dense_x, device_ids, dim=0)\n # distribute sparse features (model parallelism)\n if (len(self.emb_l) != len(lS_o)) or (len(self.emb_l) != len(lS_i)):\n sys.exit(\"ERROR: corrupted model input detected in parallel_forward call\")\n\n t_list = []\n i_list = []\n for k, _ in enumerate(self.emb_l):\n d = torch.device(\"cuda:\" + str(k % ndevices))\n t_list.append(lS_o[k].to(d))\n i_list.append(lS_i[k].to(d))\n lS_o = t_list\n lS_i = i_list\n\n ### compute results in parallel ###\n # bottom mlp\n # WARNING: Note that the self.bot_l is a list of bottom mlp modules\n # that have been replicated across devices, while dense_x is a tuple of dense\n # inputs that has been scattered across devices on the first (batch) dimension.\n # The output is a list of tensors scattered across devices according to the\n # distribution of dense_x.\n x = parallel_apply(self.bot_l_replicas, dense_x, None, device_ids)\n # debug prints\n # print(x)\n\n # embeddings\n ly = self.apply_emb(lS_o, lS_i, self.emb_l)\n # debug prints\n # print(ly)\n\n # butterfly shuffle (implemented inefficiently for now)\n # WARNING: Note that at this point we have the result of the embedding lookup\n # for the entire batch on each device. 
We would like to obtain partial results\n # corresponding to all embedding lookups, but part of the batch on each device.\n # Therefore, matching the distribution of output of bottom mlp, so that both\n # could be used for subsequent interactions on each device.\n if len(self.emb_l) != len(ly):\n sys.exit(\"ERROR: corrupted intermediate result in parallel_forward call\")\n\n t_list = []\n for k, _ in enumerate(self.emb_l):\n d = torch.device(\"cuda:\" + str(k % ndevices))\n y = scatter(ly[k], device_ids, dim=0)\n t_list.append(y)\n # adjust the list to be ordered per device\n ly = list(map(lambda y: list(y), zip(*t_list)))\n # debug prints\n # print(ly)\n\n # interactions\n z = []\n for k in range(ndevices):\n zk = self.interact_features(x[k], ly[k])\n z.append(zk)\n # debug prints\n # print(z)\n\n # top mlp\n # WARNING: Note that the self.top_l is a list of top mlp modules that\n # have been replicated across devices, while z is a list of interaction results\n # that by construction are scattered across devices on the first (batch) dim.\n # The output is a list of tensors scattered across devices according to the\n # distribution of z.\n p = parallel_apply(self.top_l_replicas, z, None, device_ids)\n\n ### gather the distributed results ###\n p0 = gather(p, self.output_d, dim=0)\n\n # clamp output if needed\n if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:\n z0 = torch.clamp(\n p0, min=self.loss_threshold, max=(1.0 - self.loss_threshold)\n )\n else:\n z0 = p0\n\n return z0\n\n\nif __name__ == \"__main__\":\n ### import packages ###\n import sys\n import argparse\n\n ### parse arguments ###\n parser = argparse.ArgumentParser(\n description=\"Train Deep Learning Recommendation Model (DLRM)\"\n )\n # model related parameters\n parser.add_argument(\"--arch-sparse-feature-size\", type=int, default=2)\n parser.add_argument(\"--arch-embedding-size\", type=str, default=\"4-3-2\")\n # j will be replaced with the table number\n parser.add_argument(\"--arch-mlp-bot\", type=str, default=\"4-3-2\")\n parser.add_argument(\"--arch-mlp-top\", type=str, default=\"4-2-1\")\n parser.add_argument(\"--arch-interaction-op\", type=str, default=\"dot\")\n parser.add_argument(\"--arch-interaction-itself\", action=\"store_true\", default=False)\n # embedding table options\n parser.add_argument(\"--md-flag\", action=\"store_true\", default=False)\n parser.add_argument(\"--md-threshold\", type=int, default=200)\n parser.add_argument(\"--md-temperature\", type=float, default=0.3)\n parser.add_argument(\"--md-round-dims\", action=\"store_true\", default=False)\n parser.add_argument(\"--qr-flag\", action=\"store_true\", default=False)\n parser.add_argument(\"--qr-threshold\", type=int, default=200)\n parser.add_argument(\"--qr-operation\", type=str, default=\"mult\")\n parser.add_argument(\"--qr-collisions\", type=int, default=4)\n # activations and loss\n parser.add_argument(\"--activation-function\", type=str, default=\"relu\")\n parser.add_argument(\"--loss-function\", type=str, default=\"mse\") # or bce or wbce\n parser.add_argument(\"--loss-weights\", type=str, default=\"1.0-1.0\") # for wbce\n parser.add_argument(\"--loss-threshold\", type=float, default=0.0) # 1.0e-7\n parser.add_argument(\"--round-targets\", type=bool, default=False)\n # data\n parser.add_argument(\"--data-size\", type=int, default=1)\n parser.add_argument(\"--num-batches\", type=int, default=0)\n parser.add_argument(\n \"--data-generation\", type=str, default=\"random\"\n ) # synthetic or dataset\n 
parser.add_argument(\"--data-trace-file\", type=str, default=\"./input/dist_emb_j.log\")\n parser.add_argument(\"--data-set\", type=str, default=\"kaggle\") # or terabyte\n parser.add_argument(\"--raw-data-file\", type=str, default=\"\")\n parser.add_argument(\"--processed-data-file\", type=str, default=\"\")\n parser.add_argument(\"--data-randomize\", type=str, default=\"total\") # or day or none\n parser.add_argument(\"--data-trace-enable-padding\", type=bool, default=False)\n parser.add_argument(\"--max-ind-range\", type=int, default=-1)\n parser.add_argument(\"--data-sub-sample-rate\", type=float, default=0.0) # in [0, 1]\n parser.add_argument(\"--num-indices-per-lookup\", type=int, default=10)\n parser.add_argument(\"--num-indices-per-lookup-fixed\", type=bool, default=False)\n parser.add_argument(\"--num-workers\", type=int, default=0)\n parser.add_argument(\"--memory-map\", action=\"store_true\", default=False)\n # training\n parser.add_argument(\"--mini-batch-size\", type=int, default=1)\n parser.add_argument(\"--nepochs\", type=int, default=1)\n parser.add_argument(\"--learning-rate\", type=float, default=0.01)\n parser.add_argument(\"--print-precision\", type=int, default=5)\n parser.add_argument(\"--numpy-rand-seed\", type=int, default=123)\n parser.add_argument(\"--sync-dense-params\", type=bool, default=True)\n # inference\n parser.add_argument(\"--inference-only\", action=\"store_true\", default=False)\n # onnx\n parser.add_argument(\"--save-onnx\", action=\"store_true\", default=False)\n # gpu\n parser.add_argument(\"--use-gpu\", action=\"store_true\", default=False)\n # debugging and profiling\n parser.add_argument(\"--print-freq\", type=int, default=1)\n parser.add_argument(\"--test-freq\", type=int, default=-1)\n parser.add_argument(\"--test-mini-batch-size\", type=int, default=-1)\n parser.add_argument(\"--test-num-workers\", type=int, default=-1)\n parser.add_argument(\"--print-time\", action=\"store_true\", default=False)\n parser.add_argument(\"--debug-mode\", action=\"store_true\", default=False)\n parser.add_argument(\"--enable-profiling\", action=\"store_true\", default=False)\n parser.add_argument(\"--plot-compute-graph\", action=\"store_true\", default=False)\n # store/load model\n parser.add_argument(\"--save-model\", type=str, default=\"\")\n parser.add_argument(\"--load-model\", type=str, default=\"\")\n # mlperf logging (disables other output and stops early)\n parser.add_argument(\"--mlperf-logging\", action=\"store_true\", default=False)\n # stop at target accuracy Kaggle 0.789, Terabyte (sub-sampled=0.875) 0.8107\n parser.add_argument(\"--mlperf-acc-threshold\", type=float, default=0.0)\n # stop at target AUC Terabyte (no subsampling) 0.8025\n parser.add_argument(\"--mlperf-auc-threshold\", type=float, default=0.0)\n parser.add_argument(\"--mlperf-bin-loader\", action='store_true', default=False)\n parser.add_argument(\"--mlperf-bin-shuffle\", action='store_true', default=False)\n args = parser.parse_args()\n\n if args.mlperf_logging:\n print('command line args: ', json.dumps(vars(args)))\n\n ### some basic setup ###\n np.random.seed(args.numpy_rand_seed)\n np.set_printoptions(precision=args.print_precision)\n torch.set_printoptions(precision=args.print_precision)\n torch.manual_seed(args.numpy_rand_seed)\n\n if (args.test_mini_batch_size < 0):\n # if the parameter is not set, use the training batch size\n args.test_mini_batch_size = args.mini_batch_size\n if (args.test_num_workers < 0):\n # if the parameter is not set, use the same parameter for 
training\n args.test_num_workers = args.num_workers\n\n use_gpu = args.use_gpu and torch.cuda.is_available()\n if use_gpu:\n torch.cuda.manual_seed_all(args.numpy_rand_seed)\n torch.backends.cudnn.deterministic = True\n device = torch.device(\"cuda\", 0)\n ngpus = torch.cuda.device_count() # 1\n print(\"Using {} GPU(s)...\".format(ngpus))\n else:\n device = torch.device(\"cpu\")\n print(\"Using CPU...\")\n\n ### prepare training data ###\n ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep=\"-\")\n # input data\n if (args.data_generation == \"dataset\"):\n\n train_data, train_ld, test_data, test_ld = \\\n dp.make_criteo_data_and_loaders(args)\n nbatches = args.num_batches if args.num_batches > 0 else len(train_ld)\n nbatches_test = len(test_ld)\n\n ln_emb = train_data.counts\n # enforce maximum limit on number of vectors per embedding\n if args.max_ind_range > 0:\n ln_emb = np.array(list(map(\n lambda x: x if x < args.max_ind_range else args.max_ind_range,\n ln_emb\n )))\n m_den = train_data.m_den\n ln_bot[0] = m_den\n else:\n # input and target at random\n ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep=\"-\")\n m_den = ln_bot[0]\n train_data, train_ld = dp.make_random_data_and_loader(args, ln_emb, m_den)\n nbatches = args.num_batches if args.num_batches > 0 else len(train_ld)\n\n ### parse command line arguments ###\n m_spa = args.arch_sparse_feature_size\n num_fea = ln_emb.size + 1 # num sparse + num dense features\n m_den_out = ln_bot[ln_bot.size - 1]\n if args.arch_interaction_op == \"dot\":\n # approach 1: all\n # num_int = num_fea * num_fea + m_den_out\n # approach 2: unique\n if args.arch_interaction_itself:\n num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out\n else:\n num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out\n elif args.arch_interaction_op == \"cat\":\n num_int = num_fea * m_den_out\n else:\n sys.exit(\n \"ERROR: --arch-interaction-op=\"\n + args.arch_interaction_op\n + \" is not supported\"\n )\n arch_mlp_top_adjusted = str(num_int) + \"-\" + args.arch_mlp_top\n ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep=\"-\")\n\n # sanity check: feature sizes and mlp dimensions must match\n if m_den != ln_bot[0]:\n sys.exit(\n \"ERROR: arch-dense-feature-size \"\n + str(m_den)\n + \" does not match first dim of bottom mlp \"\n + str(ln_bot[0])\n )\n if args.qr_flag:\n if args.qr_operation == \"concat\" and 2 * m_spa != m_den_out:\n sys.exit(\n \"ERROR: 2 arch-sparse-feature-size \"\n + str(2 * m_spa)\n + \" does not match last dim of bottom mlp \"\n + str(m_den_out)\n + \" (note that the last dim of bottom mlp must be 2x the embedding dim)\"\n )\n if args.qr_operation != \"concat\" and m_spa != m_den_out:\n sys.exit(\n \"ERROR: arch-sparse-feature-size \"\n + str(m_spa)\n + \" does not match last dim of bottom mlp \"\n + str(m_den_out)\n )\n else:\n if m_spa != m_den_out:\n sys.exit(\n \"ERROR: arch-sparse-feature-size \"\n + str(m_spa)\n + \" does not match last dim of bottom mlp \"\n + str(m_den_out)\n )\n if num_int != ln_top[0]:\n sys.exit(\n \"ERROR: # of feature interactions \"\n + str(num_int)\n + \" does not match first dimension of top mlp \"\n + str(ln_top[0])\n )\n\n # assign mixed dimensions if applicable\n if args.md_flag:\n m_spa = md_solver(\n torch.tensor(ln_emb),\n args.md_temperature, # alpha\n d0=m_spa,\n round_dim=args.md_round_dims\n ).tolist()\n\n # test prints (model arch)\n if args.debug_mode:\n print(\"model arch:\")\n print(\n \"mlp top arch \"\n + str(ln_top.size - 1)\n + \" layers, with input to output 
dimensions:\"\n )\n print(ln_top)\n print(\"# of interactions\")\n print(num_int)\n print(\n \"mlp bot arch \"\n + str(ln_bot.size - 1)\n + \" layers, with input to output dimensions:\"\n )\n print(ln_bot)\n print(\"# of features (sparse and dense)\")\n print(num_fea)\n print(\"dense feature size\")\n print(m_den)\n print(\"sparse feature size\")\n print(m_spa)\n print(\n \"# of embeddings (= # of sparse features) \"\n + str(ln_emb.size)\n + \", with dimensions \"\n + str(m_spa)\n + \"x:\"\n )\n print(ln_emb)\n\n print(\"data (inputs and targets):\")\n for j, (X, lS_o, lS_i, T) in enumerate(train_ld):\n # early exit if nbatches was set by the user and has been exceeded\n if nbatches > 0 and j >= nbatches:\n break\n\n print(\"mini-batch: %d\" % j)\n print(X.detach().cpu().numpy())\n # transform offsets to lengths when printing\n print(\n [\n np.diff(\n S_o.detach().cpu().tolist() + list(lS_i[i].shape)\n ).tolist()\n for i, S_o in enumerate(lS_o)\n ]\n )\n print([S_i.detach().cpu().tolist() for S_i in lS_i])\n print(T.detach().cpu().numpy())\n\n ndevices = min(ngpus, args.mini_batch_size, num_fea - 1) if use_gpu else -1\n\n ### construct the neural network specified above ###\n # WARNING: to obtain exactly the same initialization for\n # the weights we need to start from the same random seed.\n # np.random.seed(args.numpy_rand_seed)\n dlrm = DLRM_Net(\n m_spa,\n ln_emb,\n ln_bot,\n ln_top,\n arch_interaction_op=args.arch_interaction_op,\n arch_interaction_itself=args.arch_interaction_itself,\n sigmoid_bot=-1,\n sigmoid_top=ln_top.size - 2,\n sync_dense_params=args.sync_dense_params,\n loss_threshold=args.loss_threshold,\n ndevices=ndevices,\n qr_flag=args.qr_flag,\n qr_operation=args.qr_operation,\n qr_collisions=args.qr_collisions,\n qr_threshold=args.qr_threshold,\n md_flag=args.md_flag,\n md_threshold=args.md_threshold,\n )\n # test prints\n if args.debug_mode:\n print(\"initial parameters (weights and bias):\")\n for param in dlrm.parameters():\n print(param.detach().cpu().numpy())\n # print(dlrm)\n\n if use_gpu:\n # Custom Model-Data Parallel\n # the mlps are replicated and use data parallelism, while\n # the embeddings are distributed and use model parallelism\n dlrm = dlrm.to(device) # .cuda()\n if dlrm.ndevices > 1:\n dlrm.emb_l = dlrm.create_emb(m_spa, ln_emb)\n\n # specify the loss function\n if args.loss_function == \"mse\":\n loss_fn = torch.nn.MSELoss(reduction=\"mean\")\n elif args.loss_function == \"bce\":\n loss_fn = torch.nn.BCELoss(reduction=\"mean\")\n elif args.loss_function == \"wbce\":\n loss_ws = torch.tensor(np.fromstring(args.loss_weights, dtype=float, sep=\"-\"))\n loss_fn = torch.nn.BCELoss(reduction=\"none\")\n else:\n sys.exit(\"ERROR: --loss-function=\" + args.loss_function + \" is not supported\")\n\n if not args.inference_only:\n # specify the optimizer algorithm\n optimizer = torch.optim.SGD(dlrm.parameters(), lr=args.learning_rate)\n\n ### main loop ###\n def time_wrap(use_gpu):\n if use_gpu:\n torch.cuda.synchronize()\n return time.time()\n\n def dlrm_wrap(X, lS_o, lS_i, use_gpu, device):\n if use_gpu: # .cuda()\n # lS_i can be either a list of tensors or a stacked tensor.\n # Handle each case below:\n lS_i = [S_i.to(device) for S_i in lS_i] if isinstance(lS_i, list) \\\n else lS_i.to(device)\n lS_o = [S_o.to(device) for S_o in lS_o] if isinstance(lS_o, list) \\\n else lS_o.to(device)\n return dlrm(\n X.to(device),\n lS_o,\n lS_i\n )\n else:\n return dlrm(X, lS_o, lS_i)\n\n def loss_fn_wrap(Z, T, use_gpu, device):\n if args.loss_function == \"mse\" or 
args.loss_function == \"bce\":\n if use_gpu:\n return loss_fn(Z, T.to(device))\n else:\n return loss_fn(Z, T)\n elif args.loss_function == \"wbce\":\n if use_gpu:\n loss_ws_ = loss_ws[T.data.view(-1).long()].view_as(T).to(device)\n loss_fn_ = loss_fn(Z, T.to(device))\n else:\n loss_ws_ = loss_ws[T.data.view(-1).long()].view_as(T)\n loss_fn_ = loss_fn(Z, T.to(device))\n loss_sc_ = loss_ws_ * loss_fn_\n # debug prints\n # print(loss_ws_)\n # print(loss_fn_)\n return loss_sc_.mean()\n\n # training or inference\n best_gA_test = 0\n best_auc_test = 0\n total_time = 0\n total_loss = 0\n total_accu = 0\n total_iter = 0\n total_samp = 0\n k = 0\n\n # Load model is specified\n if not (args.load_model == \"\"):\n print(\"Loading saved model {}\".format(args.load_model))\n if use_gpu:\n if dlrm.ndevices > 1:\n # NOTE: when targeting inference on multiple GPUs,\n # load the model as is on CPU or GPU, with the move\n # to multiple GPUs to be done in parallel_forward\n ld_model = torch.load(args.load_model)\n else:\n # NOTE: when targeting inference on single GPU,\n # note that the call to .to(device) has already happened\n ld_model = torch.load(\n args.load_model,\n map_location=torch.device('cuda')\n # map_location=lambda storage, loc: storage.cuda(0)\n )\n else:\n # when targeting inference on CPU\n ld_model = torch.load(args.load_model, map_location=torch.device('cpu'))\n dlrm.load_state_dict(ld_model[\"state_dict\"])\n ld_j = ld_model[\"iter\"]\n ld_k = ld_model[\"epoch\"]\n ld_nepochs = ld_model[\"nepochs\"]\n ld_nbatches = ld_model[\"nbatches\"]\n ld_nbatches_test = ld_model[\"nbatches_test\"]\n ld_gA = ld_model[\"train_acc\"]\n ld_gL = ld_model[\"train_loss\"]\n ld_total_loss = ld_model[\"total_loss\"]\n ld_total_accu = ld_model[\"total_accu\"]\n ld_gA_test = ld_model[\"test_acc\"]\n ld_gL_test = ld_model[\"test_loss\"]\n if not args.inference_only:\n optimizer.load_state_dict(ld_model[\"opt_state_dict\"])\n best_gA_test = ld_gA_test\n total_loss = ld_total_loss\n total_accu = ld_total_accu\n k = ld_k # epochs\n j = ld_j # batches\n else:\n args.print_freq = ld_nbatches\n args.test_freq = 0\n\n print(\n \"Saved at: epoch = {:d}/{:d}, batch = {:d}/{:d}, ntbatch = {:d}\".format(\n ld_k, ld_nepochs, ld_j, ld_nbatches, ld_nbatches_test\n )\n )\n print(\n \"Training state: loss = {:.6f}, accuracy = {:3.3f} %\".format(\n ld_gL, ld_gA * 100\n )\n )\n print(\n \"Testing state: loss = {:.6f}, accuracy = {:3.3f} %\".format(\n ld_gL_test, ld_gA_test * 100\n )\n )\n\n print(\"time/loss/accuracy (if enabled):\")\n with torch.autograd.profiler.profile(args.enable_profiling, use_gpu) as prof:\n while k < args.nepochs:\n accum_time_begin = time_wrap(use_gpu)\n\n if args.mlperf_logging:\n previous_iteration_time = None\n\n for j, (X, lS_o, lS_i, T) in enumerate(train_ld):\n if args.mlperf_logging:\n current_time = time_wrap(use_gpu)\n if previous_iteration_time:\n iteration_time = current_time - previous_iteration_time\n else:\n iteration_time = 0\n previous_iteration_time = current_time\n else:\n t1 = time_wrap(use_gpu)\n\n # early exit if nbatches was set by the user and has been exceeded\n if nbatches > 0 and j >= nbatches:\n break\n '''\n # debug prints\n print(\"input and targets\")\n print(X.detach().cpu().numpy())\n print([np.diff(S_o.detach().cpu().tolist()\n + list(lS_i[i].shape)).tolist() for i, S_o in enumerate(lS_o)])\n print([S_i.detach().cpu().numpy().tolist() for S_i in lS_i])\n print(T.detach().cpu().numpy())\n '''\n\n # forward pass\n Z = dlrm_wrap(X, lS_o, lS_i, use_gpu, device)\n\n # loss\n E 
= loss_fn_wrap(Z, T, use_gpu, device)\n '''\n # debug prints\n print(\"output and loss\")\n print(Z.detach().cpu().numpy())\n print(E.detach().cpu().numpy())\n '''\n # compute loss and accuracy\n L = E.detach().cpu().numpy() # numpy array\n S = Z.detach().cpu().numpy() # numpy array\n T = T.detach().cpu().numpy() # numpy array\n mbs = T.shape[0] # = args.mini_batch_size except maybe for last\n A = np.sum((np.round(S, 0) == T).astype(np.uint8))\n\n if not args.inference_only:\n # scaled error gradient propagation\n # (where we do not accumulate gradients across mini-batches)\n optimizer.zero_grad()\n # backward pass\n E.backward()\n # debug prints (check gradient norm)\n # for l in mlp.layers:\n # if hasattr(l, 'weight'):\n # print(l.weight.grad.norm().item())\n\n # optimizer\n optimizer.step()\n\n if args.mlperf_logging:\n total_time += iteration_time\n else:\n t2 = time_wrap(use_gpu)\n total_time += t2 - t1\n total_accu += A\n total_loss += L * mbs\n total_iter += 1\n total_samp += mbs\n\n should_print = ((j + 1) % args.print_freq == 0) or (j + 1 == nbatches)\n should_test = (\n (args.test_freq > 0)\n and (args.data_generation == \"dataset\")\n and (((j + 1) % args.test_freq == 0) or (j + 1 == nbatches))\n )\n\n # print time, loss and accuracy\n if should_print or should_test:\n gT = 1000.0 * total_time / total_iter if args.print_time else -1\n total_time = 0\n\n gA = total_accu / total_samp\n total_accu = 0\n\n gL = total_loss / total_samp\n total_loss = 0\n\n str_run_type = \"inference\" if args.inference_only else \"training\"\n print(\n \"Finished {} it {}/{} of epoch {}, {:.2f} ms/it, \".format(\n str_run_type, j + 1, nbatches, k, gT\n )\n + \"loss {:.6f}, accuracy {:3.3f} %\".format(gL, gA * 100)\n )\n # Uncomment the line below to print out the total time with overhead\n # print(\"Accumulated time so far: {}\" \\\n # .format(time_wrap(use_gpu) - accum_time_begin))\n total_iter = 0\n total_samp = 0\n\n # testing\n if should_test and not args.inference_only:\n # don't measure training iter time in a test iteration\n if args.mlperf_logging:\n previous_iteration_time = None\n\n test_accu = 0\n test_loss = 0\n test_samp = 0\n\n accum_test_time_begin = time_wrap(use_gpu)\n if args.mlperf_logging:\n scores = []\n targets = []\n\n for i, (X_test, lS_o_test, lS_i_test, T_test) in enumerate(test_ld):\n # early exit if nbatches was set by the user and was exceeded\n if nbatches > 0 and i >= nbatches:\n break\n\n t1_test = time_wrap(use_gpu)\n\n # forward pass\n Z_test = dlrm_wrap(\n X_test, lS_o_test, lS_i_test, use_gpu, device\n )\n if args.mlperf_logging:\n S_test = Z_test.detach().cpu().numpy() # numpy array\n T_test = T_test.detach().cpu().numpy() # numpy array\n scores.append(S_test)\n targets.append(T_test)\n else:\n # loss\n E_test = loss_fn_wrap(Z_test, T_test, use_gpu, device)\n\n # compute loss and accuracy\n L_test = E_test.detach().cpu().numpy() # numpy array\n S_test = Z_test.detach().cpu().numpy() # numpy array\n T_test = T_test.detach().cpu().numpy() # numpy array\n mbs_test = T_test.shape[0] # = mini_batch_size except last\n A_test = np.sum((np.round(S_test, 0) == T_test).astype(np.uint8))\n test_accu += A_test\n test_loss += L_test * mbs_test\n test_samp += mbs_test\n\n t2_test = time_wrap(use_gpu)\n\n if args.mlperf_logging:\n scores = np.concatenate(scores, axis=0)\n targets = np.concatenate(targets, axis=0)\n\n metrics = {\n 'loss' : sklearn.metrics.log_loss,\n 'recall' : lambda y_true, y_score:\n sklearn.metrics.recall_score(\n y_true=y_true,\n y_pred=np.round(y_score)\n 
),\n 'precision' : lambda y_true, y_score:\n sklearn.metrics.precision_score(\n y_true=y_true,\n y_pred=np.round(y_score)\n ),\n 'f1' : lambda y_true, y_score:\n sklearn.metrics.f1_score(\n y_true=y_true,\n y_pred=np.round(y_score)\n ),\n 'ap' : sklearn.metrics.average_precision_score,\n 'roc_auc' : sklearn.metrics.roc_auc_score,\n 'accuracy' : lambda y_true, y_score:\n sklearn.metrics.accuracy_score(\n y_true=y_true,\n y_pred=np.round(y_score)\n ),\n # 'pre_curve' : sklearn.metrics.precision_recall_curve,\n # 'roc_curve' : sklearn.metrics.roc_curve,\n }\n\n # print(\"Compute time for validation metric : \", end=\"\")\n # first_it = True\n validation_results = {}\n for metric_name, metric_function in metrics.items():\n # if first_it:\n # first_it = False\n # else:\n # print(\", \", end=\"\")\n # metric_compute_start = time_wrap(False)\n validation_results[metric_name] = metric_function(\n targets,\n scores\n )\n # metric_compute_end = time_wrap(False)\n # met_time = metric_compute_end - metric_compute_start\n # print(\"{} {:.4f}\".format(metric_name, 1000 * (met_time)),\n # end=\"\")\n # print(\" ms\")\n gA_test = validation_results['accuracy']\n gL_test = validation_results['loss']\n else:\n gA_test = test_accu / test_samp\n gL_test = test_loss / test_samp\n\n is_best = gA_test > best_gA_test\n if is_best:\n best_gA_test = gA_test\n if not (args.save_model == \"\"):\n print(\"Saving model to {}\".format(args.save_model))\n torch.save(\n {\n \"epoch\": k,\n \"nepochs\": args.nepochs,\n \"nbatches\": nbatches,\n \"nbatches_test\": nbatches_test,\n \"iter\": j + 1,\n \"state_dict\": dlrm.state_dict(),\n \"train_acc\": gA,\n \"train_loss\": gL,\n \"test_acc\": gA_test,\n \"test_loss\": gL_test,\n \"total_loss\": total_loss,\n \"total_accu\": total_accu,\n \"opt_state_dict\": optimizer.state_dict(),\n },\n args.save_model,\n )\n\n if args.mlperf_logging:\n is_best = validation_results['roc_auc'] > best_auc_test\n if is_best:\n best_auc_test = validation_results['roc_auc']\n\n print(\n \"Testing at - {}/{} of epoch {},\".format(j + 1, nbatches, k)\n + \" loss {:.6f}, recall {:.4f}, precision {:.4f},\".format(\n validation_results['loss'],\n validation_results['recall'],\n validation_results['precision']\n )\n + \" f1 {:.4f}, ap {:.4f},\".format(\n validation_results['f1'],\n validation_results['ap'],\n )\n + \" auc {:.4f}, best auc {:.4f},\".format(\n validation_results['roc_auc'],\n best_auc_test\n )\n + \" accuracy {:3.3f} %, best accuracy {:3.3f} %\".format(\n validation_results['accuracy'] * 100,\n best_gA_test * 100\n )\n )\n else:\n print(\n \"Testing at - {}/{} of epoch {},\".format(j + 1, nbatches, 0)\n + \" loss {:.6f}, accuracy {:3.3f} %, best {:3.3f} %\".format(\n gL_test, gA_test * 100, best_gA_test * 100\n )\n )\n # Uncomment the line below to print out the total time with overhead\n # print(\"Total test time for this group: {}\" \\\n # .format(time_wrap(use_gpu) - accum_test_time_begin))\n\n if (args.mlperf_logging\n and (args.mlperf_acc_threshold > 0)\n and (best_gA_test > args.mlperf_acc_threshold)):\n print(\"MLPerf testing accuracy threshold \"\n + str(args.mlperf_acc_threshold)\n + \" reached, stop training\")\n break\n\n if (args.mlperf_logging\n and (args.mlperf_auc_threshold > 0)\n and (best_auc_test > args.mlperf_auc_threshold)):\n print(\"MLPerf testing auc threshold \"\n + str(args.mlperf_auc_threshold)\n + \" reached, stop training\")\n break\n\n k += 1 # nepochs\n\n # profiling\n if args.enable_profiling:\n with open(\"dlrm_s_pytorch.prof\", \"w\") as prof_f:\n 
prof_f.write(prof.key_averages().table(sort_by=\"cpu_time_total\"))\n prof.export_chrome_trace(\"./dlrm_s_pytorch.json\")\n # print(prof.key_averages().table(sort_by=\"cpu_time_total\"))\n\n # plot compute graph\n if args.plot_compute_graph:\n sys.exit(\n \"ERROR: Please install pytorchviz package in order to use the\"\n + \" visualization. Then, uncomment its import above as well as\"\n + \" three lines below and run the code again.\"\n )\n # V = Z.mean() if args.inference_only else E\n # dot = make_dot(V, params=dict(dlrm.named_parameters()))\n # dot.render('dlrm_s_pytorch_graph') # write .pdf file\n\n # test prints\n if not args.inference_only and args.debug_mode:\n print(\"updated parameters (weights and bias):\")\n for param in dlrm.parameters():\n print(param.detach().cpu().numpy())\n\n # export the model in onnx\n if args.save_onnx:\n with open(\"dlrm_s_pytorch.onnx\", \"w+b\") as dlrm_pytorch_onnx_file:\n (X, lS_o, lS_i, _) = train_data[0] # get first batch of elements\n torch.onnx._export(\n dlrm, (X, lS_o, lS_i), dlrm_pytorch_onnx_file, verbose=True\n )\n # recover the model back\n dlrm_pytorch_onnx = onnx.load(\"dlrm_s_pytorch.onnx\")\n # check the onnx model\n onnx.checker.check_model(dlrm_pytorch_onnx)\n"
] | [
[
"torch.cat",
"torch.nn.EmbeddingBag",
"torch.nn.ModuleList",
"numpy.set_printoptions",
"torch.nn.parallel.scatter_gather.scatter",
"torch.set_printoptions",
"torch.cuda.is_available",
"torch.load",
"torch.transpose",
"torch.nn.parallel.replicate.replicate",
"numpy.fromstring",
"numpy.concatenate",
"torch.nn.parallel.scatter_gather.gather",
"numpy.random.normal",
"torch.manual_seed",
"torch.tensor",
"numpy.sqrt",
"torch.nn.BCELoss",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.onnx._export",
"torch.nn.Sequential",
"numpy.round",
"torch.clamp",
"torch.cuda.device_count",
"torch.nn.ReLU",
"torch.nn.parallel.parallel_apply.parallel_apply",
"torch.nn.MSELoss",
"torch.cuda.synchronize",
"torch.nn.Sigmoid",
"numpy.random.seed",
"torch.autograd.profiler.profile"
]
] |
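The dlrm_s_pytorch sample in the row above describes its "dot" interaction operator in the header comment and implements it in interact_features: the bottom-MLP output and the embedding lookups are stacked, all pairwise dot products are taken with a batched matrix multiply, and only the strictly lower-triangular entries are kept before being concatenated back onto the dense features. A minimal sketch of that operator on toy tensors (batch size, feature count and dimension are arbitrary here):

import torch

def dot_interaction(x, ly, interact_itself=False):
    # x: (batch, d) output of the bottom MLP; ly: list of (batch, d) embedding lookups
    batch_size, d = x.shape
    T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d))  # (batch, num_fea, d)
    Z = torch.bmm(T, torch.transpose(T, 1, 2))                # all pairwise dot products
    _, ni, nj = Z.shape
    offset = 1 if interact_itself else 0
    li = torch.tensor([i for i in range(ni) for j in range(i + offset)])
    lj = torch.tensor([j for i in range(nj) for j in range(i + offset)])
    return torch.cat([x, Z[:, li, lj]], dim=1)                # dense features + unique interactions

x = torch.randn(2, 4)                       # toy dense features
ly = [torch.randn(2, 4) for _ in range(3)]  # toy embedding outputs
print(dot_interaction(x, ly).shape)         # torch.Size([2, 10])

With one dense vector and three embeddings of dimension 4 there are 4 features in total, so the output carries the 4 dense values plus C(4, 2) = 6 unique interaction terms.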
cliveseldon/examples | [
"c93b792d67c8c52bc91d4ccf5fbaead4e2324331"
] | [
"github_issue_summarization/workflow/workspace/src/recommend.py"
] | [
"import argparse\nimport keras\nimport pandas as pd\nfrom seq2seq_utils import load_text_processor\nfrom seq2seq_utils import Seq2Seq_Inference\n\n# Parsing flags.\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input_csv\")\nparser.add_argument(\"--input_model_h5\")\nparser.add_argument(\"--input_body_preprocessor_dpkl\")\nparser.add_argument(\"--input_title_preprocessor_dpkl\")\nparser.add_argument(\"--input_testdf_csv\")\nparser.add_argument(\"--input_topic_number\", type=int, default=1)\nargs = parser.parse_args()\nprint(args)\n\n# Read data.\nall_data_df = pd.read_csv(args.input_csv)\ntestdf = pd.read_csv(args.input_testdf_csv)\n\n# Load model, preprocessors.\nnum_encoder_tokens, body_pp = load_text_processor(args.input_body_preprocessor_dpkl)\nnum_decoder_tokens, title_pp = load_text_processor(args.input_title_preprocessor_dpkl)\nseq2seq_Model = keras.models.load_model(args.input_model_h5)\n\n# Prepare the recommender.\nall_data_bodies = all_data_df['body'].tolist()\nall_data_vectorized = body_pp.transform_parallel(all_data_bodies)\nseq2seq_inf_rec = Seq2Seq_Inference(encoder_preprocessor=body_pp,\n decoder_preprocessor=title_pp,\n seq2seq_model=seq2seq_Model)\nrecsys_annoyobj = seq2seq_inf_rec.prepare_recommender(all_data_vectorized, all_data_df)\n\n# Output recommendations for n topics.\nseq2seq_inf_rec.demo_model_predictions(n=args.input_topic_number, issue_df=testdf, threshold=1)\n"
] | [
[
"pandas.read_csv"
]
] |
zhazhijibaba/zhazhijibaba_programming_lessons | [
"124bd0166796ee3af947917c84e00679274100ca"
] | [
"programming_lesson3/rmsd.py"
] | [
"import numpy as np\n\ndef centroid(X):\n \"\"\"\n Centroid is the mean position of all the points in all of the coordinate\n directions, from a vectorset X.\n https://en.wikipedia.org/wiki/Centroid\n C = sum(X)/len(X)\n Parameters\n ----------\n X : array\n (N,D) matrix, where N is points and D is dimension.\n Returns\n -------\n C : float\n centroid\n \"\"\"\n C = X.mean(axis=0)\n return C\n\ndef rmsd(V, W):\n \"\"\"\n Calculate Root-mean-square deviation from two sets of vectors V and W.\n Parameters\n ----------\n V : array\n (N,D) matrix, where N is points and D is dimension.\n W : array\n (N,D) matrix, where N is points and D is dimension.\n Returns\n -------\n rmsd : float\n Root-mean-square deviation between the two vectors\n \"\"\"\n D = len(V[0])\n N = len(V)\n result = 0.0\n for v, w in zip(V, W):\n result += sum([(v[i] - w[i])**2.0 for i in range(D)])\n return np.sqrt(result/N)\n\n\ndef kabsch_rmsd(P, Q, translate=False):\n \"\"\"\n Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.\n Parameters\n ----------\n P : array\n (N,D) matrix, where N is points and D is dimension.\n Q : array\n (N,D) matrix, where N is points and D is dimension.\n translate : bool\n Use centroids to translate vector P and Q unto each other.\n Returns\n -------\n rmsd : float\n root-mean squared deviation\n \"\"\"\n if translate:\n Q = Q - centroid(Q)\n P = P - centroid(P)\n\n P = kabsch_rotate(P, Q)\n return rmsd(P, Q)\n\n\ndef kabsch_rotate(P, Q):\n \"\"\"\n Rotate matrix P unto matrix Q using Kabsch algorithm.\n Parameters\n ----------\n P : array\n (N,D) matrix, where N is points and D is dimension.\n Q : array\n (N,D) matrix, where N is points and D is dimension.\n Returns\n -------\n P : array\n (N,D) matrix, where N is points and D is dimension,\n rotated\n \"\"\"\n U = kabsch(P, Q)\n\n # Rotate P\n P = np.dot(P, U)\n return P\n\n\ndef kabsch(P, Q):\n \"\"\"\n Using the Kabsch algorithm with two sets of paired point P and Q, centered\n around the centroid. Each vector set is represented as an NxD\n matrix, where D is the the dimension of the space.\n The algorithm works in three steps:\n - a centroid translation of P and Q (assumed done before this function\n call)\n - the computation of a covariance matrix C\n - computation of the optimal rotation matrix U\n For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm\n Parameters\n ----------\n P : array\n (N,D) matrix, where N is points and D is dimension.\n Q : array\n (N,D) matrix, where N is points and D is dimension.\n Returns\n -------\n U : matrix\n Rotation matrix (D,D)\n \"\"\"\n\n # Computation of the covariance matrix\n C = np.dot(np.transpose(P), Q)\n\n # Computation of the optimal rotation matrix\n # This can be done using singular value decomposition (SVD)\n # Getting the sign of the det(V)*(W) to decide\n # whether we need to correct our rotation matrix to ensure a\n # right-handed coordinate system.\n # And finally calculating the optimal rotation matrix U\n # see http://en.wikipedia.org/wiki/Kabsch_algorithm\n V, S, W = np.linalg.svd(C)\n d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0\n\n if d:\n S[-1] = -S[-1]\n V[:, -1] = -V[:, -1]\n\n # Create Rotation matrix U\n U = np.dot(V, W)\n\n return U\n"
] | [
[
"numpy.dot",
"numpy.linalg.det",
"numpy.transpose",
"numpy.linalg.svd",
"numpy.sqrt"
]
] |
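The rmsd.py sample in the row above computes the optimal superposition rotation with the Kabsch algorithm: build the covariance C = P'Q from centered point sets, take its SVD, flip the sign of the last column of V when det(V)*det(W) < 0 to keep a proper rotation, and rotate P by U = V*W. A small self-contained check (random points and an arbitrarily chosen rotation) that a rotated copy comes back with near-zero RMSD:

import numpy as np

def kabsch_rmsd(P, Q):
    # P, Q: centered (N, D) point sets
    C = np.dot(np.transpose(P), Q)                 # covariance matrix
    V, S, W = np.linalg.svd(C)
    if np.linalg.det(V) * np.linalg.det(W) < 0.0:  # enforce a right-handed rotation
        V[:, -1] = -V[:, -1]
    U = np.dot(V, W)                               # optimal rotation matrix
    diff = np.dot(P, U) - Q
    return np.sqrt((diff ** 2).sum() / len(P))

rng = np.random.default_rng(0)
P = rng.normal(size=(10, 3))
P -= P.mean(axis=0)                                # center the points
theta = 0.7                                        # arbitrary angle about z
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0, 0.0, 1.0]])
print(kabsch_rmsd(P, np.dot(P, R)))                # ~0.0

Because Q here is an exact rotation of P, the recovered U equals R and the residual RMSD is numerical noise.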
PierreMsy/DRL_cooperation | [
"0385f4c88857659f44ddd5fc8c5c6c33344a38cc"
] | [
"marl_coop/agent/self_maddpg_agent.py"
] | [
"import numpy as np\nimport torch\n\nfrom marl_coop.agent import Base_agent\nfrom marl_coop.model import Actor_network_creator, Critic_network_creator\nfrom marl_coop.model.component import CriterionCreator\nfrom marl_coop.component import BufferCreator, NoiseCreator\nfrom marl_coop.utils.helper import to_np\n\n\nclass SelfMADDPG_agent(Base_agent):\n '''\n Multiple Agent Deep Deterministic Gradient implementation using a single actor & critic.\n\n Attempt at easing the training of MADDPG agents in symetrical environments where the same actor and critic\n can be used after a rearangment of the input tensors.\n '''\n def __init__(self, context, config):\n '''\n Store the config and the context.\n Instantiate the utilities: the noise function and the replay buffer.\n Instantiate the critic and the actor networks.\n\n Args:\n context : RL information such as state & action size.\n config : configuration of the agent and all its subparts.\n '''\n self.context = context\n self.config = config\n\n self.buffer = BufferCreator().create(config.buffer)\n self.noise = NoiseCreator().create(\n config.noise.method, context.action_size, config.noise.kwargs)\n\n self.actor_network = Actor_network_creator().create(context, config.actor)\n self.actor_target_network = Actor_network_creator().create(context, config.actor)\n self.actor_target_network.load_state_dict(self.actor_network.state_dict())\n\n self.critic_network = Critic_network_creator().create(context, config.critic)\n self.critic_target_network = Critic_network_creator().create(context, config.critic)\n self.critic_target_network.load_state_dict(self.critic_network.state_dict())\n self.critic_criterion = CriterionCreator().create(config.critic.criterion)\n\n self.t_step = 0\n\n @property\n def agents_named(self): \n return {'agent': self}\n\n def act(self, obss_full, noise=False):\n '''\n Take an action for each observations inputted.\n\n Args:\n obss_full (np.ndarray): [(batch) * nbr_agents * obs_size]\n observations (or batch of) of all agents.\n noise (bool): Add or not noise to the outputted actions \n '''\n obss_full = torch.from_numpy(obss_full).float().to(self.config.device)\n\n self.actor_network.eval()\n with torch.no_grad():\n actions = np.array([to_np(self.actor_network.forward(obss)) for obss in obss_full])\n self.actor_network.train()\n\n if noise:\n actions = np.clip(actions + self.noise.sample_multipe(self.context.nbr_agents),\n self.context.action_min, self.context.action_max)\n self.noise.update()\n\n return actions\n\n def step(self, obs_full, action_full, reward_full, next_obs_full, done_full):\n '''\n Store an interaction as a experience tuple and make the sub-agents learn when required. \n Experience are stored in the replay buffer as: \n (x, a1, ... 
,an, r, x', done) with x the observations of all the agents.\n '''\n # transform [1,0,1] into [[1],[0],[1]]\n done_full = np.array(done_full).reshape(-1,1)\n reward_full = np.array(reward_full).reshape(-1,1)\n\n if self.config.buffer.type == 'prioritized':\n raise Exception('not yet implemented')\n else:\n self.buffer.add(obs_full, action_full, reward_full, next_obs_full, done_full)\n if (len(self.buffer) >= self.config.batch_size) & (self.t_step % self.config.update_every == 0):\n self.learn()\n\n def learn(self):\n '''\n Sample experiences from the replay buffer and updates the critic and the actor.\n \n The critic use a concatenation of the observations/actions from all agents to learn how to evaluate\n a situation in a stationary setting as describe in https://arxiv.org/pdf/1706.02275.pdf.\n The actor only operate on local data.\n\n For each agent a view of the data according to that agent is created resulting in \n [nbr_agents * batch_size] updates at each learning.\n\n - The critic is updates based uppon a temporal difference error of the state-action value function\n using the actor to compute the action from the next state.\n error to minimize w.r.t w : r + γ * Q'_w'(o(t+1), µ'_θ'(o(t+1))) - Q_w(o(t),a(t))\n\n - The actor is updated using direct approximates of the state-action values from the critic.\n value to maximize w.r.t θ : Q_w(o(t), µ_θ(o(t))) \n '''\n obss_full, actions_full, rewards, next_obss_full, dones = self.buffer.sample()\n dones = torch.stack(dones)\n rewards = torch.stack(rewards)\n\n next_actions_full = [self.actor_target_network.forward(obss) for obss in obss_full]\n next_obss_by_agent = _create_view_by_agent(next_obss_full, self.context.nbr_agents)\n next_actions_by_agent = _create_view_by_agent(next_actions_full, self.context.nbr_agents)\n with torch.no_grad():\n Q_value_nexts = self.critic_target_network(next_obss_by_agent, next_actions_by_agent)\n Q_value_targets = rewards + self.config.gamma * Q_value_nexts * (1 - dones)\n\n obss_by_agent = _create_view_by_agent(obss_full, self.context.nbr_agents)\n actions_by_agent = _create_view_by_agent(actions_full, self.context.nbr_agents)\n Q_values = self.critic_network(obss_by_agent, actions_by_agent)\n\n critic_loss = self.critic_criterion(Q_values, Q_value_targets)\n self.critic_network.optimizer.zero_grad()\n critic_loss.backward()\n if self.config.use_gradient_clipping:\n torch.nn.utils.clip_grad_norm_(self.critic_network.parameters(), 1)\n self.critic_network.optimizer.step()\n\n actions_taken_full = [self.actor_target_network.forward(obss) for obss in obss_full]\n actions_taken_by_agent = _create_view_by_agent(actions_taken_full, self.context.nbr_agents)\n Q_values = self.critic_target_network(obss_by_agent, actions_taken_by_agent) \n\n actor_loss = - (Q_values).mean()\n self.actor_network.optimizer.zero_grad()\n actor_loss.backward()\n self.actor_network.optimizer.step()\n\n soft_update(self.actor_target_network, self.actor_network, self.config.tau)\n soft_update(self.critic_target_network, self.critic_network, self.config.tau)\n\ndef soft_update(target_network, netwok, tau):\n '''\n net_weights = (1-τ) * net_weights + τ * target_net_weights \n ''' \n for target_param, local_param in zip(target_network.parameters(), netwok.parameters()):\n target_param.data.copy_(\n (1.0 - tau) * target_param.data + tau * local_param.data)\n\ndef _create_view_by_agent(x, nbr_agents):\n '''\n Create a view of a list of tensors by agent by perfrming a cyrcular permutation of the data :\n [tensors(agent_0), tensors(agent_1), 
..., tensors(agent_n)] =>\n for agent 0 : tensors(tensors(agent_0) + tensors(agent_1) + ... + tensors(agent_n))\n for agent 1 : tensors(tensors(agent_1) + tensors(agent_2) + ... + tensors(agent_0))\n ... \n for agent n : tensors(tensors(agent_n) + tensors(agent_0) + ... + tensors(agent_n-1))\n\n Args:\n x (list[torch.tensor]): list of tensors to arrange to create a view by agent.\n nbr_agents (int): number of agents.\n\n Returns:\n list[torch.tensor]: One arrangment of tensors where the data of the agent is first for each agents.\n '''\n res = [\n torch.roll(torch.stack(x), i, dims=0) \n for i in range(nbr_agents)]\n return res\n\n "
] | [
[
"numpy.array",
"torch.no_grad",
"torch.stack",
"torch.from_numpy"
]
] |
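The MADDPG sample in the row above keeps target copies of the actor and critic that trail the online networks through a soft (Polyak) update, target <- (1 - tau) * target + tau * online, applied after each learning step. The update in isolation, on a throwaway torch module (the layer sizes are arbitrary):

import torch.nn as nn

def soft_update(target_network, network, tau):
    # blend each target parameter a fraction tau toward its online counterpart
    for target_param, local_param in zip(target_network.parameters(), network.parameters()):
        target_param.data.copy_((1.0 - tau) * target_param.data + tau * local_param.data)

online = nn.Linear(4, 2)
target = nn.Linear(4, 2)
target.load_state_dict(online.state_dict())  # start from identical weights

# ... a gradient step on `online` would happen here ...
soft_update(target, online, tau=0.01)        # target drifts 1% toward online each call

A small tau keeps the target networks slowly moving, which stabilizes the bootstrapped critic targets used in the row's learn() method.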
Rigonz/PopDensity_SatelliteNightLight | [
"88b0fae1e09984e08506063908d9c7fce6dc2229"
] | [
"POP CHECK R0 py36.py"
] | [
"'''\nCreated on: see version log.\n@author: rigonz\ncoding: utf-8\n\nIMPORTANT: requires py3.6 (rasterio)\n\nScript that:\n1) reads a series of raster files,\n2) runs some checks,\n3) makes charts showing the results.\n\nThe input data corresponds to a region of the world (ESP) and represents\nthe population density (pop/km2).\nEach file has from a data provider, or different calculation conditions.\n\nThe checks consist in verifying that the input files refer to the same region\nand to some intercomparison indicators.\n\nThe charts show the correlation among the different input data, as tuples\nassociated to the same geographical location.\n\nVersion log.\nR0 (20210512):\nFirst trials, seems to work well.\n\n'''\n\n# %% Imports.\nimport rasterio # IMPORTANT: requires py3.6\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# %% Directories.\nRootDirIn = 'D:/0 DOWN/zz EXTSave/GIS/POP/EUR/SHP/'\n\n# Filenames:\nFileNameI1 = RootDirIn + 'WP/ESP_clip_pd_2020_1km_UNadj.tif'\nFileNameI2 = RootDirIn + 'WP/ESP_clip_ppp_2020_1km_Aggregated_UNadj_d.tif'\nFileNameI3 = RootDirIn + 'GPW/ESP_clip gpw_v4_population_density_rev11_2020_30_sec.tif'\nFileNameI4 = RootDirIn + 'GPW/ESP_clip gpw_v4_population_density_adjusted_to_2015_unwpp_country_totals_rev11_2020_30_sec.tif'\n\n# %% Read data.\n# Open files:\nprint('Opening and reading the files...')\nds1 = rasterio.open(FileNameI1)\nds2 = rasterio.open(FileNameI2)\nds3 = rasterio.open(FileNameI3)\nds4 = rasterio.open(FileNameI4)\n\n# Read data:\nband1 = ds1.read(1)\nband2 = ds2.read(1)\nband3 = ds3.read(1)\nband4 = ds4.read(1)\n\n# %% Check the datasets.\nprint('Checking the data...')\n# Bounds:\nif not(ds1.bounds == ds2.bounds and ds2.bounds == ds3.bounds and\n ds3.bounds == ds4.bounds):\n print('WARNING: bounds are not the same:')\n print(ds1.bounds)\n print(ds2.bounds)\n print(ds3.bounds)\n print(ds4.bounds)\n\n# Width and height:\nif not(ds1.width == ds2.width and ds2.width == ds3.width and\n ds3.width == ds4.width):\n print('WARNING: widths are not the same:')\n print(ds1.width)\n print(ds2.width)\n print(ds3.width)\n print(ds4.width)\n\nif not(ds1.height == ds2.height and ds2.height == ds3.height and\n ds3.height == ds4.height):\n print('WARNING: heights are not the same:')\n print(ds1.height)\n print(ds2.height)\n print(ds3.height)\n print(ds4.height)\n\n# Bands:\nif not(ds1.indexes[0] == ds2.indexes[0] and ds2.indexes[0] == ds3.indexes[0]\n and ds3.indexes[0] == ds4.indexes[0]):\n print('WARNING: bands are not the same:')\n print(ds1.indexes[0])\n print(ds2.indexes[0])\n print(ds3.indexes[0])\n print(ds4.indexes[0])\n\n# Dimensions:\nif not(ds1.shape == ds2.shape and ds2.shape == ds3.shape and\n ds3.shape == ds4.shape):\n print('WARNING: shapes are not the same:')\n print(ds1.shape)\n print(ds2.shape)\n print(ds3.shape)\n print(ds4.shape)\n\n# CRS:\ntry:\n if (ds1.crs.data['init'] != 'epsg:4326' or\n ds2.crs.data['init'] != 'epsg:4326' or\n ds3.crs.data['init'] != 'epsg:4326' or\n ds4.crs.data['init'] != 'epsg:4326'):\n print('WARNING: CRS is not EPSG:4326.')\nexcept:\n print('WARNING: CRS is not available or is not EPSG:4326:')\n\n# %% Create new bands.\nprint('Checking the new bands...')\n# Remain within the boundaries of data:\nleft = max(ds1.bounds.left, ds2.bounds.left, ds3.bounds.left, ds4.bounds.left)\ntop = min(ds1.bounds.top, ds2.bounds.top, ds3.bounds.top, ds4.bounds.top)\nright = min(ds1.bounds.right, ds2.bounds.right, ds3.bounds.right, ds4.bounds.right)\nbottom = max(ds1.bounds.bottom, ds2.bounds.bottom, ds3.bounds.bottom, 
ds4.bounds.bottom)\nres = 1 / 120. # 30 arc-sec, approx 100 m; should be min() etc.\n\nheight = int(np.ceil((top - bottom) / res + 1))\nwidth = int(np.ceil((right - left) / res + 1))\n\nres_x = (right - left) / (width - 1)\nres_y = (top - bottom) / (height - 1)\n\n# Check (valid for east + north hemispheres only!):\nif right > min(ds1.bounds.right, ds2.bounds.right, ds3.bounds.right, ds4.bounds.right):\n print('WARNING: right boundary exceeded.')\nif bottom > max(ds1.bounds.bottom, ds2.bounds.bottom, ds3.bounds.bottom, ds4.bounds.bottom):\n print('WARNING: bottom boundary exceeded.')\n\n# Create new bands:\nprint('Creating the new bands...')\nb1 = np.full((height, width), 0.)\nb2 = np.full((height, width), 0.)\nb3 = np.full((height, width), 0.)\nb4 = np.full((height, width), 0.)\n\n# Populate the new bands:\ncount = 0\nfor i in range(0, height-1, 1):\n for j in range(0, width-1, 1):\n x, y = (left + j * res_x, top - i * res_y)\n row, col = ds1.index(x, y)\n b1[i, j] = band1[row, col]\n row, col = ds2.index(x, y)\n b2[i, j] = band2[row, col]\n row, col = ds3.index(x, y)\n b3[i, j] = band3[row, col]\n row, col = ds4.index(x, y)\n b4[i, j] = band4[row, col]\n\n # Show the progress:\n if count % height % 50 == 0:\n print('Progress... {:4.1f}%'.format(count/height*100))\n count += 1\n\n# %% Flatten and clear nodata.\nprint('Preparing the new bands...')\nb1f = b1.flatten()\nb2f = b2.flatten()\nb3f = b3.flatten()\nb4f = b4.flatten()\n\n# Remove only nodata, retain 0s:\nb_mask = np.array(np.array([b1f, b2f, b3f, b4f]).min(axis=0) < 0)\nb1fm = np.delete(b1f, b_mask)\nb2fm = np.delete(b2f, b_mask)\nb3fm = np.delete(b3f, b_mask)\nb4fm = np.delete(b4f, b_mask)\n\n# %% Compute correlations.\nprint('Pearson coeff. after removing the no-data:')\nprint('DS1-2 = {:4.3f}.'.format(np.corrcoef(b1fm, b2fm)[0, 1]))\nprint('DS1-3 = {:4.3f}.'.format(np.corrcoef(b1fm, b3fm)[0, 1]))\nprint('DS1-4 = {:4.3f}.'.format(np.corrcoef(b1fm, b4fm)[0, 1]))\nprint('DS2-3 = {:4.3f}.'.format(np.corrcoef(b2fm, b3fm)[0, 1]))\nprint('DS2-4 = {:4.3f}.'.format(np.corrcoef(b2fm, b4fm)[0, 1]))\nprint('DS3-4 = {:4.3f}.'.format(np.corrcoef(b3fm, b4fm)[0, 1]))\n\n# %% Draw histograms.\n# Auxiliaries:\ncolor = ['k', 'r', 'b', 'g']\nlabel = ['DS1', 'DS2', 'DS3', 'DS4']\n\n# Plot:\nplt.hist([b1fm, b2fm, b3fm, b4fm], bins=20, color=color[0:4], label=label)\n\n# Etc:\nplt.title('DS=>0', loc='right')\nplt.xlabel('pop. density, hab/km2')\nplt.ylabel('count')\nplt.grid(True)\nplt.legend()\nplt.show()\n\n# Zoom at the right tail:\n# Plot:\nplt.hist([b1fm, b2fm, b3fm, b4fm], bins=20, color=color[0:4], label=label)\n\n# Etc:\nplt.title('DS>=0', loc='right')\nplt.xlabel('pop. density, hab/km2')\nplt.ylabel('count')\nplt.grid(True)\nplt.legend()\n#•plt.xlim(1500, 40000)\nplt.ylim(0, 7500)\nplt.show()\n\n\n# %% Draw chart.\n# Auxiliaries:\ncolor = ['k', 'r', 'b', 'g']\n\n# Plot:\nplt.figure(1, figsize=(4, 4), dpi=300)\n# plt.scatter(b1fm, b3fm, color=color[0], s=1.0, label='1-3', alpha=0.1)\n# plt.scatter(b1fm, b4fm, color=color[1], s=1.0, label='1-4', alpha=0.1)\nplt.scatter(b2fm, b3fm, color=color[2], s=1.0, label='2-3', alpha=0.1)\n\n\n# Titles:\nplt.title('PD>=0', loc='right')\nplt.xlabel('pop. density, hab/km2')\nplt.ylabel('pop. 
density, hab/km2')\n\n# Etc:\nplt.grid(True)\nplt.legend()\nplt.tight_layout()\n\n# Take a look:\nplt.show()\n\n# %% Draw heatmap.\n# Remove 0s:\nb_mask = np.array(np.array([b1f, b2f, b3f, b4f]).min(axis=0) <= 0)\nb1fm = np.delete(b1f, b_mask)\nb2fm = np.delete(b2f, b_mask)\nb3fm = np.delete(b3f, b_mask)\nb4fm = np.delete(b4f, b_mask)\n\n# Plot:\nplt.hist2d(np.log10(b2fm), np.log10(b3fm), bins=100, cmap='binary')\n\n# Colorbar:\ncb = plt.colorbar()\ncb.set_label('Number of entries')\n\n# Etc:\nplt.title('PD>0', loc='right')\nplt.xlabel('log10_DS2 pop. density, hab/km2')\nplt.ylabel('log10_DS3 pop. density, hab/km2')\nplt.tight_layout()\nplt.show()\n\n# %% Script done.\nprint('\\nScript completed. Thanks!')\n"
] | [
[
"numpy.full",
"numpy.delete",
"matplotlib.pyplot.colorbar",
"numpy.ceil",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.tight_layout",
"numpy.corrcoef",
"matplotlib.pyplot.show",
"numpy.log10"
]
] |
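The raster-comparison sample in the row above flattens each resampled band, drops any cell that is nodata (negative) in at least one dataset while keeping genuine zeros, and then reports pairwise Pearson coefficients with np.corrcoef. That masking-plus-correlation step on toy stand-ins for the flattened bands:

import numpy as np

# toy flattened bands; -1 marks nodata, as in the rasters above
b1f = np.array([10.0, 250.0, -1.0, 80.0, 0.0, 300.0])
b2f = np.array([12.0, 240.0, 75.0, -1.0, 0.0, 310.0])

# mask cells where any band is negative (nodata), keep real zeros
b_mask = np.array([b1f, b2f]).min(axis=0) < 0
b1fm = np.delete(b1f, b_mask)
b2fm = np.delete(b2f, b_mask)

print('DS1-2 = {:4.3f}.'.format(np.corrcoef(b1fm, b2fm)[0, 1]))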
justCodeThings/rockpaperscissors | [
"46ea5b9b4ddb8d0b3fcd181dfadd9ca587f5a30d"
] | [
"machinevmachine.py"
] | [
"from sklearn.linear_model import LinearRegression\nfrom time import sleep, time\nimport random\n\nrounds = 1\nvalid_input = {1 : 1, 2 : 2, 3 : 3}\nvalid_output = {1 : \"Rock\", 2 : \"Paper\", 3 : \"Scissors\"}\nvictory_dict = {1 : 2, 2 : 3, 3 : 1}\nai_one_response = 0\nai_two_response = 0\nai_one_outcome = 0\nai_two_outcome = 0\nai_one_last_responses = [random.randint(1,3), random.randint(1,3), random.randint(1,3)]\nai_two_last_responses = [random.randint(1,3), random.randint(1,3), random.randint(1,3)]\nai_one_train_input = []\nai_two_train_input = []\nai_one_train_output = []\nai_two_train_output = []\nai_one_victory = 0\nai_two_victory = 0\nai_one_answer = random.randint(1,3)\nai_two_answer = random.randint(1,3)\nai_one_correct_response = victory_dict[ai_two_answer]\nai_two_correct_response = victory_dict[ai_one_answer]\n\nai_one_predictor = LinearRegression(n_jobs=-1)\nai_two_predictor = LinearRegression(n_jobs=-1)\n\nclass Ai(object):\n '''Use this class to create AI players to play Rock Paper Scissors. Use the think method to process an opponents response. A fake\n response will be need to get things going.'''\n def __init__(self, last_responses, own_response, correct_response, training_input, training_output, predictor):\n self.last_responses = last_responses\n self.own_response = own_response\n self.correct_response = correct_response\n self.training_input = training_input\n self.training_output = training_output\n self.predictor = predictor\n def log_answer(self, opponent_response):\n # Log opponent's response\n self.last_responses.append(opponent_response)\n self.last_responses.remove(self.last_responses[0])\n # Store the actual winning response to the opponent's move to use for learning\n self.correct_response = victory_dict[opponent_response]\n # Train the AI off of this round's opponent plays and own plays\n self.train()\n def think(self):\n # Generate a response based on logic\n own_responseRaw = self.logic(self.last_responses[2], self.last_responses[1], self.last_responses[0])\n self.own_response = Clean(own_responseRaw)\n # Make sure response is different than last 3 responses\n if self.last_responses[0] == self.own_response and self.last_responses[1] == self.own_response and self.last_responses[2] == self.own_response:\n self.own_response = random.randint(1,3)\n # Return AI's response to opponent\n return self.own_response\n def train(self):\n self.training_input.append([self.last_responses[0], self.last_responses[1], self.last_responses[2]])\n self.training_output.append(self.correct_response)\n self.train_model(self.training_input, self.training_output)\n def train_model(self, x, y):\n self.predictor.fit(X=x, y=y)\n\n def logic(self, data1, data2, data3):\n self.predictor\n test = [[data1, data2, data3]]\n outcome = self.predictor.predict(X=test)\n return outcome\n\ndef Clean(data):\n global valid_input\n try:\n return valid_input[int(data)]\n except Exception as e:\n return valid_input[1]\n\ndef ConvertToInput(data):\n global valid_input\n return valid_input[str(data)]\n\ndef ConvertToOutput(data):\n global valid_output\n try:\n return valid_output[int(data)]\n except Exception as e:\n return valid_output[1]\n\ndef victory_counter(ai1, ai2):\n global victory_dict\n global ai_one_victory\n global ai_two_victory\n if ai1 == ai2:\n return\n if victory_dict[ai1] != ai2:\n ai_one_victory += 1\n if victory_dict[ai2] != ai1:\n ai_two_victory += 1\n\nai_one = Ai(ai_two_last_responses, ai_one_response, ai_one_correct_response, ai_one_train_input, ai_one_train_output, 
ai_one_predictor)\nai_two = Ai(ai_one_last_responses, ai_two_response, ai_two_correct_response, ai_two_train_input, ai_two_train_output, ai_two_predictor)\n\nai_one.train()\nai_two.train()\nwhile True:\n now = time()\n ai_one_answer = ai_one.think()\n ai_two_answer = ai_two.think()\n ai_one.log_answer(ai_two_answer)\n ai_two.log_answer(ai_one_answer)\n victory_counter(ai_one_answer, ai_two_answer)\n print(f\"Round: {rounds}, Score: {ai_one_victory} / {ai_two_victory} \\nPlay: Bill= {ConvertToOutput(ai_one_answer)}, Ted= {ConvertToOutput(ai_two_answer)}.\\nTime: {time()-now}.\\n\\n\")\n rounds = rounds + 1\n #sleep(3)"
] | [
[
"sklearn.linear_model.LinearRegression"
]
] |
alfrisch/aqua | [
"179ce59bd1366ebdc85e21a98336e3cf31565e8d"
] | [
"test/test_iqpe.py"
] | [
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\nimport unittest\n\nimport numpy as np\nfrom parameterized import parameterized\nfrom scipy.linalg import expm\nfrom scipy import sparse\nfrom qiskit.transpiler import PassManager\n\nfrom test.common import QiskitAquaTestCase\nfrom qiskit import BasicAer\nfrom qiskit.aqua import Operator, QuantumInstance\nfrom qiskit.aqua.utils import decimal_to_binary\nfrom qiskit.aqua.algorithms import IQPE\nfrom qiskit.aqua.algorithms import ExactEigensolver\nfrom qiskit.aqua.components.initial_states import Custom\n\n\nX = np.array([[0, 1], [1, 0]])\nY = np.array([[0, -1j], [1j, 0]])\nZ = np.array([[1, 0], [0, -1]])\n_I = np.array([[1, 0], [0, 1]])\nh1 = X + Y + Z + _I\nqubitOp_simple = Operator(matrix=h1)\n\n\npauli_dict = {\n 'paulis': [\n {\"coeff\": {\"imag\": 0.0, \"real\": -1.052373245772859}, \"label\": \"II\"},\n {\"coeff\": {\"imag\": 0.0, \"real\": 0.39793742484318045}, \"label\": \"IZ\"},\n {\"coeff\": {\"imag\": 0.0, \"real\": -0.39793742484318045}, \"label\": \"ZI\"},\n {\"coeff\": {\"imag\": 0.0, \"real\": -0.01128010425623538}, \"label\": \"ZZ\"},\n {\"coeff\": {\"imag\": 0.0, \"real\": 0.18093119978423156}, \"label\": \"XX\"}\n ]\n}\nqubitOp_h2_with_2_qubit_reduction = Operator.load_from_dict(pauli_dict)\n\n\npauli_dict_zz = {\n 'paulis': [\n {\"coeff\": {\"imag\": 0.0, \"real\": 1.0}, \"label\": \"ZZ\"}\n ]\n}\nqubitOp_zz = Operator.load_from_dict(pauli_dict_zz)\n\n\nclass TestIQPE(QiskitAquaTestCase):\n \"\"\"IQPE tests.\"\"\"\n\n @parameterized.expand([\n [qubitOp_simple, 'qasm_simulator'],\n [qubitOp_zz, 'statevector_simulator'],\n [qubitOp_h2_with_2_qubit_reduction, 'statevector_simulator'],\n ])\n def test_iqpe(self, qubitOp, simulator):\n self.algorithm = 'IQPE'\n self.log.debug('Testing IQPE')\n\n self.qubitOp = qubitOp\n\n exact_eigensolver = ExactEigensolver(self.qubitOp, k=1)\n results = exact_eigensolver.run()\n\n w = results['eigvals']\n v = results['eigvecs']\n\n self.qubitOp.to_matrix()\n np.testing.assert_almost_equal(\n self.qubitOp._matrix @ v[0],\n w[0] * v[0]\n )\n np.testing.assert_almost_equal(\n expm(-1.j * sparse.csc_matrix(self.qubitOp._matrix)) @ v[0],\n np.exp(-1.j * w[0]) * v[0]\n )\n\n self.ref_eigenval = w[0]\n self.ref_eigenvec = v[0]\n self.log.debug('The exact eigenvalue is: {}'.format(self.ref_eigenval))\n self.log.debug('The corresponding eigenvector: {}'.format(self.ref_eigenvec))\n\n num_time_slices = 50\n num_iterations = 6\n state_in = Custom(self.qubitOp.num_qubits, state_vector=self.ref_eigenvec)\n iqpe = IQPE(self.qubitOp, state_in, num_time_slices, num_iterations,\n expansion_mode='suzuki', expansion_order=2, shallow_circuit_concat=True)\n\n backend = BasicAer.get_backend(simulator)\n quantum_instance = QuantumInstance(backend, shots=100, pass_manager=PassManager())\n\n result = iqpe.run(quantum_instance)\n\n self.log.debug('top result str label: {}'.format(result['top_measurement_label']))\n self.log.debug('top result in decimal: {}'.format(result['top_measurement_decimal']))\n self.log.debug('stretch: 
{}'.format(result['stretch']))\n self.log.debug('translation: {}'.format(result['translation']))\n self.log.debug('final eigenvalue from IQPE: {}'.format(result['energy']))\n self.log.debug('reference eigenvalue: {}'.format(self.ref_eigenval))\n self.log.debug('ref eigenvalue (transformed): {}'.format(\n (self.ref_eigenval + result['translation']) * result['stretch'])\n )\n self.log.debug('reference binary str label: {}'.format(decimal_to_binary(\n (self.ref_eigenval.real + result['translation']) * result['stretch'],\n max_num_digits=num_iterations + 3,\n fractional_part_only=True\n )))\n\n np.testing.assert_approx_equal(result['energy'], self.ref_eigenval.real, significant=2)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.array",
"numpy.testing.assert_almost_equal",
"scipy.sparse.csc_matrix",
"numpy.testing.assert_approx_equal",
"numpy.exp"
]
] |
hernot/bitshuffle | [
"a60471d37a8cbbd8265dc8cfa83a9320abdcb590"
] | [
"tests/make_regression_tdata.py"
] | [
"\"\"\"\nScript to create data used for regression testing.\n\n\"\"\"\n\nimport numpy as np\nfrom numpy import random\nimport h5py\n\nimport bitshuffle\nfrom bitshuffle import h5\nfrom h5py import h5z\n\nBLOCK_SIZE = 64 # Smallish such that datasets have many blocks but are small.\nCOMP_LVL = 10 # ZSTD compression level\nFILTER_PIPELINE = [h5.H5FILTER]\nFILTER_OPTS = [\n [(BLOCK_SIZE, h5.H5_COMPRESS_LZ4)],\n [(BLOCK_SIZE, h5.H5_COMPRESS_ZSTD, COMP_LVL)],\n]\n\nOUT_FILE = \"tests/data/regression_%s.h5\" % bitshuffle.__version__\n\nDTYPES = [\"a1\", \"a2\", \"a3\", \"a4\", \"a6\", \"a8\", \"a10\"]\n\nf = h5py.File(OUT_FILE, \"w\")\ng_orig = f.create_group(\"origional\")\ng_comp_lz4 = f.create_group(\"compressed\")\ng_comp_zstd = f.create_group(\"compressed_zstd\")\n\nfor dtype in DTYPES:\n for rep in [\"a\", \"b\", \"c\"]:\n dset_name = \"%s_%s\" % (dtype, rep)\n dtype = np.dtype(dtype)\n n_elem = 3 * BLOCK_SIZE + random.randint(0, BLOCK_SIZE)\n shape = (n_elem,)\n chunks = shape\n data = random.randint(0, 255, n_elem * dtype.itemsize)\n data = data.astype(np.uint8).view(dtype)\n\n g_orig.create_dataset(dset_name, data=data)\n\n # Create LZ4 compressed data\n h5.create_dataset(\n g_comp_lz4,\n bytes(dset_name, \"utf-8\"),\n shape,\n dtype,\n chunks=chunks,\n filter_pipeline=FILTER_PIPELINE,\n filter_flags=(h5z.FLAG_MANDATORY,),\n filter_opts=FILTER_OPTS[0],\n )\n g_comp_lz4[dset_name][:] = data\n\n # Create ZSTD compressed data\n h5.create_dataset(\n g_comp_zstd,\n bytes(dset_name, \"utf-8\"),\n shape,\n dtype,\n chunks=chunks,\n filter_pipeline=FILTER_PIPELINE,\n filter_flags=(h5z.FLAG_MANDATORY,),\n filter_opts=FILTER_OPTS[1],\n )\n g_comp_zstd[dset_name][:] = data\n\nf.close()\n"
] | [
[
"numpy.random.randint",
"numpy.dtype"
]
] |
pasqoc/heterocl | [
"bdb87b01cbdf613fe746d25dd949e18cd4942ecf"
] | [
"hlib/python/hlib/ip/fft.py"
] | [
"import heterocl as hcl\nimport numpy as np\nfrom hlib.op.extern import *\n\ndtype = hcl.Int()\n\n@register_extern_ip(vendor=\"xilinx\")\ndef single_fft_hls(X_real, X_imag, F_real=None, F_imag=None, name=None):\n\n if name is None: name = \"hls::fft<config>\"\n L = X_real.shape[0]\n assert X_real.shape == X_imag.shape\n assert np.log2(L) % 1 == 0, \"length must be power of 2: \" + str(L)\n\n return_tensors = False\n if (F_real is None) and (F_imag is None):\n return_tensors = True\n F_real = hcl.compute((L,), lambda i: 0, name='F_real')\n F_imag = hcl.compute((L,), lambda i: 0, name='F_imag')\n\n # functional behavior\n with hcl.Stage(\"ExternModule\") as Module:\n num_stages = int(np.log2(L))\n bit_width = int(np.log2(L))\n IndexTable = np.zeros((L), dtype='int')\n for i in range(L):\n b = '{:0{width}b}'.format(i, width=bit_width)\n IndexTable[i] = int(b[::-1], 2)\n\n Table = hcl.copy(IndexTable, \"table\", dtype=hcl.Int())\n hcl.update(F_real, lambda i: X_real[Table[i]], name='F_real_update')\n hcl.update(F_imag, lambda i: X_imag[Table[i]], name='F_imag_update')\n\n with hcl.Stage(\"Out\"):\n one = hcl.scalar(1, dtype=\"int32\", name=\"one\")\n with hcl.for_(0, num_stages) as stage:\n DFTpts = one[0] << (stage + 1)\n numBF = DFTpts / 2\n e = -2 * np.pi / DFTpts\n a = hcl.scalar(0, \"a\")\n with hcl.for_(0, numBF) as j:\n c = hcl.scalar(hcl.cos(a[0]), name=\"cos\")\n s = hcl.scalar(hcl.sin(a[0]), name=\"sin\")\n a[0] = a[0] + e\n with hcl.for_(j, L + DFTpts - 1, DFTpts) as i:\n i_lower = i + numBF\n temp_r = hcl.scalar(F_real[i_lower] * c - F_imag[i_lower] * s, \"temp_r\")\n temp_i = hcl.scalar(F_imag[i_lower] * c + F_real[i_lower] * s, \"temp_i\")\n F_real[i_lower] = F_real[i] - temp_r[0]\n F_imag[i_lower] = F_imag[i] - temp_i[0]\n F_real[i] = F_real[i] + temp_r[0]\n F_imag[i] = F_imag[i] + temp_i[0]\n\n dicts = {}\n dicts[\"name\"] = name\n tensors = [X_real, X_imag, F_real, F_imag]\n dicts[\"args\"] = [(_.name, _.dtype) for _ in tensors]\n\n # declare headers and typedef \n dicts[\"header\"] = \"\"\"\n#include \\\"hls_fft.h\\\"\n#include <complex>\nstruct config : hls::ip_fft::params_t {\n static const unsigned ordering_opt = hls::ip_fft::natural_order;\n static const unsigned config_width = 16; // FFT_CONFIG_WIDTH\n};\ntypedef ap_fixed<16,1> data_t;\ntypedef std::complex<data_t> fxpComplex;\n\"\"\"\n # extern ip function \n dicts[\"func\"] = \"\"\"\n hls::ip_fft::config_t<config> fft_config;\n hls::ip_fft::status_t<config> fft_status;\n #pragma HLS INTERFACE ap_fifo port=fft_config\n fft_config.setDir(0);\n fft_config.setSch(0x2AB);\n std::complex<data_t> xn[{}];\n std::complex<data_t> xk[{}];\n #pragma HLS INTERFACE ap_fifo port=xn depth=16\n #pragma HLS INTERFACE ap_fifo port=xk depth=16\n for (int i = 0; i < {}; i++) {{ \n #pragma HLS pipeline rewind\n xn[i] = fxpComplex({}[i], {}[i]);\n }}\n hls::fft<config>(xn, xk, &fft_status, &fft_config); \n for (int i = 0; i < {}; i++) {{\n #pragma HLS pipeline rewind\n {}[i] = xk[i].real();\n {}[i] = xk[i].imag();\n }}\n\"\"\".format(L, L, L, X_real.name, X_imag.name,\n L, F_real.name, F_imag.name)\n\n create_extern_module(Module, dicts, ip_type=\"hls\")\n if return_tensors: return F_real, F_imag\n\n"
] | [
[
"numpy.log2",
"numpy.zeros"
]
] |
linu1983/openpilot | [
"15791f1ad00b5f9417e2455dafa3426bfe22095a"
] | [
"selfdrive/controls/lib/latcontrol_indi.py"
] | [
"import math\nimport numpy as np\n\nfrom cereal import log\nfrom common.realtime import DT_CTRL\nfrom common.numpy_fast import clip\nfrom selfdrive.car.toyota.values import SteerLimitParams\nfrom selfdrive.car import apply_toyota_steer_torque_limits\nfrom selfdrive.controls.lib.drive_helpers import get_steer_max\nfrom selfdrive.ntune import nTune\n\nclass LatControlINDI():\n def __init__(self, CP):\n self.angle_steers_des = 0.\n\n A = np.array([[1.0, DT_CTRL, 0.0],\n [0.0, 1.0, DT_CTRL],\n [0.0, 0.0, 1.0]])\n C = np.array([[1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0]])\n\n # Q = np.matrix([[1e-2, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 10.0]])\n # R = np.matrix([[1e-2, 0.0], [0.0, 1e3]])\n\n # (x, l, K) = control.dare(np.transpose(A), np.transpose(C), Q, R)\n # K = np.transpose(K)\n K = np.array([[7.30262179e-01, 2.07003658e-04],\n [7.29394177e+00, 1.39159419e-02],\n [1.71022442e+01, 3.38495381e-02]])\n\n self.K = K\n self.A_K = A - np.dot(K, C)\n self.x = np.array([[0.], [0.], [0.]])\n\n self.enforce_rate_limit = CP.carName == \"toyota\"\n\n self.RC = CP.lateralTuning.indi.timeConstant\n self.G = CP.lateralTuning.indi.actuatorEffectiveness\n self.outer_loop_gain = CP.lateralTuning.indi.outerLoopGain\n self.inner_loop_gain = CP.lateralTuning.indi.innerLoopGain\n self.alpha = 1. - DT_CTRL / (self.RC + DT_CTRL)\n\n self.sat_count_rate = 1.0 * DT_CTRL\n self.sat_limit = CP.steerLimitTimer\n\n self.reset()\n self.tune = nTune(CP, self)\n \n def reset(self):\n self.delayed_output = 0.\n self.output_steer = 0.\n self.sat_count = 0.0\n\n def _check_saturation(self, control, check_saturation, limit):\n saturated = abs(control) == limit\n\n if saturated and check_saturation:\n self.sat_count += self.sat_count_rate\n else:\n self.sat_count -= self.sat_count_rate\n\n self.sat_count = clip(self.sat_count, 0.0, 1.0)\n\n return self.sat_count > self.sat_limit\n\n def update(self, active, CS, CP, path_plan):\n self.tune.check()\n # Update Kalman filter\n y = np.array([[math.radians(CS.steeringAngle)], [math.radians(CS.steeringRate)]])\n self.x = np.dot(self.A_K, self.x) + np.dot(self.K, y)\n\n indi_log = log.ControlsState.LateralINDIState.new_message()\n indi_log.steerAngle = math.degrees(self.x[0])\n indi_log.steerRate = math.degrees(self.x[1])\n indi_log.steerAccel = math.degrees(self.x[2])\n\n if CS.vEgo < 0.3 or not active:\n indi_log.active = False\n self.output_steer = 0.0\n self.delayed_output = 0.0\n else:\n self.angle_steers_des = path_plan.angleSteers\n self.rate_steers_des = path_plan.rateSteers\n\n steers_des = math.radians(self.angle_steers_des)\n rate_des = math.radians(self.rate_steers_des)\n\n # Expected actuator value\n self.delayed_output = self.delayed_output * self.alpha + self.output_steer * (1. - self.alpha)\n\n # Compute acceleration error\n rate_sp = self.outer_loop_gain * (steers_des - self.x[0]) + rate_des\n accel_sp = self.inner_loop_gain * (rate_sp - self.x[1])\n accel_error = accel_sp - self.x[2]\n\n # Compute change in actuator\n g_inv = 1. 
/ self.G\n delta_u = g_inv * accel_error\n\n # Enforce rate limit\n if self.enforce_rate_limit:\n steer_max = float(SteerLimitParams.STEER_MAX)\n new_output_steer_cmd = steer_max * (self.delayed_output + delta_u)\n prev_output_steer_cmd = steer_max * self.output_steer\n new_output_steer_cmd = apply_toyota_steer_torque_limits(new_output_steer_cmd, prev_output_steer_cmd, prev_output_steer_cmd, SteerLimitParams)\n self.output_steer = new_output_steer_cmd / steer_max\n else:\n self.output_steer = self.delayed_output + delta_u\n\n steers_max = get_steer_max(CP, CS.vEgo)\n self.output_steer = clip(self.output_steer, -steers_max, steers_max)\n\n indi_log.active = True\n indi_log.rateSetPoint = float(rate_sp)\n indi_log.accelSetPoint = float(accel_sp)\n indi_log.accelError = float(accel_error)\n indi_log.delayedOutput = float(self.delayed_output)\n indi_log.delta = float(delta_u)\n indi_log.output = float(self.output_steer)\n\n check_saturation = (CS.vEgo > 10.) and not CS.steeringRateLimited and not CS.steeringPressed\n indi_log.saturated = self._check_saturation(self.output_steer, check_saturation, steers_max)\n\n return float(self.output_steer), float(self.angle_steers_des), indi_log\n"
] | [
[
"numpy.array",
"numpy.dot"
]
] |
jdheinz/project-ordo_ab_chao | [
"4063f93b297bab43cff6ca64fa5ba103f0c75158"
] | [
"src/django_website/display_graphs/views.py"
] | [
"from django.shortcuts import render\nfrom ebaysdk.finding import Connection as finding\nimport xmltodict\nfrom json import loads, dumps\nimport pandas as pd\nimport numpy as np\nimport datetime\n\nfrom . import outOfSample, neuralNetwork, mLinearRegression\n\n# create empty dataframe within scope of entire file\ncontent_df = pd.DataFrame()\n\n# do ebay search by keywords and pass to graphs.html, and get predictions\ndef display_the_graphs(request):\n keywords = request.POST.get('search')\n api = finding(appid='JohnHein-homepage-PRD-392e94856-07aba7fe', config_file=None, siteid='EBAY-US')\n api_request = {'keywords':keywords, 'itemFilter':[{'name':'SoldItemsOnly', 'value':True},],'outputSelector':['SellerInfo']}\n response = api.execute('findCompletedItems', api_request)\n content = response.content\n xml_dict = xmltodict.parse(content)\n content_dict = to_dict(xml_dict)\n count = content_dict['findCompletedItemsResponse']['searchResult']['@count']\n item_dict = content_dict['findCompletedItemsResponse']['searchResult']['item']\n print('count:', count)\n #print('\\nitem_dict:\\n', item_dict)\n content_df = extract_values(item_dict)\n content_df_copy = content_df.copy()\n y_values = content_df_copy['endPrice'].tolist()\n y_values = [float(i) for i in y_values]\n x_values_b = content_df_copy['endTime'].tolist()\n x_values = convert_datetime(x_values_b)\n #print('\\nx_values: ', x_values,'\\n')\n #print('\\ny_values: ', y_values,'\\n')\n #print('\\nx_values count:', len(x_values),'\\n')\n #print('\\ny_values count:', len(y_values),'\\n')\n #print('\\nx_values type:', type(x_values[-1]),'\\n')\n #print('\\ny_values type:', type(y_values[-1]),'\\n')\n chart1_data = [list(i) for i in zip(x_values, y_values)]\n oos = outOfSample.Oos()\n df2 = oos.out_of_sample(content_df)\n nn = neuralNetwork.Neural_Network()\n df3, history = nn.neural_network(content_df)\n mlr = mLinearRegression.MultivariateLinearRegression()\n df4 = mlr.regression(content_df)\n nn_x_values = df3['predictions'].tolist()\n nn_y_values = df3['actual_sell_prices'].tolist()\n chart2_data = [list(i) for i in zip(nn_x_values, nn_y_values)]\n mlr_x_values = df4['predictions'].tolist()\n mlr_y_values = df4['actual_sell_prices'].tolist()\n chart4_data = [list(i) for i in zip(mlr_x_values, mlr_y_values)]\n #print('chart1 data:', chart1_data)\n context = {\n 'response': content_df.to_html(),\n 'chart1': chart1_data,\n 'chart4': chart4_data,\n 'chart2': chart2_data,\n 'oos_df': df2.to_html(),\n 'nn_df': df3.to_html(),\n 'mlr_df': df4.to_html()\n }\n return render(request, 'display_graphs/graphs.html', context)\n\n# convert ordered dictionary to regular dictionary\ndef to_dict(input_ordered_dict):\n return loads(dumps(input_ordered_dict))\n\n# take ebay response data and put into dataframe\ndef extract_values(temp_dict):\n df = pd.DataFrame(columns=['itemId','title','listingType','endPrice','shippingServiceCost','bidCount','watchCount','returnsAccepted','location','endTime','startTime','handlingTime','sellerUserName','feedbackScore','positiveFeedbackPercent','topRatedSeller'])\n a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]\n #print('\\ntype of data:\\n', type(temp_dict))\n length = len(temp_dict)\n #print('\\nlength:\\n', length)\n for index in range(length):\n for key, value in temp_dict[index].items():\n #print('temp_dict[index][key]:', key)\n if key == 'itemId':\n a.append(value)\n if key == 'title':\n b.append(value)\n if key == 'sellingStatus':\n 
c.append(temp_dict[index]['sellingStatus']['convertedCurrentPrice']['#text'])\n try:\n d.append(temp_dict[index]['sellingStatus']['bidCount'])\n except KeyError:\n d.append(np.nan)\n if key == 'shippingInfo':\n e.append(temp_dict[index]['shippingInfo']['handlingTime'])\n try:\n m.append(temp_dict[index]['shippingInfo']['shippingServiceCost']['#text'])\n except KeyError:\n m.append(np.nan)\n if key == 'sellerInfo':\n f.append(temp_dict[index]['sellerInfo']['sellerUserName'])\n g.append(temp_dict[index]['sellerInfo']['feedbackScore'])\n h.append(temp_dict[index]['sellerInfo']['positiveFeedbackPercent'])\n n.append(temp_dict[index]['sellerInfo']['topRatedSeller'])\n if key == 'location':\n i.append(value)\n if key == 'listingInfo':\n j.append(temp_dict[index]['listingInfo']['endTime'])\n l.append(temp_dict[index]['listingInfo']['startTime'])\n p.append(temp_dict[index]['listingInfo']['listingType'])\n try:\n k.append(temp_dict[index]['listingInfo']['watchCount'])\n except KeyError:\n k.append(np.nan)\n if key == 'returnsAccepted':\n o.append(value)\n\n df = pd.DataFrame({'itemId':pd.Series(a),'title':pd.Series(b),'listingType':pd.Series(p),'endPrice':pd.Series(c),'shippingServiceCost':pd.Series(m),\n 'bidCount':pd.Series(d),'watchCount':pd.Series(k),'returnsAccepted':pd.Series(o),\n 'location':pd.Series(i),'endTime':pd.Series(j),'startTime':pd.Series(l),'handlingTime':pd.Series(e),\n 'sellerUserName':pd.Series(f),'feedbackScore':pd.Series(g),'positiveFeedbackPercent':pd.Series(h),\n 'topRatedSeller':pd.Series(n)}) \n #print('\\ndf:\\n', df)\n #print('\\narray a:\\n', a)\n #print('\\narray b:\\n', b)\n #print('\\narray c:\\n', c)\n #print('\\narray d:\\n', d)\n #print('\\narray f:\\n', f)\n df['endTime'] = pd.to_datetime(df['endTime']) # datetime ISO 8601 format ---> YYYY-MM-DD HH:MM:SS +HH:MM (NOTE: '+HH:MM' is UTC offset)\n df['endTimeOfDay'],df['endDate'] = df['endTime'].apply(lambda x:x.time()),df['endTime'].apply(lambda x:x.date())\n return df\n\n# convert the datetime for that column in the dataframe\ndef convert_datetime(arr):\n arr2 = []\n for i in arr:\n dateobj = str(i)\n dateobj = dateobj[:19]\n arr2.append(int(datetime.datetime.strptime(dateobj, \"%Y-%m-%d %H:%M:%S\").timestamp())*1000)\n #print('convert_datetime ',arr2[-1])\n #print('dateobj:', dateobj)\n return arr2\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame",
"pandas.Series"
]
] |
vanatteveldt/dldemo | [
"017982fc756c6047fb6bc8d20e3b6878c76e39b5"
] | [
"demo.py"
] | [
"import logging\n\nimport gensim\nimport numpy as np\nimport pandas as pd\nfrom keras.callbacks import Callback\nfrom keras.layers import Dense, Input, GlobalMaxPooling1D, Conv1D, Embedding\nfrom keras.models import Model\nfrom keras.optimizers import RMSprop\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.utils import to_categorical\nfrom tensorflow_core.python.training.tracking.util import keras_backend\n\nlogging.basicConfig(level=logging.INFO, format='[%(asctime)s %(name)-12s %(levelname)-5s] %(message)s')\n\nlogging.info(\"Loading data from sentences_ml.csv\")\ndata = pd.read_csv(\"sentences_ml.csv\")\n\n# code output as two 'neurons': tone (0=negative, 1=positive) and subjectivity (0=no sentiment, 1=sentiment)\n# can also keep single neuron with three values, or three neurons (pos/neut/neg)\n# but in my test that performed slightly worse (but that could well depend on task)\nlabels = np.asarray([[(x + 1) / 2, int(x != 0)] for x in data.tone])\n\n# tokenize the input\ntokenizer = Tokenizer(num_words=10000) # basically no length limit, these are headlines anyway\ntokenizer.fit_on_texts(data.lemmata)\nsequences = tokenizer.texts_to_sequences(data.lemmata)\ntokens = pad_sequences(sequences) # input sequences\n\n# split train / test (in this case, on data.gold)\ntrain_data = tokens[data.gold == 0]\ntest_data = tokens[data.gold == 1]\n\ntrain_labels = labels[data.gold == 0]\ntest_labels = labels[data.gold == 1]\n\ntest_ids = data.id[data.gold == 1].values\n\nlogging.info(\"Loading embeddings\")\n# to download our Dutch embeddings: wget http://i.amcat.nl/w2v -O data/tmp/w2v_320d\n# of course, many pretrained embeddings exist for many languages\nembeddings = gensim.models.Word2Vec.load(\"w2v_320d\")\nembeddings_matrix = np.zeros((len(tokenizer.word_index) + 1, embeddings.vector_size))\nfor word, i in tokenizer.word_index.items():\n if word in embeddings.wv:\n embeddings_matrix[i] = embeddings.wv[word]\n\nlogging.info(\"Creating model\")\n# embedding input layer\nembedding_layer = Embedding(embeddings_matrix.shape[0],\n embeddings_matrix.shape[1],\n weights=[embeddings_matrix],\n input_length=train_data.shape[1],\n trainable=True)\n\nsequence_input = Input(shape=(train_data.shape[1],), dtype='int32')\nembedded_sequences = embedding_layer(sequence_input)\n\n# Add convolution and pooling layers\nx = Conv1D(128, 3, activation='relu')(embedded_sequences)\nx = GlobalMaxPooling1D()(x)\n\n# Add dense hidden layer(s)\nx = Dense(64, activation='relu')(x)\n\n# Add output layer\npreds = Dense(2, activation='sigmoid')(x)\n\n# Create and compile Model\nmodel = Model(sequence_input, preds)\nmodel.compile(loss='mean_absolute_error', optimizer=RMSprop(lr=0.004))\n\nprint(model.summary())\nmodel.fit(train_data, train_labels, epochs=4, batch_size=128)\n\noutput = model.predict(test_data)\nkeras_backend.clear_session()\n\ndef output2sentiment(output):\n \"\"\"Convert NN output to {-1, 0, 1}\"\"\"\n return [0 if x[1] < .5 else (x[0] < 0.5 and -1 or 1) for x in output]\n\n\np = output2sentiment(output)\nactual = output2sentiment(test_labels)\n\nacc = sum([x == y for (x, y) in zip(p, actual)]) / len(p)\nprint(f\"Final accuracy: {acc}\")\n"
] | [
[
"pandas.read_csv"
]
] |
espinielli/traffic | [
"151b29664d9ea25f720fcfbdc6a01966b53391ff"
] | [
"traffic/data/basic/navaid.py"
] | [
"# flake8: noqa\n\nimport logging\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import Dict, Iterator, Optional\n\nimport pandas as pd\n\nfrom ...core.mixins import GeoDBMixin\nfrom ...core.structure import Navaid, NavaidTuple\n\n__github_url = \"https://raw.githubusercontent.com/\"\nbase_url = __github_url + \"xoolive/traffic/master/data/navdata\"\n\n\nclass Navaids(GeoDBMixin):\n\n \"\"\"\n `VOR <https://en.wikipedia.org/wiki/VHF_omnidirectional_range>`_, `DME\n <https://en.wikipedia.org/wiki/Distance_measuring_equipment>`_ and `NDB\n <https://en.wikipedia.org/wiki/Non-directional_beacon>`_ are short-range\n radio navigation systems for aircraft. They help aircraft with a receiving\n unit to determine their position and stay on course.\n\n The first airways were designed after the locations of these beacons. Recent\n progress in GNSS systems helped define more positions by their latitudes and\n longitudes, referred to as FIX.\n\n - Read more `here <https://aerosavvy.com/navigation-name-nonsense/>`_ about\n navigational beacons and how FIX names are decided.\n\n - Read more `here <scenarios/calibration.html>`_ about calibration of such\n equipment.\n\n A (deprecated) database of world navigational beacons is available as:\n\n >>> from traffic.data import navaids\n\n Any navigational beacon can be accessed by the bracket notation:\n\n >>> navaids['NARAK']\n NARAK (FIX): 44.29527778 1.74888889\n\n \"\"\"\n\n cache_dir: Path\n alternatives: Dict[str, \"Navaids\"] = dict()\n name: str = \"default\"\n\n def __init__(self, data: Optional[pd.DataFrame] = None) -> None:\n self._data: Optional[pd.DataFrame] = data\n if self.available:\n Navaids.alternatives[self.name] = self\n\n @property\n def available(self) -> bool:\n return True\n\n def download_data(self) -> None: # coverage: ignore\n \"\"\"Downloads the latest version of the navaid database from the\n repository.\n \"\"\"\n\n from .. import session\n\n iter_lines: Iterator[bytes]\n navaids = []\n\n cache_file = self.cache_dir / \"earth_fix.dat\"\n if cache_file.exists():\n iter_lines = cache_file.open(\"rb\")\n else:\n c = session.get(f\"{base_url}/earth_fix.dat\")\n c.raise_for_status()\n iter_lines = c.iter_lines()\n\n for line_bytes in iter_lines:\n\n line = line_bytes.decode(encoding=\"ascii\", errors=\"ignore\").strip()\n\n # Skip empty lines or comments\n if len(line) < 3 or line[0] == \"#\":\n continue\n\n # Start with valid 2 digit latitude -45. or 52.\n if not ((line[0] == \"-\" and line[3] == \".\") or line[2] == \".\"):\n continue\n\n # Data line => Process fields of this record, separated by a comma\n # Example line:\n # 30.580372 -094.384169 FAREL\n fields = line.split()\n navaids.append(\n Navaid(\n fields[2],\n \"FIX\",\n float(fields[0]),\n float(fields[1]),\n float(\"nan\"),\n None,\n None,\n None,\n )\n )\n\n cache_file = self.cache_dir / \"earth_nav.dat\"\n if cache_file.exists():\n iter_lines = cache_file.open(\"rb\")\n else:\n c = session.get(f\"{base_url}/earth_nav.dat\")\n c.raise_for_status()\n iter_lines = c.iter_lines()\n\n for line_bytes in iter_lines:\n\n line = line_bytes.decode(encoding=\"ascii\", errors=\"ignore\").strip()\n\n # Skip empty lines or comments\n if len(line) == 0 or line[0] == \"#\":\n continue\n\n # Data line => Process fields of this record, separated by a comma\n # Example lines:\n # 2 58.61466599 125.42666626 451 522 30 0.0 A Aldan NDB\n # 3 31.26894444 -085.72630556 334 11120 40 -3.0 OZR CAIRNS VOR-DME\n # type lat lon elev freq ? 
var id desc\n # 0 1 2 3 4 5 6 7 8\n\n fields = line.split()\n\n # Valid line starts with integers\n if not fields[0].isdigit():\n continue # Next line\n\n # Get code for type of navaid\n itype = int(fields[0])\n\n # Type names\n wptypedict = {\n 2: \"NDB\",\n 3: \"VOR\",\n 4: \"ILS\",\n 5: \"LOC\",\n 6: \"GS\",\n 7: \"OM\",\n 8: \"MM\",\n 9: \"IM\",\n 12: \"DME\",\n 13: \"TACAN\",\n }\n\n # Type code never larger than 20\n if itype not in list(wptypedict.keys()):\n continue # Next line\n\n wptype = wptypedict[itype]\n\n # Select types to read\n if wptype not in [\"NDB\", \"VOR\", \"ILS\", \"GS\", \"DME\", \"TACAN\"]:\n continue # Next line\n\n # Find description\n try:\n idesc = line.index(fields[7]) + len(fields[7])\n description: Optional[str] = line[idesc:].strip().upper()\n except Exception:\n description = None\n\n navaids.append(\n Navaid(\n fields[7],\n wptype,\n float(fields[1]),\n float(fields[2]),\n float(fields[3][1:])\n if fields[3].startswith(\"0-\")\n else float(fields[3]),\n float(fields[4])\n if wptype == \"NDB\"\n else float(fields[4]) / 100,\n float(fields[6])\n if wptype in [\"VOR\", \"NDB\", \"ILS\", \"GS\"]\n else None,\n description,\n )\n )\n\n self._data = pd.DataFrame.from_records(\n navaids, columns=NavaidTuple._fields\n )\n\n self._data.to_pickle(self.cache_dir / \"traffic_navaid.pkl\")\n\n @property\n def data(self) -> pd.DataFrame:\n if self._data is not None:\n return self._data\n\n if not (self.cache_dir / \"traffic_navaid.pkl\").exists():\n self.download_data()\n else:\n logging.info(\"Loading navaid database\")\n self._data = pd.read_pickle(self.cache_dir / \"traffic_navaid.pkl\")\n\n if self._data is not None:\n self._data = self._data.rename(\n columns=dict(alt=\"altitude\", lat=\"latitude\", lon=\"longitude\")\n )\n return self._data\n\n @lru_cache()\n def __getitem__(self, name: str) -> Optional[Navaid]:\n x = self.data.query(\n \"description == @name.upper() or name == @name.upper()\"\n )\n if x.shape[0] == 0:\n return None\n dic = dict(x.iloc[0])\n if \"altitude\" not in dic:\n dic[\"altitude\"] = None\n dic[\"frequency\"] = None\n dic[\"magnetic_variation\"] = None\n return Navaid(**dic)\n\n def global_get(self, name: str) -> Optional[Navaid]:\n \"\"\"Search for a navaid from all alternative data sources.\"\"\"\n for _key, value in self.alternatives.items():\n alt = value[name]\n if alt is not None:\n return alt\n return None\n\n def __iter__(self) -> Iterator[Navaid]:\n for _, x in self.data.iterrows():\n yield Navaid(**x)\n\n def search(self, name: str) -> \"Navaids\":\n \"\"\"\n Selects the subset of airways matching name in the name or description\n field.\n\n .. warning::\n The same name may match several navigational beacons in the world.\n\n >>> navaids.search(\"ZUE\")\n name type lat lon alt frequency description\n 272107 ZUE NDB 30.900000 20.068333 0.0 369.00 0.0 ZUEITINA NDB\n 275948 ZUE VOR 47.592167 8.817667 1730.0 110.05 2.0 ZURICH EAST VOR-DME\n 290686 ZUE DME 47.592167 8.817667 1730.0 110.05 NaN ZURICH EAST VOR-DME\n \"\"\"\n return self.__class__(\n self.data.query(\n \"description == @name.upper() or name == @name.upper()\"\n )\n )\n"
] | [
[
"pandas.read_pickle",
"pandas.DataFrame.from_records"
]
] |
Erotemic/plottool_ibeis | [
"dfa0d627cfd9fd8221dbb73d97dcac1e7ddf216f"
] | [
"plottool_ibeis/interact_annotations.py"
] | [
"\"\"\"\nInteractive tool to draw mask on an image or image-like array.\n\nTODO:\n * need concept of subannotation\n * need to take options on a right click of an annotation\n * add support for arbitrary polygons back in .\n * rename species_list to label_list or category_list\n * Just use metadata instead of species / category / label\n # Need to incorporate parts into metadata\n\nNotes:\n 3. Change bounding box and update continuously to the original image the\n new ANNOTATIONs\n\n 2. Make new window and frames inside, double click to pull up normal window\n with editing start with just taking in 6 images and ANNOTATIONs\n\n 1. ANNOTATION ID number, then list of 4 tuples\n\n python -m utool.util_inspect check_module_usage --pat=\"interact_annotations.py\"\n\nReferences:\n Adapted from matplotlib/examples/event_handling/poly_editor.py\n Jan 9 2014: taken from: https://gist.github.com/tonysyu/3090704\n\nCommandLine:\n python -m plottool_ibeis.interact_annotations --test-test_interact_annots --show\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport six\nimport re\nimport numpy as np\ntry:\n import vtool_ibeis as vt\nexcept ImportError:\n pass\nimport utool as ut\nimport itertools as it\nimport matplotlib as mpl\nfrom six.moves import zip, range\nfrom plottool_ibeis import draw_func2 as df2\nfrom plottool_ibeis import abstract_interaction\nprint, rrr, profile = ut.inject2(__name__)\n\n\nDEFAULT_SPECIES_TAG = '____'\n# FIXE THESE TO BE GENERIC\nACCEPT_SAVE_HOTKEY = None # 'ctrl+a'\nADD_RECTANGLE_HOTKEY = 'ctrl+a' # 'ctrl+d'\nADD_RECTANGLE_FULL_HOTKEY = 'ctrl+f'\nDEL_RECTANGLE_HOTKEY = 'ctrl+d' # 'ctrl+r'\nTOGGLE_LABEL_HOTKEY = 'ctrl+t'\n\nHACK_OFF_SPECIES_TYPING = True\nif HACK_OFF_SPECIES_TYPING:\n ADD_RECTANGLE_HOTKEY = 'a' # 'ctrl+d'\n ADD_RECTANGLE_FULL_HOTKEY = 'f'\n DEL_RECTANGLE_HOTKEY = 'd' # 'ctrl+r'\n TOGGLE_LABEL_HOTKEY = 't'\n\nNEXT_IMAGE_HOTKEYS = ['right', 'pagedown']\nPREV_IMAGE_HOTKEYS = ['left', 'pageup']\n\nTAU = np.pi * 2\n\n\nclass AnnotPoly(mpl.patches.Polygon, ut.NiceRepr):\n \"\"\"\n Helper to represent an annotation polygon\n ibeis --aidcmd='Interact image' --aid=1\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from plottool_ibeis.interact_annotations import * # NOQA\n >>> verts = vt.verts_from_bbox([0, 0, 10, 10])\n >>> poly = AnnotPoly(None, 0, verts, 0, '____')\n \"\"\"\n def __init__(poly, ax, num, verts, theta, species, fc=(0, 0, 0),\n line_color=(1, 1, 1), line_width=4, is_orig=False,\n metadata=None, valid_species=None, manager=None):\n\n super(AnnotPoly, poly).__init__(verts, animated=True, fc=fc, ec='none',\n alpha=0)\n poly.manager = manager\n # Ensure basecoords consistency\n poly.basecoords = vt.verts_from_bbox(vt.bbox_from_verts(poly.xy))\n #poly.basecoords = poly.xy\n poly.num = num\n poly.is_orig = is_orig\n poly.theta = theta\n poly.metadata = metadata\n poly.valid_species = valid_species\n poly.tab_list = valid_species\n # put in previous text and tabcomplete list for autocompletion\n poly.tctext = ''\n poly.tcindex = 0\n poly.anchor_idx = 2\n poly.child_polys = {}\n\n # Display stuff that should be removed from constructor\n poly.xy = calc_display_coords(poly.basecoords, poly.theta)\n poly.lines = poly._make_lines(line_color, line_width)\n poly.handle = poly._make_handle_line()\n poly.species = species\n if ax is not None:\n poly.axes_init(ax)\n\n def axes_init(poly, ax):\n species = poly.species\n metadata = poly.metadata\n if isinstance(metadata, ut.LazyDict):\n metadata_ = ut.dict_subset(metadata, 
metadata.cached_keys())\n else:\n metadata_ = metadata\n poly.species_tag = ax.text(\n #tagpos[0], tagpos[1],\n 0, 0,\n species,\n bbox={'facecolor': 'white', 'alpha': .8},\n verticalalignment='top',\n )\n poly.metadata_tag = ax.text(\n 0, 0,\n #tagpos[0] + 5, tagpos[1] + 80,\n ut.repr3(metadata_, nobr=True),\n bbox={'facecolor': 'white', 'alpha': .7},\n verticalalignment='top',\n )\n # ???\n poly.species_tag.remove() # eliminate \"leftover\" copies\n poly.metadata_tag.remove()\n #\n poly.update_display_coords()\n\n def move_to_back(poly):\n # FIXME: doesnt work exactly\n # Probalby need to do in the context of other polys\n zorder = 0\n poly.set_zorder(zorder)\n poly.lines.set_zorder(zorder)\n poly.handle.set_zorder(zorder)\n\n def __nice__(poly):\n return '(num=%r)' % (poly.num)\n\n def add_to_axis(poly, ax):\n ax.add_patch(poly)\n ax.add_line(poly.lines)\n ax.add_line(poly.handle)\n\n def remove_from_axis(poly, ax):\n poly.remove()\n poly.lines.remove()\n poly.handle.remove()\n\n def draw_self(poly, ax, show_species_tags=False, editable=True):\n ax.draw_artist(poly)\n if not editable and poly.lines.get_marker():\n poly.lines.set_marker('')\n elif editable and not poly.lines.get_marker():\n poly.lines.set_marker('o')\n ax.draw_artist(poly.lines)\n if editable:\n ax.draw_artist(poly.handle)\n if editable and show_species_tags:\n # Hack to fix matplotlib 1.5 bug\n poly.species_tag.figure = ax.figure\n poly.metadata_tag.figure = ax.figure\n ax.draw_artist(poly.species_tag)\n ax.draw_artist(poly.metadata_tag)\n\n def _make_lines(poly, line_color, line_width):\n \"\"\" verts - list of (x, y) tuples \"\"\"\n _xs, _ys = list(zip(*poly.xy))\n color = np.array(line_color)\n marker_face_color = line_color\n line_kwargs = {'lw': line_width, 'color': color,\n 'mfc': marker_face_color}\n lines = mpl.lines.Line2D(_xs, _ys, marker='o', alpha=1, animated=True,\n **line_kwargs)\n return lines\n\n def _make_handle_line(poly):\n _xs, _ys = list(zip(*poly.calc_handle_display_coords()))\n line_width = 4\n line_color = (0, 1, 0)\n color = np.array(line_color)\n marker_face_color = line_color\n line_kwargs = {'lw': line_width, 'color': color, 'mfc': marker_face_color}\n lines = mpl.lines.Line2D(_xs, _ys, marker='o', alpha=1, animated=True,\n **line_kwargs)\n return lines\n\n def calc_tag_position(poly):\n r\"\"\"\n\n CommandLine:\n python -m plottool_ibeis.interact_annotations --test-calc_tag_position --show\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from plottool_ibeis.interact_annotations import * # NOQA\n >>> poly = ut.DynStruct()\n >>> poly.basecoords = vt.verts_from_bbox([0, 0, 400, 400], True)\n >>> poly.theta = 0\n >>> poly.xy = vt.verts_from_bbox([0, 0, 400, 400], True)\n >>> tagpos = poly.calc_tag_position()\n >>> print('tagpos = %r' % (tagpos,))\n \"\"\"\n points = [[\n max(list(zip(*poly.basecoords))[0]),\n min(list(zip(*poly.basecoords))[1])\n ]]\n tagpos = rotate_points_around(points, poly.theta, *points_center(poly.xy))[0]\n return tagpos\n\n def calc_handle_display_coords(poly):\n img_h = poly.manager.img.shape[0]\n handle_length = img_h // 32\n #MIN_HANDLE_LENGTH = 25\n #handle_length = MIN_HANDLE_LENGTH\n #handle_length = max(MIN_HANDLE_LENGTH, (h / 4))\n cx, cy = points_center(poly.xy)\n w, h = vt.get_pointset_extent_wh(np.array(poly.basecoords))\n x0, y0 = cx, (cy - (h / 2)) # start at top edge\n x1, y1 = (x0, y0 - handle_length)\n pts = [(x0, y0), (x1, y1)]\n pts = rotate_points_around(pts, poly.theta, cx, cy)\n return pts\n\n def update_color(poly, selected=False, editing_parts=False):\n 
if editing_parts:\n poly.lines.set_color(df2.PINK)\n elif selected:\n # Add selected color\n sel_color = df2.ORANGE if poly.is_orig else df2.LIGHT_BLUE\n poly.lines.set_color(sel_color)\n else:\n line = poly.lines\n line_color = line.get_color()\n desel_color = df2.WHITE if poly.is_orig else df2.LIGHTGRAY\n if np.any(line_color != np.array(desel_color)):\n line.set_color(np.array(desel_color))\n\n def update_lines(poly):\n poly.lines.set_data(list(zip(*poly.xy)))\n poly.handle.set_data(list(zip(*poly.calc_handle_display_coords())))\n\n def set_species(poly, text):\n poly.tctext = text\n poly.species_tag.set_text(text)\n\n def increment_species(poly, amount=1):\n if len(poly.tab_list) > 0:\n tci = (poly.tcindex + amount) % len(poly.tab_list)\n poly.tcindex = tci\n # All tab is going to do is go through the possibilities\n poly.species_tag.set_text(poly.tab_list[poly.tcindex])\n\n def resize_poly(poly, x, y, idx, ax):\n \"\"\"\n Resize a rectangle using idx as the given anchor point. Respects\n current rotation.\n\n CommandLine:\n python -m plottool_ibeis.interact_annotations --exec-resize_poly --show\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from plottool_ibeis.interact_annotations import * # NOQA\n >>> (h, w) = img.shape[0:2]\n >>> x1, y1 = 10, 10\n >>> x2, y2 = w - 10, h - 10\n >>> coords = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))\n >>> x = 3 * w / 4\n >>> y = 3 * h / 4\n >>> idx = 3\n >>> resize_poly(poly, x, y, idx)\n >>> update_UI()\n >>> import plottool_ibeis as pt\n >>> pt.show_if_requested()\n \"\"\"\n # TODO: allow resize by middle click to scale from the center\n # the minus one is because the last coordinate is duplicated (by\n # matplotlib) to get a closed polygon\n tmpcoords = poly.xy[:-1]\n idx = idx % len(tmpcoords)\n previdx = (idx - 1) % len(tmpcoords)\n nextidx = (idx + 1) % len(tmpcoords)\n (dx, dy) = (x - poly.xy[idx][0], y - poly.xy[idx][1])\n # Fudge factor is due to gravity vectors constants\n fudge_factor = (idx) * TAU / 4\n poly_theta = poly.theta + fudge_factor\n\n polar_idx2prev = polarDelta(tmpcoords[idx], tmpcoords[previdx])\n polar_idx2next = polarDelta(tmpcoords[idx], tmpcoords[nextidx])\n tmpcoords[idx] = (tmpcoords[idx][0] + dx, tmpcoords[idx][1] + dy)\n mag_delta = np.linalg.norm((dx, dy))\n theta_delta = np.arctan2(dy, dx)\n theta_rot = theta_delta - (poly_theta + TAU / 4)\n rotx = mag_delta * np.cos(theta_rot)\n roty = mag_delta * np.sin(theta_rot)\n polar_idx2prev[0] -= rotx\n polar_idx2next[0] += roty\n tmpcoords[previdx] = apply_polarDelta(polar_idx2prev, tmpcoords[idx])\n tmpcoords[nextidx] = apply_polarDelta(polar_idx2next, tmpcoords[idx])\n\n # rotate the points by -theta to get the \"unrotated\" points for use as\n # basecoords\n tmpcoords = rotate_points_around(tmpcoords, -poly.theta,\n *points_center(poly.xy))\n # ensure the poly is closed, matplotlib might do this, but I'm not sure\n # if it preserves the ordering we depend on, even if it does add the\n # point\n tmpcoords = tmpcoords[:] + [tmpcoords[0]]\n\n dispcoords = calc_display_coords(tmpcoords, poly.theta)\n\n if (check_valid_coords(ax, dispcoords) and check_min_wh(tmpcoords)):\n poly.basecoords = tmpcoords\n poly.update_display_coords()\n\n def rotate_poly(poly, dtheta, ax):\n coords_lis = calc_display_coords(poly.basecoords, poly.theta + dtheta)\n if check_valid_coords(ax, coords_lis):\n poly.theta += dtheta\n poly.update_display_coords()\n\n def move_poly(poly, dx, dy, ax):\n new_coords = [(x + dx, y + dy) for (x, y) in poly.basecoords]\n coords_list = calc_display_coords(new_coords, 
poly.theta)\n if check_valid_coords(ax, coords_list):\n poly.basecoords = new_coords\n poly.update_display_coords()\n\n def update_display_coords(poly):\n poly.xy = calc_display_coords(poly.basecoords, poly.theta)\n tag_pos = poly.calc_tag_position()\n poly.species_tag.set_position((tag_pos[0] + 5, tag_pos[1]))\n poly.metadata_tag.set_position((tag_pos[0] + 5, tag_pos[1] + 50))\n\n def print_info(poly):\n print('poly = %r' % (poly,))\n print('poly.tag_text = %r' % (poly.species_tag.get_text(),))\n print('poly.metadata = %r' % (poly.metadata,))\n\n def get_poly_mask(poly, shape):\n h, w = shape[0:2]\n y, x = np.mgrid[:h, :w]\n points = np.transpose((x.ravel(), y.ravel()))\n verts = poly.xy\n path = mpl.path.Path(verts)\n mask = path.contains_points(points)\n #mask = nxutils.points_inside_poly(points, verts)\n return mask.reshape(h, w)\n\n def is_near_handle(poly, xy_pt, max_dist):\n line = poly.calc_handle_display_coords()\n return is_within_distance_from_line(xy_pt, line, max_dist)\n\n @property\n def size(poly):\n return vt.bbox_from_verts(poly.xy)[2:4]\n\n\[email protected]_metaclass(ut.ReloadingMetaclass)\nclass AnnotationInteraction(abstract_interaction.AbstractInteraction):\n \"\"\"\n An interactive polygon editor.\n\n SeeAlso:\n ibeis.viz.interact.interact_annotations2\n (ensure that any updates here are propogated there)\n\n Args:\n verts_list (list) : list of lists of (float, float)\n List of (x, y) coordinates used as vertices of the polygon.\n \"\"\"\n # --- Initialization and Figure Widgets\n def __init__(self, img, img_ind=None, commit_callback=None,\n verts_list=None,\n bbox_list=None,\n theta_list=None,\n species_list=None,\n metadata_list=None,\n line_width=4, line_color=(1, 1, 1), face_color=(0, 0, 0),\n fnum=None, default_species=DEFAULT_SPECIES_TAG,\n next_callback=None, prev_callback=None, do_mask=False,\n valid_species=[],\n **kwargs):\n super(AnnotationInteraction, self).__init__(fnum=fnum, **kwargs)\n\n self.valid_species = valid_species\n self.commit_callback = commit_callback # commit_callback\n self.but_width = .14\n #self.but_height = .08\n self.next_prev_but_height = .08\n self.but_height = self.next_prev_but_height - .01\n self.callback_funcs = dict([\n ('close_event', self.on_close),\n ('draw_event', self.draw_callback),\n ('button_press_event', self.on_click),\n ('button_release_event', self.on_click_release),\n ('figure_leave_event', self.on_figure_leave),\n ('key_press_event', self.on_key_press),\n ('motion_notify_event', self.on_motion),\n ('pick_event', self.on_pick),\n #('resize_event', self.on_resize),\n ])\n self.mpl_callback_ids = {}\n self.img = img\n self.show_species_tags = True\n self.max_dist = 10\n def _reinitialize_variables():\n self.do_mask = do_mask\n self.img_ind = img_ind\n self.species_tag = default_species\n self.showverts = True\n self.fc_default = face_color\n self.mouseX = None # mouse X coordinate\n self.mouseY = None # mouse Y coordinate\n self.ind_xy = None\n self._autoinc_polynum = it.count(0) # num polys in image\n self._poly_held = False # if any poly is active\n self._selected_poly = None # active polygon\n self.parent_poly = None # level of parts heirarchy\n self.background = None\n # Ensure nothing is down\n self.reset_mouse_state()\n _reinitialize_variables()\n # hack involving exploting lexical scoping to save defaults for a\n # restore operation\n self.reinitialize_variables = _reinitialize_variables\n\n try:\n self.fig = df2.figure(fnum=self.fnum, doclf=True, docla=True)\n df2.close_figure(self.fig)\n except 
AttributeError:\n pass\n self.fig = df2.figure(fnum=self.fnum, doclf=True, docla=True)\n\n self.reinitialize_figure(fnum=self.fnum)\n assert verts_list is None or bbox_list is None, 'only one can be specified'\n # bbox_list will get converted to verts_list\n if verts_list is not None:\n bbox_list = vt.bboxes_from_vert_list(verts_list)\n if bbox_list is not None:\n verts_list = [vt.verts_from_bbox(bbox) for bbox in bbox_list]\n if theta_list is None:\n theta_list = [0 for _ in verts_list]\n if species_list is None:\n species_list = [self.species_tag for _ in verts_list]\n if metadata_list is None:\n metadata_list = [None for _ in verts_list]\n\n # Create the list of polygons\n self.handle_polygon_creation(bbox_list, theta_list, species_list, metadata_list)\n self._ind = None # the active vert\n self._current_rotate_poly = None\n\n self.mpl_callback_ids = {}\n self.connect_mpl_callbacks(self.fig.canvas)\n\n self.add_action_buttons()\n self.update_callbacks(next_callback, prev_callback)\n\n def reinitialize_figure(self, fnum=None):\n self.fig.clear()\n self.fig.clf()\n #self.fig.cla()\n #ut.qflag()\n self.fnum = fnum\n #print(self.fnum)\n ax = df2.gca()\n #self.fig.ax = ax\n self.ax = ax\n df2.remove_patches(self.ax)\n df2.imshow(self.img, fnum=fnum)\n\n ax.set_clip_on(False)\n ax.set_title(('\\n'.join([\n 'Click and drag to select/move/resize/orient an ANNOTATION',\n #'Press enter to clear the species tag of the selected ANNOTATION',\n 'Press tab to cycle through annotation species',\n #'Type to edit the ANNOTATION species (press tab to autocomplete)'\n ])))\n\n def add_action_buttons(self):\n self.append_button(\n 'Add Annotation\\n' + pretty_hotkey_map(ADD_RECTANGLE_HOTKEY),\n rect=[0.18, 0.015, self.but_width, self.but_height],\n callback=self.add_new_poly\n )\n # self.append_button(\n # 'Add Full Annotation\\n' + pretty_hotkey_map(ADD_RECTANGLE_FULL_HOTKEY),\n # rect=[0.34, 0.015, self.but_width, self.but_height],\n # callback=ut.partial(self.add_new_poly, full=True)\n # )\n self.append_button(\n 'Delete Annotation\\n' + pretty_hotkey_map(DEL_RECTANGLE_HOTKEY),\n rect=[0.50, 0.015, self.but_width, self.but_height],\n callback=self.delete_current_poly\n )\n self.append_button(\n 'Save and Exit\\n' + pretty_hotkey_map(ACCEPT_SAVE_HOTKEY),\n rect=[0.66, 0.015, self.but_width, self.but_height],\n callback=self.save_and_exit\n )\n\n def disconnect_mpl_callbacks(self, canvas):\n \"\"\" disconnects all connected matplotlib callbacks \"\"\"\n for name, callbackid in six.iteritems(self.mpl_callback_ids):\n canvas.mpl_disconnect(callbackid)\n self.mpl_callback_ids = {}\n\n def connect_mpl_callbacks(self, canvas):\n \"\"\" disconnects matplotlib callbacks specified in the\n self.mpl_callback_ids dict \"\"\"\n #http://matplotlib.org/1.3.1/api/backend_bases_api.html\n # Create callback ids\n self.disconnect_mpl_callbacks(canvas)\n self.mpl_callback_ids = {\n name: canvas.mpl_connect(name, func)\n for name, func in six.iteritems(self.callback_funcs)\n }\n self.fig.canvas = canvas\n\n # --- Updates\n\n def update_callbacks(self, next_callback, prev_callback):\n self.prev_callback = prev_callback\n self.next_callback = next_callback\n # Hack because the callbacks actually need to be wrapped\n _next_callback = None if self.next_callback is None else self.next_image\n _prev_callback = None if self.prev_callback is None else self.prev_image\n self.append_button(\n 'Previous Image\\n' + pretty_hotkey_map(PREV_IMAGE_HOTKEYS),\n rect=[0.02, 0.01, self.but_width, self.next_prev_but_height],\n 
callback=_prev_callback,\n )\n self.append_button(\n 'Next Image\\n' + pretty_hotkey_map(NEXT_IMAGE_HOTKEYS),\n rect=[0.82, 0.01, self.but_width, self.next_prev_but_height],\n callback=_next_callback,\n )\n\n def update_image_and_callbacks(self, img, bbox_list, theta_list,\n species_list, metadata_list, next_callback,\n prev_callback):\n self.disconnect_mpl_callbacks(self.fig.canvas)\n for poly in six.itervalues(self.polys):\n poly.remove()\n self.polys = {}\n self.reinitialize_variables()\n self.img = img\n self.reinitialize_figure(fnum=self.fnum)\n self.handle_polygon_creation(bbox_list, theta_list, species_list,\n metadata_list)\n self.add_action_buttons()\n self.draw()\n self.connect_mpl_callbacks(self.fig.canvas)\n self.update_callbacks(next_callback, prev_callback)\n print('[interact_annot] drawing')\n self.draw()\n self.update_UI()\n\n def _update_poly_colors(self):\n for poly in six.itervalues(self.uneditable_polys):\n poly.update_color()\n for ind, poly in six.iteritems(self.editable_polys):\n assert poly.num == ind\n selected = poly is self._selected_poly\n editing_parts = poly is self.parent_poly\n poly.update_color(selected, editing_parts)\n self.draw()\n\n def _update_poly_lines(self):\n for poly in six.itervalues(self.uneditable_polys):\n #self.last_vert_ind = len(poly.xy) - 1\n poly.update_lines()\n for poly in six.itervalues(self.editable_polys):\n self.last_vert_ind = len(poly.xy) - 1\n poly.update_lines()\n\n def update_UI(self):\n self._update_poly_lines()\n self._update_poly_colors()\n self.fig.canvas.restore_region(self.background)\n self.draw_artists()\n self.fig.canvas.blit(self.ax.bbox)\n\n def draw_artists(self):\n for poly in six.itervalues(self.uneditable_polys):\n poly.draw_self(self.ax, editable=False)\n for poly in six.itervalues(self.editable_polys):\n poly.draw_self(self.ax, self.show_species_tags)\n\n # --- Data Matainence / Other\n\n @property\n def uneditable_polys(self):\n if self.in_edit_parts_mode:\n return {self.parent_poly.num: self.parent_poly}\n #return self.polys\n else:\n return {}\n\n @property\n def editable_polys(self):\n #return self.polys\n if self.in_edit_parts_mode:\n return self.parent_poly.child_polys\n else:\n if self.polys is None:\n self.polys = {}\n return self.polys\n\n def get_poly_under_cursor(self, x, y):\n \"\"\"\n get the index of the vertex under cursor if within max_dist tolerance\n \"\"\"\n # Remove any deleted polygons\n poly_dict = {k: v for k, v in self.editable_polys.items() if v is not None}\n if len(poly_dict) > 0:\n poly_inds = list(poly_dict.keys())\n poly_list = ut.take(poly_dict, poly_inds)\n # Put polygon coords into figure space\n poly_pts = [poly.get_transform().transform(np.asarray(poly.xy))\n for poly in poly_list]\n # Find the nearest vertex from the annotations\n ind_dist_list = [vt.nearest_point(x, y, polypts)\n for polypts in poly_pts]\n dist_lists = ut.take_column(ind_dist_list, 1)\n min_idx = np.argmin(dist_lists)\n sel_polyind = poly_inds[min_idx]\n sel_vertx, sel_dist = ind_dist_list[min_idx]\n # Ensure nearest distance is within threshold\n if sel_dist >= self.max_dist ** 2:\n sel_polyind, sel_vertx = (None, None)\n else:\n sel_polyind, sel_vertx = (None, None)\n return sel_polyind, sel_vertx\n\n def get_most_recently_added_poly(self):\n if len(self.editable_polys) == 0:\n return None\n else:\n # most recently added polygon has the highest index\n poly_ind = max(list(self.editable_polys.keys()))\n return self.editable_polys[poly_ind]\n\n def new_polygon(self, verts, theta, species, fc=(0, 0, 0),\n 
line_color=(1, 1, 1), line_width=4, is_orig=False,\n metadata=None):\n \"\"\" verts - list of (x, y) tuples \"\"\"\n # create new polygon from verts\n num = six.next(self._autoinc_polynum)\n poly = AnnotPoly(ax=self.ax, num=num, verts=verts, theta=theta,\n species=species, fc=fc, line_color=line_color,\n line_width=line_width, is_orig=is_orig,\n metadata=metadata, valid_species=self.valid_species,\n manager=self)\n poly.set_picker(self.is_poly_pickable)\n return poly\n\n def handle_polygon_creation(self, bbox_list, theta_list, species_list,\n metadata_list):\n \"\"\" Maintain original input \"\"\"\n assert bbox_list is not None\n if theta_list is None:\n theta_list = [0.0 for _ in range(len(bbox_list))]\n if species_list is None:\n species_list = ['' for _ in range(len(bbox_list))]\n assert len(bbox_list) == len(theta_list), 'inconconsitent data1'\n assert len(bbox_list) == len(species_list), 'inconconsitent data2'\n assert len(bbox_list) == len(metadata_list), 'inconconsitent data2'\n self.original_indices = list(range(len(bbox_list)))\n self.original_bbox_list = bbox_list\n self.original_theta_list = theta_list\n self.original_species_list = species_list\n self.original_metadata_list = metadata_list\n # Convert bbox to verticies\n verts_list = [vt.verts_from_bbox(bbox) for bbox in bbox_list]\n for verts in verts_list:\n verts = np.array(verts)\n for vert in verts:\n enforce_dims(self.ax, vert)\n # Create polygons\n poly_list = [self.new_polygon(verts_, theta, species, is_orig=True,\n metadata=metadata)\n for (verts_, theta, species, metadata) in\n zip(verts_list, theta_list, species_list, metadata_list)]\n self.polys = {poly.num: poly for poly in poly_list}\n if len(self.polys) != 0:\n # Select poly with largest area\n wh_list = np.array([poly.size for poly in six.itervalues(self.polys)])\n poly_index = list(self.polys.keys())[wh_list.prod(axis=1).argmax()]\n self._selected_poly = self.polys[poly_index]\n self._update_poly_colors()\n self._update_poly_lines()\n else:\n self._selected_poly = None\n # Add polygons to the axis\n for poly in six.itervalues(self.polys):\n poly.add_to_axis(self.ax)\n # Give polygons mpl change callbacks\n #for poly in six.itervalues(self.polys):\n # poly.add_callback(self.poly_changed)\n\n # --- Actions\n\n def add_new_poly(self, event=None, full=False):\n \"\"\" Adds a new annotation to the image \"\"\"\n if full:\n (h, w) = self.img.shape[0:2]\n x1, y1 = 1, 1\n x2, y2 = w - 1, h - 1\n coords = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))\n else:\n if self._selected_poly is not None:\n defaultshape_polys = {\n self._selected_poly.num:\n self._selected_poly\n }\n else:\n defaultshape_polys = self.editable_polys\n coords = default_vertices(self.img, defaultshape_polys,\n self.mouseX, self.mouseY)\n\n poly = self.new_polygon(verts=coords, theta=0,\n species=self.species_tag)\n poly.parent = self.parent_poly\n\n # Add to the correct place in current heirarchy\n self.editable_polys[poly.num] = poly\n poly.add_to_axis(self.ax)\n\n #self.polys[poly.num] = poly\n\n #poly.add_callback(self.poly_changed)\n self._ind = None # the active vert\n self._selected_poly = self.get_most_recently_added_poly()\n self._update_poly_lines()\n self._update_poly_colors()\n self.draw()\n\n def delete_current_poly(self, event=None):\n \"\"\"\n Removes an annotation\n \"\"\"\n if self._selected_poly is None:\n print('[interact_annot] No polygon selected to delete')\n else:\n print('[interact_annot] delete annot')\n poly = self._selected_poly\n #self.polys.pop(poly.num)\n del 
self.editable_polys[poly.num]\n # remove the poly from the figure itself\n poly.remove_from_axis(self.ax)\n #reset anything that has to do with current poly\n self._selected_poly = self.get_most_recently_added_poly()\n self._poly_held = False\n if self._selected_poly is not None:\n self._update_poly_colors()\n self.draw()\n\n def edit_poly_parts(self, poly):\n if poly is None and self.parent_poly is not None:\n self._selected_poly = self.parent_poly\n print('self.parent_poly = %r' % (self.parent_poly,))\n self.parent_poly = poly\n if poly is not None:\n self._selected_poly = self.get_most_recently_added_poly()\n print('self._selected_poly = %r' % (self._selected_poly,))\n if poly is None:\n self.ax.imshow(vt.convert_colorspace(self.img, 'RGB'))\n else:\n # Mask the part of the image not belonging to the annotation\n mask = poly.get_poly_mask(self.img.shape)\n masked_img = apply_mask(self.img, mask)\n self.ax.imshow(vt.convert_colorspace(masked_img, 'RGB'))\n self._update_poly_colors()\n\n @property\n def in_edit_parts_mode(self):\n return self.parent_poly is not None\n\n def toggle_species_label(self):\n print('[interact_annot] toggle_species_label()')\n self.show_species_tags = not self.show_species_tags\n self.update_UI()\n\n def save_and_exit(self, event, do_close=True):\n \"\"\"\n The Save and Exit Button\n\n write a callback to redraw viz for bbox_list\n \"\"\"\n print('[interact_annot] Pressed Accept Button')\n\n def _get_annottup_list():\n annottup_list = []\n indices_list = []\n #theta_list = []\n for poly in six.itervalues(self.polys):\n assert poly is not None\n index = poly.num\n bbox = tuple(map(int, vt.bbox_from_verts(poly.basecoords)))\n theta = poly.theta\n species = poly.species_tag.get_text()\n annottup = (bbox, theta, species)\n indices_list.append(index)\n annottup_list.append(annottup)\n return indices_list, annottup_list\n\n def _send_back_annotations():\n print('[interact_annot] _send_back_annotations')\n indices_list, annottup_list = _get_annottup_list()\n # Delete if index is in original_indices but no in indices_list\n deleted_indices = list(set(self.original_indices) -\n set(indices_list))\n changed_indices = []\n unchanged_indices = [] # sanity check\n changed_annottups = []\n new_annottups = []\n original_annottup_list = list(zip(self.original_bbox_list,\n self.original_theta_list,\n self.original_species_list))\n for index, annottup in zip(indices_list, annottup_list):\n # If the index is not in the originals then it is new\n if index not in self.original_indices:\n new_annottups.append(annottup)\n else:\n if annottup not in original_annottup_list:\n changed_annottups.append(annottup)\n changed_indices.append(index)\n else:\n unchanged_indices.append(index)\n self.commit_callback(unchanged_indices, deleted_indices,\n changed_indices, changed_annottups,\n new_annottups)\n\n if self.commit_callback is not None:\n _send_back_annotations()\n # Make mask from selection\n if self.do_mask is True:\n self.fig.clf()\n self.ax = ax = self.fig.subplot(111)\n mask_list = [poly.get_poly_mask(self.img.shape)\n for poly in six.itervalues(self.polys)]\n if len(mask_list) == 0:\n print('[interact_annot] No polygons to make mask out of')\n return 0\n mask = mask_list[0]\n for mask_ in mask_list:\n mask = np.maximum(mask, mask_)\n #mask = self.get_poly_mask()\n # User must close previous figure\n # Modify the image with the mask\n masked_img = apply_mask(self.img, mask)\n # show the modified image\n ax.imshow(masked_img)\n ax.title('Region outside of mask is darkened')\n\n 
ax.figure.show()\n return\n\n print('[interact_annot] Accept Over')\n if do_close:\n df2.close_figure(self.fig)\n\n # --- Connected Slots and Callbacks\n\n def next_image(self, event):\n if self.next_callback is not None:\n self.next_callback()\n\n def prev_image(self, event):\n if self.prev_callback is not None:\n self.prev_callback()\n\n def start(self):\n # FIXME: conform to abstract_interaction start conventions\n #self._ensure_running()\n #self.show_page()\n self.show()\n\n def show(self):\n self.draw()\n self.bring_to_front()\n\n def draw_callback(self, event):\n self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)\n self.draw_artists()\n\n def _show_poly_context_menu(self, event):\n def _make_options():\n metadata = self._selected_poly.metadata\n options = []\n options += [\n #('Foo: ', ut.partial(print, 'bar')),\n #('Move to back ', self._selected_poly.move_to_back),\n ('PolyInfo: ', self._selected_poly.print_info),\n ]\n if isinstance(metadata, ut.LazyDict):\n options += metadata.nocache_eval('annot_context_options')\n return options\n options = _make_options()\n self.show_popup_menu(options, event)\n\n def is_poly_pickable(self, artist, event):\n if artist.num in self.editable_polys:\n mouse_xy = event.x, event.y\n hit = artist.contains_point(mouse_xy)\n else:\n hit = False\n #import utool\n #utool.embed()\n props = {'dblclick': event.dblclick}\n return hit, props\n\n def on_pick(self, event):\n \"\"\" Makes selected polygon translucent \"\"\"\n if self.debug > 0 or True:\n print('[interact_annot] on_pick')\n if not self._poly_held:\n artist = event.artist\n print('[interact_annot] picked artist = %r' % (artist,))\n self._selected_poly = artist\n self._poly_held = True\n if event.dblclick and not self.in_edit_parts_mode:\n self.edit_poly_parts(self._selected_poly)\n pass\n #x, y = event.mouseevent.xdata, event.mouseevent.xdata\n\n def on_click(self, event):\n \"\"\"\n python -m ibeis.viz.interact.interact_annotations2 --test-ishow_image2 --show\n \"\"\"\n super(AnnotationInteraction, self).on_click(event)\n\n if self._ind is not None:\n self._ind = None\n return\n if not self.showverts:\n return\n if event.inaxes is None:\n return\n\n if len(self.editable_polys) == 0:\n print('[interact_annot] No polygons on screen')\n return\n\n # Right click - context menu\n if event.button == self.RIGHT_BUTTON:\n self._show_poly_context_menu(event)\n # Left click, indicate that a mouse button is down\n if event.button == self.LEFT_BUTTON:\n #if event.dblclick and not self.in_edit_parts_mode:\n # # On double click enter a single annotation to annotation parts\n # #print(\"DOUBLECLICK\")\n # #self.edit_poly_parts(self._selected_poly)\n if event.key == 'shift':\n self._current_rotate_poly = self._selected_poly\n else:\n # Determine if we are clicking the rotation line\n mouse_xy = (event.xdata, event.ydata)\n for poly in six.itervalues(self.editable_polys):\n if poly.is_near_handle(mouse_xy, self.max_dist):\n self._current_rotate_poly = poly\n break\n if event.dblclick:\n # Reset rotation\n if self._current_rotate_poly is not None:\n self._current_rotate_poly.theta = 0\n self._current_rotate_poly.update_display_coords()\n\n polyind, self._ind = self.get_poly_under_cursor(event.x, event.y)\n\n if self._ind is not None and polyind is not None:\n self._selected_poly = self.editable_polys[polyind]\n if self._selected_poly is None:\n return\n self.ind_xy = self._selected_poly.xy[self._ind]\n self._poly_held = True\n self._selected_poly.anchor_idx = self._ind\n\n self.mouseX, self.mouseY = 
event.xdata, event.ydata\n\n if self._poly_held is True or self._ind is not None:\n self._selected_poly.set_alpha(.2)\n self._update_poly_colors()\n\n self._update_poly_colors()\n self._update_poly_lines()\n\n if self.background is not None:\n self.fig.canvas.restore_region(self.background)\n else:\n print('[interact_annot] error: self.background is none.'\n ' Trying refresh.')\n self.fig.canvas.restore_region(self.background)\n self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)\n\n # Redraw blitted objects\n self.draw_artists()\n self.fig.canvas.blit(self.ax.bbox)\n\n def on_motion(self, event):\n if ut.VERBOSE:\n print('[interact_annot] on_motion')\n print('[interact_annot] Got key: %r' % event.key)\n super(AnnotationInteraction, self).on_motion(event)\n # uses boolean punning for terseness\n lastX = self.mouseX or None\n lastY = self.mouseY or None\n # Allow for getting coordinates outside the axes\n ax = self.ax\n mousePos = [event.x, event.y]\n self.mouseX, self.mouseY = ax.transData.inverted().transform(mousePos)\n deltaX = lastX is not None and self.mouseX - lastX\n deltaY = lastY is not None and self.mouseY - lastY\n\n if not self.showverts:\n return\n\n #if self.in_edit_parts_mode:\n # return\n\n quick_resize = (self._poly_held is True and (\n (event.button == self.MIDDLE_BUTTON) or\n (event.button == self.RIGHT_BUTTON) or\n (event.button == self.LEFT_BUTTON and event.key == 'ctrl')\n ))\n\n if self._poly_held is True and self._ind is not None:\n # Resize by dragging corner\n self._selected_poly.resize_poly(self.mouseX, self.mouseY,\n self._ind, self.ax)\n self._selected_poly.anchor_idx = self._ind\n elif quick_resize:\n # Quick resize with special click\n anchor_idx = self._selected_poly.anchor_idx\n idx = (anchor_idx + 2) % 4 # choose opposite anchor point\n self._selected_poly.resize_poly(self.mouseX, self.mouseY, idx,\n self.ax)\n elif self._current_rotate_poly:\n # Rotate using handle\n cx, cy = points_center(self._current_rotate_poly.xy)\n theta = np.arctan2(cy - self.mouseY, cx - self.mouseX) - TAU / 4\n dtheta = theta - self._current_rotate_poly.theta\n self._current_rotate_poly.rotate_poly(dtheta, self.ax)\n elif self._ind is None and event.button == self.LEFT_BUTTON:\n # Translate by dragging inside annot\n flag = deltaX is not None and deltaY is not None\n if self._poly_held is True and flag:\n self._selected_poly.move_poly(deltaX, deltaY, self.ax)\n self._ind = None\n else:\n return\n self.update_UI()\n\n def on_click_release(self, event):\n super(AnnotationInteraction, self).on_click_release(event)\n\n #if self._poly_held is True:\n self._poly_held = False\n\n self._current_rotate_poly = None\n\n if not self.showverts:\n return\n\n if self._selected_poly is None:\n return\n\n _flag = (\n self._ind is None or\n self._poly_held is False or\n (self._ind is not None and\n self.is_down['left'] is True and\n self._selected_poly is not None\n )\n )\n if _flag:\n self._selected_poly.set_alpha(0)\n #self._selected_poly.set_facecolor('white')\n\n self.update_UI()\n\n if self._ind is None:\n return\n\n if len(self.editable_polys) == 0:\n print('[interact_annot] No polygons on screen')\n return\n\n if self._selected_poly is None:\n print('[interact_annot] WARNING: Polygon unknown.'\n ' Using default. 
(2)')\n self._selected_poly = self.get_most_recently_added_poly()\n\n curr_xy = self._selected_poly.xy[self._ind]\n\n if self.ind_xy is not None:\n if np.all(np.fabs(self.ind_xy - curr_xy) < 3):\n return\n\n self._ind = None\n self._poly_held = False\n\n self.draw()\n\n def on_figure_leave(self, event):\n if self.debug > 0:\n print('[interact_annot] figure leave')\n #self.print_status()\n #self.on_click_release(event)\n self._poly_held = False\n self._ind = None\n self.reset_mouse_state()\n #self.print_status()\n\n def on_key_press(self, event):\n if self.debug > 0:\n print('[interact_annot] on_key_press')\n print('[interact_annot] Got key: %r' % event.key)\n print('[interact_annot] Got key: %r' % event.key)\n if not event.inaxes:\n return\n\n if event.key == ACCEPT_SAVE_HOTKEY:\n self.save_and_exit(event)\n elif event.key == ADD_RECTANGLE_HOTKEY:\n self.add_new_poly()\n elif event.key == ADD_RECTANGLE_FULL_HOTKEY:\n self.add_new_poly(full=True)\n elif event.key == DEL_RECTANGLE_HOTKEY:\n self.delete_current_poly()\n elif event.key == TOGGLE_LABEL_HOTKEY:\n self.toggle_species_label()\n\n if re.match('escape', event.key):\n self.edit_poly_parts(None)\n\n if re.match('^backspace$', event.key):\n self._selected_poly.set_species(DEFAULT_SPECIES_TAG)\n if re.match('^tab$', event.key):\n self._selected_poly.increment_species(amount=1)\n if re.match('^ctrl\\+tab$', event.key):\n self._selected_poly.increment_species(amount=-1)\n\n # NEXT ANND PREV COMMAND\n def _matches_hotkey(key, hotkeys):\n return any([re.match(hk, key) is not None for hk in\n ut.ensure_iterable(hotkeys)])\n\n if _matches_hotkey(event.key, PREV_IMAGE_HOTKEYS):\n self.prev_image(event)\n if _matches_hotkey(event.key, NEXT_IMAGE_HOTKEYS):\n self.next_image(event)\n self.draw()\n\n #def poly_changed(self, poly):\n # \"\"\" this method is called whenever the polygon object is called \"\"\"\n # print('poly_changed poly=%r' % (poly,))\n # # only copy the artist props to the line (except visibility)\n # #vis = poly.lines.get_visible()\n # #vis = poly.handle.get_visible()\n # #poly.lines.set_visible(vis)\n # #poly.handle.set_visible(vis)\n\n\ndef pretty_hotkey_map(hotkeys):\n if hotkeys is None:\n return ''\n hotkeys = [hotkeys] if not isinstance(hotkeys, list) else hotkeys\n mapping = {\n #'right': 'right arrow',\n #'left': 'left arrow',\n }\n mapped_hotkeys = [mapping.get(hk, hk) for hk in hotkeys]\n hotkey_str = '(' + ut.conj_phrase(mapped_hotkeys, 'or') + ')'\n return hotkey_str\n\n\ndef apply_mask(img, mask):\n masked_img = img.copy()\n masked_img[~mask] = np.uint8(np.clip(masked_img[~mask] - 100., 0, 255))\n return masked_img\n\n\ndef points_center(pts):\n # the polygons have the first point listed twice in order for them to be\n # drawn as closed, but that point shouldn't be counted twice for computing\n # the center (hence the [:-1] slice)\n return np.array(pts[:-1]).mean(axis=0)\n\n\ndef rotate_points_around(points, theta, ax, ay):\n \"\"\"\n References:\n http://www.euclideanspace.com/maths/geometry/affine/aroundPoint/matrix2d/\n \"\"\"\n # TODO: Can use vtool_ibeis for this\n sin, cos, array = np.sin, np.cos, np.array\n augpts = array([array((x, y, 1)) for (x, y) in points])\n ct = cos(theta)\n st = sin(theta)\n # correct matrix obtained from\n rot_mat = array(\n [(ct, -st, ax - ct * ax + st * ay),\n (st, ct, ay - st * ax - ct * ay),\n ( 0, 0, 1)]\n )\n return [(x, y) for (x, y, z) in rot_mat.dot(augpts.T).T]\n\n\ndef calc_display_coords(oldcoords, theta):\n return rotate_points_around(oldcoords, theta, 
*points_center(oldcoords))\n\n\ndef polarDelta(p1, p2):\n mag = vt.L2(p1, p2)\n theta = np.arctan2(p2[1] - p1[1], p2[0] - p1[0])\n return [mag, theta]\n\n\ndef apply_polarDelta(poldelt, cart):\n newx = cart[0] + (poldelt[0] * np.cos(poldelt[1]))\n newy = cart[1] + (poldelt[0] * np.sin(poldelt[1]))\n return (newx, newy)\n\n\ndef is_within_distance_from_line(pt, line, max_dist):\n pt = np.array(pt)\n line = np.array(line)\n return vt.distance_to_lineseg(pt, line[0], line[1]) <= max_dist\n\n\ndef check_min_wh(coords):\n \"\"\"\n Depends on hardcoded indices, which is inelegant, but\n we're already depending on those for the FUDGE_FACTORS\n array above\n 0----1\n | |\n 3----2\n \"\"\"\n MIN_W = 5\n MIN_H = 5\n # the seperate 1 and 2 variables are not strictly necessary, but\n # provide a sanity check to ensure that we're dealing with the\n # right shape\n #w, h = vt.get_pointset_extent_wh(np.array(coords))\n w1 = coords[1][0] - coords[0][0]\n w2 = coords[2][0] - coords[3][0]\n h1 = coords[3][1] - coords[0][1]\n h2 = coords[2][1] - coords[1][1]\n assert np.isclose(w1, w2), ('w1: %r, w2: %r' % (w1, w2))\n assert np.isclose(h1, h2), ('h1: %r, h2: %r' % (h1, h2))\n w, h = w1, h1\n #print('w, h = (%r, %r)' % (w1, h1))\n return (MIN_W < w) and (MIN_H < h)\n\n\ndef default_vertices(img, polys=None, mouseX=None, mouseY=None):\n \"\"\"Default to rectangle that has a quarter-width/height border.\"\"\"\n (h, w) = img.shape[0:2]\n # Center the new verts around wherever the mouse is\n if mouseX is not None and mouseY is not None:\n center_x = mouseX\n center_h = mouseY\n else:\n center_x = w // 2\n center_h = h // 2\n\n if polys is not None and len(polys) > 0:\n # Use the largest polygon size as the default verts\n wh_list = np.array([vt.bbox_from_verts(poly.xy)[2:4]\n for poly in six.itervalues(polys)])\n w_, h_ = wh_list.max(axis=0) // 2\n else:\n # If no poly exists use 1/4 of the image size\n w_, h_ = (w // 4, h // 4)\n # Get the x/y extents by offseting the centers\n x1, x2 = np.array([center_x, center_x]) + (w_ * np.array([-1, 1]))\n y1, y2 = np.array([center_h, center_h]) + (h_ * np.array([-1, 1]))\n # Clip to bounds\n x1 = max(x1, 1)\n y1 = max(y1, 1)\n x2 = min(x2, w - 1)\n y2 = min(y2, h - 1)\n return ((x1, y1), (x1, y2), (x2, y2), (x2, y1))\n\n\ndef check_valid_coords(ax, coords_list):\n return all([check_dims(ax, xy_pt) for xy_pt in coords_list])\n\n\ndef check_dims(ax, xy_pt, margin=0.5):\n \"\"\"\n checks if bounding box dims are ok\n\n Allow the bounding box to go off the image\n so orientations can be done correctly\n \"\"\"\n num_out = 0\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n if xy_pt[0] < xlim[0] + margin:\n num_out += 1\n if xy_pt[0] > xlim[1] - margin:\n num_out += 1\n if xy_pt[1] < ylim[1] + margin:\n num_out += 1\n if xy_pt[1] > ylim[0] - margin:\n num_out += 1\n return num_out <= 3\n\n\ndef enforce_dims(ax, xy_pt, margin=0.5):\n \"\"\"\n ONLY USE THIS ON UNROTATED RECTANGLES, as to do otherwise may yield\n arbitrary polygons\n \"\"\"\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n if xy_pt[0] < xlim[0] + margin:\n xy_pt[0] = xlim[0] + margin\n if xy_pt[0] > xlim[1] - margin:\n xy_pt[0] = xlim[1] - margin\n if xy_pt[1] < ylim[1] + margin:\n xy_pt[1] = ylim[1] + margin\n if xy_pt[1] > ylim[0] - margin:\n xy_pt[1] = ylim[0] - margin\n return True\n\n\ndef test_interact_annots():\n r\"\"\"\n CommandLine:\n python -m plottool_ibeis.interact_annotations --test-test_interact_annots --show\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from plottool_ibeis.interact_annotations import * # 
NOQA\n >>> import plottool_ibeis as pt\n >>> # build test data\n >>> # execute function\n >>> self = test_interact_annots()\n >>> # verify results\n >>> print(self)\n >>> pt.show_if_requested()\n \"\"\"\n print('[interact_annot] *** START DEMO ***')\n verts_list = [\n ((0, 400), (400, 400), (400, 0), (0, 0), (0, 400)),\n ((400, 700), (700, 700), (700, 400), (400, 400), (400, 700))\n ]\n #if img is None:\n try:\n img_url = 'http://i.imgur.com/Vq9CLok.jpg'\n img_fpath = ut.grab_file_url(img_url)\n img = vt.imread(img_fpath)\n except Exception as ex:\n print('[interact_annot] cant read zebra: %r' % ex)\n img = np.random.uniform(0, 255, size=(100, 100))\n valid_species = ['species1', 'species2']\n metadata_list = [{'name': 'foo'}, None]\n self = AnnotationInteraction(img, verts_list=verts_list,\n valid_species=valid_species,\n metadata_list=metadata_list,\n fnum=0) # NOQA\n return self\n\n\nif __name__ == '__main__':\n \"\"\"\n CommandLine:\n python -m plottool_ibeis.interact_annotations --exec-test_interact_annots --show\n CommandLine:\n python -m plottool_ibeis.interact_annotations\n python -m plottool_ibeis.interact_annotations --allexamples\n python -m plottool_ibeis.interact_annotations --allexamples --noface --nosrc\n \"\"\"\n import multiprocessing\n multiprocessing.freeze_support() # for win32\n import utool as ut # NOQA\n ut.doctest_funcs()\n"
] | [
[
"matplotlib.lines.Line2D",
"numpy.array",
"numpy.isclose",
"numpy.linalg.norm",
"matplotlib.path.Path",
"numpy.sin",
"numpy.argmin",
"numpy.asarray",
"numpy.fabs",
"numpy.random.uniform",
"numpy.arctan2",
"numpy.cos",
"numpy.clip",
"numpy.maximum"
]
] |
thuzhf/Advanced-Machine-Learning | [
"fa79efd16fea34f2b5b95425058b70b546307ed4"
] | [
"de-anonymization/src/mnist_input_data.py"
] | [
"# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Functions for downloading and reading MNIST data.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport os\n\nimport numpy\nfrom six.moves import urllib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\nSOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'\n\n\ndef maybe_download(filename, work_directory):\n \"\"\"Download the data from Yann's website, unless it's already here.\"\"\"\n if not tf.gfile.Exists(work_directory):\n tf.gfile.MakeDirs(work_directory)\n filepath = os.path.join(work_directory, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.Size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath\n\n\ndef _read32(bytestream):\n dt = numpy.dtype(numpy.uint32).newbyteorder('>')\n return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]\n\n\ndef extract_images(filename):\n \"\"\"Extract the images into a 4D uint8 numpy array [index, y, x, depth].\"\"\"\n print('Extracting', filename)\n with tf.gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data\n\n\ndef dense_to_one_hot(labels_dense, num_classes=10):\n \"\"\"Convert class labels from scalars to one-hot vectors.\"\"\"\n num_labels = labels_dense.shape[0]\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot\n\n\ndef extract_labels(filename, one_hot=False):\n \"\"\"Extract the labels into a 1D uint8 numpy array [index].\"\"\"\n print('Extracting', filename)\n with tf.gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels)\n return labels\n\n\nclass DataSet(object):\n\n def __init__(self, images, labels, fake_data=False, one_hot=False,\n dtype=tf.float32):\n \"\"\"Construct a DataSet.\n\n one_hot arg is used only if fake_data is true. 
`dtype` can be either\n `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into\n `[0, 1]`.\n \"\"\"\n dtype = tf.as_dtype(dtype).base_dtype\n if dtype not in (tf.uint8, tf.float32):\n raise TypeError('Invalid image dtype %r, expected uint8 or float32' %\n dtype)\n if fake_data:\n self._num_examples = 10000\n self.one_hot = one_hot\n else:\n assert images.shape[0] == labels.shape[0], (\n 'images.shape: %s labels.shape: %s' % (images.shape,\n labels.shape))\n self._num_examples = images.shape[0]\n\n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns] (assuming depth == 1)\n assert images.shape[3] == 1\n images = images.reshape(images.shape[0],\n images.shape[1] * images.shape[2])\n if dtype == tf.float32:\n # Convert from [0, 255] -> [0.0, 1.0].\n images = images.astype(numpy.float32)\n images = numpy.multiply(images, 1.0 / 255.0)\n self._images = images\n self._labels = labels\n self._epochs_completed = 0\n self._index_in_epoch = 0\n\n @property\n def images(self):\n return self._images\n\n @property\n def labels(self):\n return self._labels\n\n @property\n def num_examples(self):\n return self._num_examples\n\n @property\n def epochs_completed(self):\n return self._epochs_completed\n\n def next_batch(self, batch_size, fake_data=False):\n \"\"\"Return the next `batch_size` examples from this data set.\"\"\"\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in xrange(batch_size)], [\n fake_label for _ in xrange(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]\n\n\ndef read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=tf.float32):\n class DataSets(object):\n pass\n data_sets = DataSets()\n\n if fake_data:\n def fake():\n return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)\n data_sets.train = fake()\n data_sets.validation = fake()\n data_sets.test = fake()\n return data_sets\n\n TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'\n TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'\n TEST_IMAGES = 't10k-images-idx3-ubyte.gz'\n TEST_LABELS = 't10k-labels-idx1-ubyte.gz'\n VALIDATION_SIZE = 5000\n\n local_file = maybe_download(TRAIN_IMAGES, train_dir)\n train_images = extract_images(local_file)\n\n local_file = maybe_download(TRAIN_LABELS, train_dir)\n train_labels = extract_labels(local_file, one_hot=one_hot)\n\n local_file = maybe_download(TEST_IMAGES, train_dir)\n test_images = extract_images(local_file)\n\n local_file = maybe_download(TEST_LABELS, train_dir)\n test_labels = extract_labels(local_file, one_hot=one_hot)\n\n validation_images = train_images[:VALIDATION_SIZE]\n validation_labels = train_labels[:VALIDATION_SIZE]\n train_images = train_images[VALIDATION_SIZE:]\n train_labels = train_labels[VALIDATION_SIZE:]\n\n data_sets.train = DataSet(train_images, train_labels, dtype=dtype)\n data_sets.validation = DataSet(validation_images, validation_labels,\n dtype=dtype)\n data_sets.test = DataSet(test_images, test_labels, dtype=dtype)\n\n 
data_sets.n_in = data_sets.train._images.shape[1] # compatible with my own `class DataSet`\n\n return data_sets\n"
] | [
[
"numpy.zeros",
"tensorflow.as_dtype",
"tensorflow.gfile.Open",
"tensorflow.gfile.Exists",
"numpy.random.shuffle",
"tensorflow.gfile.GFile",
"numpy.multiply",
"tensorflow.gfile.MakeDirs",
"numpy.arange",
"numpy.frombuffer",
"numpy.dtype"
]
] |
Briggybros/Uni-Deep-Learning | [
"5225130435356f1d7fc4c8bdbb3dcc34f9bef964"
] | [
"Lab_4_gs15687/cifar_augment.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nimport os\nimport os.path\n\nimport tensorflow as tf\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'labsheets', 'CIFAR10'))\nimport cifar10 as cf\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('data-dir', os.getcwd() + '/dataset/',\n 'Directory where the dataset will be stored and checkpoint. (default: %(default)s)')\ntf.app.flags.DEFINE_integer('max-steps', 10000,\n 'Number of mini-batches to train on. (default: %(default)d)')\ntf.app.flags.DEFINE_integer('log-frequency', 10,\n 'Number of steps between logging results to the console and saving summaries (default: %(default)d)')\ntf.app.flags.DEFINE_integer('save-model', 1000,\n 'Number of steps between model saves (default: %(default)d)')\n\n# Optimisation hyperparameters\ntf.app.flags.DEFINE_integer('batch-size', 256, 'Number of examples per mini-batch (default: %(default)d)')\ntf.app.flags.DEFINE_float('learning-rate', 1e-4, 'Learning rate (default: %(default)d)')\ntf.app.flags.DEFINE_integer('img-width', 32, 'Image width (default: %(default)d)')\ntf.app.flags.DEFINE_integer('img-height', 32, 'Image height (default: %(default)d)')\ntf.app.flags.DEFINE_integer('img-channels', 3, 'Image channels (default: %(default)d)')\ntf.app.flags.DEFINE_integer('num-classes', 10, 'Number of classes (default: %(default)d)')\ntf.app.flags.DEFINE_string('log-dir', '{cwd}/logs/'.format(cwd=os.getcwd()),\n 'Directory where to write event logs and checkpoint. (default: %(default)s)')\n\n\nrun_log_dir = os.path.join(FLAGS.log_dir,\n 'exp_BN_bs_{bs}_lr_{lr}_aug_flip_brightness'.format(bs=FLAGS.batch_size,\n lr=FLAGS.learning_rate))\n\ndef weight_variable(shape):\n \"\"\"weight_variable generates a weight variable of a given shape.\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial, name='weights')\n\ndef bias_variable(shape):\n \"\"\"bias_variable generates a bias variable of a given shape.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial, name='biases')\n\ndef deepnn(x, train):\n \"\"\"deepnn builds the graph for a deep net for classifying CIFAR10 images.\n\n Args:\n x: an input tensor with the dimensions (N_examples, 3072), where 3072 is the\n number of pixels in a standard CIFAR10 image.\n\n Returns:\n y: is a tensor of shape (N_examples, 10), with values\n equal to the logits of classifying the object images into one of 10 classes\n (airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck)\n img_summary: a string tensor containing sampled input images.\n \"\"\"\n # Reshape to use within a convolutional neural net. 
Last dimension is for\n # 'features' - it would be 1 one for a grayscale image, 3 for an RGB image,\n # 4 for RGBA, etc.\n\n x_image = tf.reshape(x, [-1, FLAGS.img_width, FLAGS.img_height, FLAGS.img_channels])\n\n x_image = tf.cond(train, lambda: tf.map_fn(tf.image.random_flip_left_right, x_image), lambda: x_image)\n x_image = tf.cond(train, lambda: tf.map_fn(lambda x: tf.image.random_brightness(x, 0.5), x_image), lambda: x_image)\n\n img_summary = tf.summary.image('Input_images', x_image)\n\n # First convolutional layer - maps one image to 32 feature maps.\n with tf.variable_scope('Conv_1'):\n conv1 = tf.layers.conv2d(\n inputs=x_image,\n filters=32,\n kernel_size=[5,5],\n padding='same',\n use_bias=False,\n name='conv1'\n )\n conv1_bn = tf.nn.relu(tf.layers.batch_normalization(conv1, training=train))\n pool1 = tf.layers.max_pooling2d(\n inputs=conv1_bn,\n pool_size=[2, 2],\n strides=2,\n name='pool1'\n )\n\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[5,5],\n padding='same',\n use_bias=False,\n name='conv2'\n )\n conv2_bn = tf.nn.relu(tf.layers.batch_normalization(conv2, training=train))\n pool2 = tf.layers.max_pooling2d(\n inputs=conv2_bn,\n pool_size=[2, 2],\n strides=2,\n name='pool2'\n )\n\n v = tf.reshape(pool2, [-1, 4096])\n\n fc1 = tf.layers.dense(\n inputs=v,\n units=1024,\n activation=tf.nn.relu,\n use_bias=True,\n name='fc1'\n )\n\n fc2 = tf.layers.dense(\n inputs=fc1,\n units=1024,\n activation=tf.nn.relu,\n use_bias=True,\n name='fc2'\n )\n\n out = tf.layers.dense(\n inputs=fc2,\n units=10,\n activation=None,\n use_bias=False,\n name='out'\n )\n\n return out, img_summary\n\n\ndef main(_):\n tf.reset_default_graph()\n\n # Import data\n cifar = cf.cifar10(batchSize=FLAGS.batch_size, downloadDir=FLAGS.data_dir)\n\n with tf.variable_scope('inputs'):\n # Create the model\n x = tf.placeholder(tf.float32, [None, FLAGS.img_width * FLAGS.img_height * FLAGS.img_channels])\n # Define loss and optimizer\n y_ = tf.placeholder(tf.float32, [None, FLAGS.num_classes])\n # Whether model is training\n train = tf.placeholder(tf.bool, [])\n\n # Build the graph for the deep net\n y_conv, img_summary = deepnn(x, train)\n\n # Define your loss function - softmax_cross_entropy\n with tf.variable_scope('x_entropy'):\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n \n # Define your AdamOptimiser, using FLAGS.learning_rate to minimixe the loss function\n decayed_learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, tf.Variable(0, trainable=False), 1000, 0.8)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n optimiser = tf.train.AdamOptimizer(decayed_learning_rate, name=\"Adam\").minimize(cross_entropy)\n\n # calculate the prediction and the accuracy\n accuracy, acc_op = tf.metrics.accuracy(labels=tf.argmax(y_, axis=1), predictions=tf.argmax(y_conv, axis=1))\n \n loss_summary = tf.summary.scalar('Loss', cross_entropy)\n acc_summary = tf.summary.scalar('Accuracy', accuracy)\n\n # summaries for TensorBoard visualisation\n validation_summary = tf.summary.merge([img_summary, acc_summary])\n training_summary = tf.summary.merge([img_summary, loss_summary])\n test_summary = tf.summary.merge([img_summary, acc_summary])\n\n # saver for checkpoints\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)\n\n with tf.Session() as sess:\n summary_writer = tf.summary.FileWriter(run_log_dir + '_train', sess.graph, flush_secs=5)\n summary_writer_validation = 
tf.summary.FileWriter(run_log_dir + '_validate', sess.graph, flush_secs=5)\n\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n\n # Training and validation\n for step in range(FLAGS.max_steps):\n # Training: Backpropagation using train set\n (trainImages, trainLabels) = cifar.getTrainBatch()\n (testImages, testLabels) = cifar.getTestBatch()\n \n _, summary_str = sess.run([optimiser, training_summary], feed_dict={x: trainImages, y_: trainLabels, train: True})\n\n \n if step % (FLAGS.log_frequency + 1) == 0:\n summary_writer.add_summary(summary_str, step)\n\n ## Validation: Monitoring accuracy using validation set\n if step % FLAGS.log_frequency == 0:\n accuracy, summary_str = sess.run([acc_op, validation_summary], feed_dict={x: testImages, y_: testLabels, train: False})\n print('step %d, accuracy on validation batch: %g' % (step, accuracy))\n summary_writer_validation.add_summary(summary_str, step)\n\n ## Save the model checkpoint periodically.\n if step % FLAGS.save_model == 0 or (step + 1) == FLAGS.max_steps:\n checkpoint_path = os.path.join(run_log_dir + '_train', 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n\n # Testing\n\n # resetting the internal batch indexes\n cifar.reset()\n evaluated_images = 0\n test_accuracy = 0\n batch_count = 0\n\n # don't loop back when we reach the end of the test set\n while evaluated_images != cifar.nTestSamples:\n (testImages, testLabels) = cifar.getTestBatch(allowSmallerBatches=True)\n test_accuracy_temp, _ = sess.run([acc_op, test_summary], feed_dict={x: testImages, y_: testLabels, train: False})\n\n batch_count = batch_count + 1\n test_accuracy = test_accuracy + test_accuracy_temp\n evaluated_images = evaluated_images + testLabels.shape[0]\n\n test_accuracy = test_accuracy / batch_count\n print('test set: accuracy on test set: %0.3f' % test_accuracy)\n\n\n\nif __name__ == '__main__':\n tf.app.run(main=main)\n"
] | [
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.reshape",
"tensorflow.control_dependencies",
"tensorflow.local_variables_initializer",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"tensorflow.Variable",
"tensorflow.global_variables",
"tensorflow.layers.batch_normalization",
"tensorflow.image.random_brightness",
"tensorflow.constant",
"tensorflow.variable_scope",
"tensorflow.layers.dense",
"tensorflow.app.run",
"tensorflow.get_collection",
"tensorflow.summary.merge",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.Session",
"tensorflow.map_fn",
"tensorflow.truncated_normal",
"tensorflow.layers.conv2d",
"tensorflow.placeholder",
"tensorflow.summary.image",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.layers.max_pooling2d",
"tensorflow.reset_default_graph",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.summary.FileWriter"
]
] |
eugene87222/kornia | [
"5f9b06b8d64bcd43eebd186ae21d22a1d0165b5e"
] | [
"test/morphology/test_bottom_hat.py"
] | [
"import pytest\nimport torch\nfrom torch.autograd import gradcheck\nfrom torch.testing import assert_allclose\n\nfrom kornia.morphology import bottom_hat\n\n\nclass TestBottomHat:\n def test_smoke(self, device, dtype):\n kernel = torch.rand(3, 3, device=device, dtype=dtype)\n assert kernel is not None\n\n @pytest.mark.parametrize(\"shape\", [(1, 3, 4, 4), (2, 3, 2, 4), (3, 3, 4, 1), (3, 2, 5, 5)])\n @pytest.mark.parametrize(\"kernel\", [(3, 3), (5, 5)])\n def test_cardinality(self, device, dtype, shape, kernel):\n img = torch.ones(shape, device=device, dtype=dtype)\n krnl = torch.ones(kernel, device=device, dtype=dtype)\n assert bottom_hat(img, krnl).shape == shape\n\n def test_kernel(self, device, dtype):\n tensor = torch.tensor([[0.5, 1.0, 0.3], [0.7, 0.3, 0.8], [0.4, 0.9, 0.2]], device=device, dtype=dtype)[\n None, None, :, :\n ]\n kernel = torch.tensor([[0.0, 1.0, 0.0], [1.0, 1.0, 1.0], [0.0, 1.0, 0.0]], device=device, dtype=dtype)\n expected = torch.tensor([[0.2, 0.0, 0.5], [0.0, 0.4, 0.0], [0.3, 0.0, 0.6]], device=device, dtype=dtype)[\n None, None, :, :\n ]\n assert_allclose(bottom_hat(tensor, kernel), expected, atol=1e-3, rtol=1e-3)\n\n def test_structural_element(self, device, dtype):\n tensor = torch.tensor([[0.5, 1.0, 0.3], [0.7, 0.3, 0.8], [0.4, 0.9, 0.2]], device=device, dtype=dtype)[\n None, None, :, :\n ]\n structural_element = torch.tensor(\n [[-1.0, 0.0, -1.0], [0.0, 0.0, 0.0], [-1.0, 0.0, -1.0]], device=device, dtype=dtype\n )\n expected = torch.tensor([[0.2, 0.0, 0.5], [0.0, 0.4, 0.0], [0.3, 0.0, 0.6]], device=device, dtype=dtype)[\n None, None, :, :\n ]\n assert_allclose(\n bottom_hat(tensor, torch.ones_like(structural_element), structuring_element=structural_element), expected,\n atol=1e-3, rtol=1e-3\n )\n\n def test_exception(self, device, dtype):\n input = torch.ones(1, 1, 3, 4, device=device, dtype=dtype)\n kernel = torch.ones(3, 3, device=device, dtype=dtype)\n\n with pytest.raises(TypeError):\n assert bottom_hat([0.0], kernel)\n\n with pytest.raises(TypeError):\n assert bottom_hat(input, [0.0])\n\n with pytest.raises(ValueError):\n test = torch.ones(2, 3, 4, device=device, dtype=dtype)\n assert bottom_hat(test, kernel)\n\n with pytest.raises(ValueError):\n test = torch.ones(2, 3, 4, device=device, dtype=dtype)\n assert bottom_hat(input, test)\n\n @pytest.mark.grad\n def test_gradcheck(self, device, dtype):\n input = torch.rand(2, 3, 4, 4, requires_grad=True, device=device, dtype=torch.float64)\n kernel = torch.rand(3, 3, requires_grad=True, device=device, dtype=torch.float64)\n assert gradcheck(bottom_hat, (input, kernel), raise_exception=True)\n\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n op = bottom_hat\n op_script = torch.jit.script(op)\n\n input = torch.rand(1, 2, 7, 7, device=device, dtype=dtype)\n kernel = torch.ones(3, 3, device=device, dtype=dtype)\n\n actual = op_script(input, kernel)\n expected = op(input, kernel)\n\n assert_allclose(actual, expected)\n"
] | [
[
"torch.rand",
"torch.autograd.gradcheck",
"torch.ones",
"torch.tensor",
"torch.testing.assert_allclose",
"torch.ones_like",
"torch.jit.script"
]
] |
takuma-ynd/pommerman-network | [
"8dfd5aae65262d0a404ddd75bc130081b6fdb638"
] | [
"pommerman/envs/v0.py"
] | [
"\"\"\"The baseline Pommerman environment.\n\nThis evironment acts as game manager for Pommerman. Further environments,\nsuch as in v1.py, will inherit from this.\n\"\"\"\nimport json\nimport os\n\nimport numpy as np\nimport time\nfrom gym import spaces\nfrom gym.utils import seeding\nimport gym\n\nfrom .. import characters\nfrom .. import constants\nfrom .. import forward_model\nfrom .. import graphics\nfrom .. import utility\n\n\nclass Pomme(gym.Env):\n '''The base pommerman env.'''\n metadata = {\n 'render.modes': ['human', 'rgb_array', 'rgb_pixel'],\n }\n\n def __init__(self,\n render_fps=None,\n game_type=None,\n board_size=None,\n agent_view_size=None,\n num_rigid=None,\n num_wood=None,\n num_items=None,\n max_steps=1000,\n is_partially_observable=False,\n bomberman_like=False,\n env=None,\n **kwargs):\n self._render_fps = render_fps\n self._intended_actions = []\n self._agents = None\n self._game_type = game_type\n self._board_size = board_size\n self._agent_view_size = agent_view_size\n self._num_rigid = num_rigid\n self._num_wood = num_wood\n self._num_items = num_items\n self._max_steps = max_steps\n self._viewer = None\n self._is_partially_observable = is_partially_observable\n self._bomberman_like = bomberman_like\n self._env = env\n\n self.training_agent = None\n self.model = forward_model.ForwardModel()\n\n # This can be changed through set_render_mode\n # or from the cli tool using '--render_mode=MODE_TYPE'\n self._mode = 'human'\n\n # Observation and Action Spaces. These are both geared towards a single\n # agent even though the environment expects actions and returns\n # observations for all four agents. We do this so that it's clear what\n # the actions and obs are for a single agent. Wrt the observations,\n # they are actually returned as a dict for easier understanding.\n self._set_action_space()\n self._set_observation_space()\n\n def _set_action_space(self):\n self.action_space = spaces.Discrete(6)\n\n def set_render_mode(self, mode):\n self._mode = mode\n\n def _set_observation_space(self):\n \"\"\"The Observation Space for each agent.\n\n There are a total of 3*board_size^2+12 observations:\n - all of the board (board_size^2)\n - bomb blast strength (board_size^2).\n - bomb life (board_size^2)\n - agent's position (2)\n - player ammo counts (1)\n - blast strength (1)\n - can_kick (1)\n - teammate (one of {AgentDummy.value, Agent3.value}).\n - enemies (three of {AgentDummy.value, Agent3.value}).\n \"\"\"\n bss = self._board_size**2\n min_obs = [0] * 3 * bss + [0] * 5 + [constants.Item.AgentDummy.value\n ] * 4\n max_obs = [len(constants.Item)] * bss + [self._board_size\n ] * bss + [25] * bss\n max_obs += [self._board_size] * 2 + [self._num_items] * 2 + [1]\n max_obs += [constants.Item.Agent3.value] * 4\n self.observation_space = spaces.Box(\n np.array(min_obs), np.array(max_obs))\n\n def set_agents(self, agents):\n self._agents = agents\n\n def set_training_agent(self, agent_id):\n self.training_agent = agent_id\n\n def set_init_game_state(self, game_state_file):\n \"\"\"Set the initial game state.\n\n The expected game_state_file JSON format is:\n - agents: list of agents serialized (agent_id, is_alive, position,\n ammo, blast_strength, can_kick)\n - board: board matrix topology (board_size^2)\n - board_size: board size\n - bombs: list of bombs serialized (position, bomber_id, life,\n blast_strength, moving_direction)\n - flames: list of flames serialized (position, life)\n - items: list of item by position\n - step_count: step count\n\n Args:\n game_state_file: JSON 
File input.\n \"\"\"\n self._init_game_state = None\n if game_state_file:\n with open(game_state_file, 'r') as f:\n self._init_game_state = json.loads(f.read())\n\n def make_board(self):\n self._board = utility.make_board(self._board_size, self._num_rigid,\n self._num_wood, len(self._agents), self._bomberman_like)\n\n def make_items(self):\n self._items = utility.make_items(self._board, self._num_items)\n\n def act(self, obs):\n agents = [agent for agent in self._agents \\\n if agent.agent_id != self.training_agent]\n return self.model.act(agents, obs, self.action_space)\n\n def notify_obs(self, obs, waiting=False):\n for agent in self._agents:\n if hasattr(agent, 'notify_obs'):\n agent.notify_obs(obs[agent.agent_id], waiting)\n\n def get_observations(self):\n self.observations = self.model.get_observations(\n self._board, self._agents, self._bombs, self._flames,\n self._is_partially_observable, self._agent_view_size,\n self._game_type, self._env)\n for obs in self.observations:\n obs['step_count'] = self._step_count\n return self.observations\n\n def _get_rewards(self):\n return self.model.get_rewards(self._agents, self._game_type,\n self._step_count, self._max_steps)\n\n def _get_done(self):\n return self.model.get_done(self._agents, self._step_count,\n self._max_steps, self._game_type,\n self.training_agent)\n\n def _get_info(self, done, rewards):\n return self.model.get_info(done, rewards, self._game_type, self._agents)\n\n def reset(self):\n assert (self._agents is not None)\n\n if self._init_game_state is not None:\n self.set_json_info()\n else:\n self._step_count = 0\n self.make_board()\n self.make_items()\n self._bombs = []\n self._flames = []\n self._powerups = []\n for agent_id, agent in enumerate(self._agents):\n pos = np.where(self._board == utility.agent_value(agent_id))\n row = pos[0][0]\n col = pos[1][0]\n agent.set_start_position((row, col))\n agent.reset()\n\n return self.get_observations()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, actions):\n self._intended_actions = actions\n\n max_blast_strength = self._agent_view_size or 10\n result = self.model.step(\n actions,\n self._board,\n self._agents,\n self._bombs,\n self._items,\n self._flames,\n max_blast_strength=max_blast_strength)\n self._board, self._agents, self._bombs, self._items, self._flames = \\\n result[:5]\n\n done = self._get_done()\n obs = self.get_observations()\n reward = self._get_rewards()\n info = self._get_info(done, reward)\n\n # additional logic to terminate the game:\n # Whenever one of human agents is killed, the game terminates.\n # Note that I don't modify the reward accordingly\n if not done: # if already done, don't bother.\n human_control_agents = [agent for agent in self._agents if hasattr(agent, '_is_human_controlled') and agent._is_human_controlled]\n other_agents = [agent for agent in self._agents if agent not in human_control_agents]\n\n n_dead_human_agents = sum(not agent.is_alive for agent in human_control_agents)\n n_dead_other_agents = sum(not agent.is_alive for agent in other_agents)\n if n_dead_human_agents > 0:\n done = True\n\n # if both human-agents are dead, it should've been handled already.\n if n_dead_human_agents == 2:\n print(\"WARNING: n_dead_human_agents==2 but not done...\")\n\n # if no enemies are dead, penalize human\n if n_dead_other_agents == 0:\n reward = [-1 if agent in human_control_agents else 1 for agent in self._agents]\n # if that kills one of the enemies, it's like draw\n elif 
n_dead_other_agents == 1:\n reward = [0 for agent in self._agents]\n else:\n print(\"WARNING: n_dead_other_agents==2 but not done...\")\n\n\n if done:\n # Callback to let the agents know that the game has ended.\n for agent in self._agents:\n agent.episode_end(reward[agent.agent_id])\n\n self._step_count += 1\n return obs, reward, done, info\n\n def render(self,\n mode=None,\n close=False,\n record_pngs_dir=None,\n record_json_dir=None,\n do_sleep=True):\n if close:\n self.close()\n return\n\n mode = mode or self._mode or 'human'\n\n if mode == 'rgb_array':\n rgb_array = graphics.PixelViewer.rgb_array(\n self._board, self._board_size, self._agents,\n self._is_partially_observable, self._agent_view_size)\n return rgb_array[0]\n\n if self._viewer is None:\n if mode == 'rgb_pixel':\n self._viewer = graphics.PixelViewer(\n board_size=self._board_size,\n agents=self._agents,\n agent_view_size=self._agent_view_size,\n partially_observable=self._is_partially_observable)\n else:\n self._viewer = graphics.PommeViewer(\n board_size=self._board_size,\n agents=self._agents,\n partially_observable=self._is_partially_observable,\n agent_view_size=self._agent_view_size,\n game_type=self._game_type)\n\n self._viewer.set_board(self._board)\n self._viewer.set_agents(self._agents)\n self._viewer.set_step(self._step_count)\n self._viewer.set_bombs(self._bombs)\n self._viewer.set_flames(self._flames)\n self._viewer.render()\n\n # Register all agents which need human input with Pyglet.\n # This needs to be done here as the first `imshow` creates the\n # window. Using `push_handlers` allows for easily creating agents\n # that use other Pyglet inputs such as joystick, for example.\n for agent in self._agents:\n if agent.has_user_input():\n self._viewer.window.push_handlers(agent)\n else:\n self._viewer.set_board(self._board)\n self._viewer.set_agents(self._agents)\n self._viewer.set_step(self._step_count)\n self._viewer.set_bombs(self._bombs)\n self._viewer.set_flames(self._flames)\n self._viewer.render()\n\n if record_pngs_dir:\n self._viewer.save(record_pngs_dir)\n if record_json_dir:\n self.save_json(record_json_dir)\n\n if do_sleep:\n time.sleep(1.0 / self._render_fps)\n\n def close(self):\n if self._viewer is not None:\n self._viewer.close()\n self._viewer = None\n\n for agent in self._agents:\n agent.shutdown()\n\n @staticmethod\n def featurize(obs):\n board = obs[\"board\"].reshape(-1).astype(np.float32)\n bomb_blast_strength = obs[\"bomb_blast_strength\"].reshape(-1) \\\n .astype(np.float32)\n bomb_life = obs[\"bomb_life\"].reshape(-1).astype(np.float32)\n position = utility.make_np_float(obs[\"position\"])\n ammo = utility.make_np_float([obs[\"ammo\"]])\n blast_strength = utility.make_np_float([obs[\"blast_strength\"]])\n can_kick = utility.make_np_float([obs[\"can_kick\"]])\n\n teammate = utility.make_np_float([obs[\"teammate\"].value])\n enemies = utility.make_np_float([e.value for e in obs[\"enemies\"]])\n return np.concatenate(\n (board, bomb_blast_strength, bomb_life, position, ammo,\n blast_strength, can_kick, teammate, enemies))\n\n def save_json(self, record_json_dir):\n info = self.get_json_info()\n count = \"{0:0=3d}\".format(self._step_count)\n suffix = count + '.json'\n path = os.path.join(record_json_dir, suffix)\n with open(path, 'w') as f:\n f.write(json.dumps(info, sort_keys=True, indent=4))\n\n def get_json_info(self):\n \"\"\"Returns a json snapshot of the current game state.\"\"\"\n ret = {\n 'board_size': self._board_size,\n 'step_count': self._step_count,\n 'board': self._board,\n 
'agents': self._agents,\n 'bombs': self._bombs,\n 'flames': self._flames,\n 'items': [[k, i] for k, i in self._items.items()],\n 'intended_actions': self._intended_actions,\n 'agent_port': {str(agent.agent_id): agent._port if hasattr(agent, '_port') else \"-1\" for agent in self._agents},\n 'agent_names':{str(agent.agent_id): str(agent) for agent in self._agents},\n 'is_human_controlled': {str(agent.agent_id): agent._is_human_controlled if hasattr(agent, '_is_human_controlled') else False for agent in self._agents},\n }\n for key, value in ret.items():\n ret[key] = json.dumps(value, cls=utility.PommermanJSONEncoder)\n return ret\n\n def set_json_info(self):\n \"\"\"Sets the game state as the init_game_state.\"\"\"\n board_size = int(self._init_game_state['board_size'])\n self._board_size = board_size\n self._step_count = int(self._init_game_state['step_count'])\n\n board_array = json.loads(self._init_game_state['board'])\n self._board = np.ones((board_size, board_size)).astype(np.uint8)\n self._board *= constants.Item.Passage.value\n for x in range(self._board_size):\n for y in range(self._board_size):\n self._board[x, y] = board_array[x][y]\n\n self._items = {}\n item_array = json.loads(self._init_game_state['items'])\n for i in item_array:\n self._items[tuple(i[0])] = i[1]\n\n agent_array = json.loads(self._init_game_state['agents'])\n for a in agent_array:\n agent = next(x for x in self._agents \\\n if x.agent_id == a['agent_id'])\n agent.set_start_position((a['position'][0], a['position'][1]))\n agent.reset(\n int(a['ammo']), bool(a['is_alive']), int(a['blast_strength']),\n bool(a['can_kick']))\n\n self._bombs = []\n bomb_array = json.loads(self._init_game_state['bombs'])\n for b in bomb_array:\n bomber = next(x for x in self._agents \\\n if x.agent_id == b['bomber_id'])\n moving_direction = b['moving_direction']\n if moving_direction is not None:\n moving_direction = constants.Action(moving_direction)\n self._bombs.append(\n characters.Bomb(bomber, tuple(b['position']), int(b['life']),\n int(b['blast_strength']), moving_direction))\n\n self._flames = []\n flame_array = json.loads(self._init_game_state['flames'])\n for f in flame_array:\n self._flames.append(\n characters.Flame(tuple(f['position']), f['life']))\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.ones"
]
] |
woes-lynne/3DRenderPy | [
"44d9106b51ae4ce8307c794b85d4ec649751beb3"
] | [
"src/RenderPy/Canvas.py"
] | [
"import os\nimport numpy as np\nfrom PIL import Image\nfrom RenderPy.Color import Color\n# ---------------------\n\"\"\" \nCanvas class helps to describe a set of pixel in grids that help generate images.\nEach canvas contains 2 elements: weight ,height\nwidth, height are all float value\nwidth: defining the number of columns, height: defining the number of rows\n\nCanvas class contains the following functions:\n__init__\n__eq__\npixelAt\nwritePixel\ncanvasToPPM\ncanvasToPNG\nsaveImage\n\"\"\"\n# ---------------------\n\"\"\" \n Make sure you are on ~/src\n ---------------------------------------------------\n nosetests -v ../test/CanvasTest.py\n --- OR ---- \n python3 -m nose -v ../test/CanvasTest.py\n --- OR ---- \n python -m nose -v ../test/CanvasTest.py\n ---------------------------------------------------\n\"\"\"\n\n\nclass Canvas():\n # ---------------------\n \"\"\"\n Canvas class takes in two numbers\n w is width, h is height\n \"\"\"\n # ---------------------\n\n def __init__(self, w: int, h: int):\n self.width = w\n self.height = h\n self.canv = [[Color() for _ in range(w)] for _ in range(h)]\n\n # ---------------------\n \"\"\"\n Define equivalence of two Canvas instances\n \"\"\"\n # ---------------------\n\n def __eq__(self, canvas2: \"Canvas\"):\n if self.width == canvas2.width and self.height == canvas2.height:\n for i in range(self.height):\n for j in range(self.width):\n if self.canv[i][j] != canvas2.canv[i][j]:\n return False\n return True\n return False\n\n # ---------------------\n \"\"\"\n Get the color of a given pixel\n ---- Inputs: --------\n * cl: A float indicating the column number of where the pixel is at\n * rw: A float indicating the row number of where the pixel is at\n ---- Outputs: --------\n * Color: the color at the pixel\n \n \"\"\"\n # ---------------------\n\n def pixelAt(self, cl: int, rw: int):\n return self.canv[rw][cl]\n\n # ---------------------\n \"\"\"\n Change the color of a given pixel\n ---- Inputs: --------\n * cl: A float indicating the column number of where the pixel is at\n * rw: A float indicating the row number of where the pixel is at\n * color: A Color wanted to be at the pixel\n \n \"\"\"\n # ---------------------\n\n def writePixel(self, cl: int, rw: int, color: \"Color\"):\n self.canv[rw][cl] = color\n # -----------------\n \"\"\"\n Make sure you are on ~/src\n ---------------------------------------------------\n nosetests -v ../test/CanvasTest.py:test_writePixel\n --- OR ---- \n python3 -m nose -v ../test/CanvasTest.py:test_writePixel\n --- OR ---- \n python -m nose -v ../test/CanvasTest.py:test_writePixel\n ---------------------------------------------------\n \"\"\"\n\n # ---------------------\n \"\"\"\n Convert the canvas to ppm formatted images\n Generally existing PPM softwares accept a line more than 70 characters, \n but there are some needs to have each line having less than or equal to 70 characters\n We also need a new line at the end of the string\n ---- Outputs: --------\n * result: A string containing the final ppm file\n \"\"\"\n # ---------------------\n\n def canvasToPPM(self):\n result = \"P3\\n\"+str(self.width) + \" \" + str(self.height) + \"\\n255\\n\"\n for row in self.canv:\n temp = \"\"\n for pix in row:\n # providing a conversion from 0 to 1 to 255 scale\n # if greater than 1, we read it as 1\n # if smaller than 0, we read it as 0\n def setColor(color):\n if color >= 1:\n return 255\n elif color <= 0:\n return 0\n else:\n return int(round(color * 255, 0))\n red = str(setColor(pix.r))\n green = 
str(setColor(pix.g))\n blue = str(setColor(pix.b))\n # for each color, if the existing line adding 1 to 3 characters\n # we cut it off and strip the last space and add a new line\n # so that we fulfill the 70 character requirment and do not cut off a color\n if len(temp) + len(red) > 70:\n result += temp[:-1] + \"\\n\"\n temp = \"\"\n temp += red + \" \"\n if len(temp) + len(green) > 70:\n result += temp[:-1] + \"\\n\"\n temp = \"\"\n temp += green + \" \"\n if len(temp) + len(blue) > 70:\n result += temp[:-1] + \"\\n\"\n temp = \"\"\n temp += blue + \" \"\n temp = temp[:-1] + \"\\n\"\n result += temp\n return result\n # -----------------\n \"\"\"\n Make sure you are on ~/src\n ---------------------------------------------------\n nosetests -v ../test/CanvasTest.py:test_canvasToPPM\n --- OR ---- \n python3 -m nose -v ../test/CanvasTest.py:test_canvasToPPM\n --- OR ---- \n python -m nose -v ../test/CanvasTest.py:test_canvasToPPM\n ---------------------------------------------------\n \"\"\"\n\n # ---------------------\n \"\"\"\n Convert the canvas to a numpy array in order to call PIL.image to convert it to png image\n ---- Outputs: --------\n * result: A numpy array of size (h,w,3)\n \"\"\"\n # ---------------------\n\n def canvasToPNG(self):\n result = []\n for rw in range(self.height):\n row = []\n for cl in range(self.width):\n cur = np.rint(self.pixelAt(cl, rw).arr*255)\n if cur[0] > 255:\n cur[0] = 255\n elif cur[0] < 0:\n cur[0] = 0\n if cur[1] > 255:\n cur[1] = 255\n elif cur[1] < 0:\n cur[1] = 0\n if cur[2] > 255:\n cur[2] = 255\n elif cur[2] < 0:\n cur[2] = 0\n row.append(cur)\n result.append(row)\n result = np.array(result)\n result = result.astype(np.uint8)\n return result\n # -----------------\n \"\"\"\n Make sure you are on ~/src\n ---------------------------------------------------\n nosetests -v ../test/CanvasTest.py:test_canvasToPNG\n --- OR ---- \n python3 -m nose -v ../test/CanvasTest.py:test_canvasToPNG\n --- OR ---- \n python -m nose -v ../test/CanvasTest.py:test_canvasToPNG\n ---------------------------------------------------\n \"\"\"\n\n # ---------------------\n \"\"\"\n Save the result string from canvasToPPM to ppm file\n ---- Inputs: --------\n * filename: A string indicating the file name you want for the image\n * directory: default is the images folder, or a specefic one of your choice \n \"\"\"\n # ---------------------\n\n def saveImage(self, filename: str, directory: str = \"../images/\", fileType=\"png\"):\n if not os.path.isdir(directory):\n os.mkdir(directory)\n path = directory + filename + \".\" + fileType\n if fileType == \"ppm\":\n result = self.canvasToPPM()\n f = open(path, \"w\")\n f.write(result)\n f.close()\n else:\n result = self.canvasToPNG()\n img = Image.fromarray(result, 'RGB')\n img.save(path)\n print(\n filename + \" written successfully, please take a look at folder \" + directory)\n\n # -----------------\n \"\"\"\n Go to your chosen folder to see whether the image is what you want!\n \"\"\"\n"
] | [
[
"numpy.array"
]
] |
shray2k/low_pt_tau_reco | [
"f3a8700b86a6630239fd7516d05977cd50a28b44"
] | [
"model/model.py"
] | [
"import tensorflow as tf\nimport numpy as np\nimport uproot\nfrom array import array\n\n#check whether you are using CPUs or GPUs\nfrom tensorflow.python.client import device_lib\nprint('Available devices are', device_lib.list_local_devices())\nprint('#######')\n\ntau_feature_names = [\n b'pi_minus1_pt',\n b'pi_minus1_eta',\n b'pi_minus1_phi',\n b'pi_minus2_pt',\n b'pi_minus2_eta',\n b'pi_minus2_phi',\n b'pi_minus3_pt',\n b'pi_minus3_eta',\n b'pi_minus3_phi',\n]\ntau_label_names = [\n b'neutrino_pt',\n b'neutrino_eta',\n b'neutrino_phi',\n]\n\nantitau_feature_names = [\n b'pi_plus1_pt',\n b'pi_plus1_eta',\n b'pi_plus1_phi',\n b'pi_plus2_pt',\n b'pi_plus2_eta',\n b'pi_plus2_phi',\n b'pi_plus3_pt',\n b'pi_plus3_eta',\n b'pi_plus3_phi',\n]\nantitau_label_names = [\n b'antineutrino_pt',\n b'antineutrino_eta',\n b'antineutrino_phi',\n]\n\n#file = uproot.open('cartesian_upsilon_taus.root')['tree']\n#file = uproot.open('momentum_vector_data100k.root')['tree']\n#file = uproot.open('cartesian_upsilon_taus_ALL.root')['tree']\n#file = uproot.open('cartesian_upsilon_taus_15GeV_Mary_ALL.root')['tree']\n#file = uproot.open('cartesian_upsilon_tausGS_88_plus_91.root')['tree'] #ok this is probably too big, need to figure out how to get this to work\n#file = uproot.open('cartesian_upsilon_taus_GSwBP77.root')['tree'] #still too big...\n#file = uproot.open('cartesian_upsilon_taus_GSwBP6.root')['tree'] #3204 GS events\nfile = uproot.open('momentum_vector_data100k_WSO.root')['tree']\n\ntau_features = []\ntau_labels = []\nantitau_features = []\nantitau_labels = []\n\nfor name in tau_feature_names:\n if b'_phi' in name:\n tau_features.append(\n np.sin(file.array(name))\n )\n tau_features.append(\n np.cos(file.array(name))\n )\n else:\n tau_features.append(file.array(name))\n\nfor name in tau_label_names:\n if b'_phi' in name:\n tau_labels.append(\n np.sin(file.array(name))\n )\n tau_labels.append(\n np.cos(file.array(name))\n )\n else:\n tau_labels.append(file.array(name))\n\nfor name in antitau_feature_names:\n if b'_phi' in name:\n antitau_features.append(\n np.sin(file.array(name))\n )\n antitau_features.append(\n np.cos(file.array(name))\n )\n else:\n antitau_features.append(file.array(name))\n\nfor name in antitau_label_names:\n if b'_phi' in name:\n antitau_labels.append(\n np.sin(file.array(name))\n )\n antitau_labels.append(\n np.cos(file.array(name))\n )\n else:\n antitau_labels.append(file.array(name))\n\ntau_features = np.transpose(np.array(tau_features))\n\ntotal_tau_pt = tau_features[:, 0] + tau_features[:, 4] + tau_features[:, 8]\n#comment out the lines below if you do NOT want normalized pT\n#tau_features[:, 0] = tau_features[:, 0] / total_tau_pt\n#tau_features[:, 4] = tau_features[:, 4] / total_tau_pt\n#tau_features[:, 8] = tau_features[:, 8] / total_tau_pt\n\ntau_features_train = tau_features[0: int(0.9 * tau_features.shape[0]), :]\ntau_features_test = tau_features[int(0.9 * tau_features.shape[0]):, :]\n\ntau_labels = np.transpose(np.array(tau_labels))\ntau_labels_train = tau_labels[0: int(0.9 * tau_labels.shape[0]), :]\ntau_labels_test = tau_labels[int(0.9 * tau_labels.shape[0]):, :]\n\nantitau_features = np.transpose(np.array(antitau_features))\n\ntotal_antitau_pt = antitau_features[:, 0] + antitau_features[:, 4] + antitau_features[:, 8]\n#Comment out the lines below if you do NOT want normalized pT\n#antitau_features[:, 0] = antitau_features[:, 0] / total_antitau_pt\n#antitau_features[:, 4] = antitau_features[:, 4] / total_antitau_pt\n#antitau_features[:, 8] = antitau_features[:, 8] / 
total_antitau_pt\n\nantitau_features_train = antitau_features[0: int(0.9 * antitau_features.shape[0]), :]\nantitau_features_test = antitau_features[int(0.9 * antitau_features.shape[0]):, :]\n\nantitau_labels = np.transpose(np.array(antitau_labels))\nantitau_labels_train = antitau_labels[0: int(0.9 * antitau_labels.shape[0]), :]\nantitau_labels_test = antitau_labels[int(0.9 * antitau_labels.shape[0]):, :]\n\n\ndef create_model():\n model = tf.keras.Sequential()\n model.add(\n tf.keras.layers.Dense(640, activation=tf.keras.activations.relu, input_shape=(12,)) \n )\n model.add(\n tf.keras.layers.Dropout(0.3)\n )\n model.add(\n tf.keras.layers.Dense(1280, activation=tf.keras.activations.relu)\n )\n model.add(\n tf.keras.layers.Dropout(0.5)\n )\n model.add(\n tf.keras.layers.Dense(2560, activation=tf.keras.activations.relu)\n )\n model.add(\n tf.keras.layers.Dropout(0.5)\n )\n model.add(\n tf.keras.layers.Dense(1280, activation=tf.keras.activations.relu)\n )\n model.add(\n tf.keras.layers.Dropout(0.3)\n )\n model.add(\n tf.keras.layers.Dense(640, activation=tf.keras.activations.relu)\n )\n model.add(\n tf.keras.layers.Dropout(0.2)\n )\n model.add(\n tf.keras.layers.Dense(320, activation=tf.keras.activations.relu)\n )\n model.add(\n tf.keras.layers.Dropout(0.1)\n )\n model.add(\n tf.keras.layers.Dense(160, activation=tf.keras.activations.relu)\n )\n model.add(\n tf.keras.layers.Dropout(0.1)\n )\n model.add(\n tf.keras.layers.Dense(128, activation=tf.keras.activations.relu)\n )\n model.add(\n tf.keras.layers.Dropout(0.1)\n )\n model.add(\n tf.keras.layers.Dense(64, activation=tf.keras.activations.relu)\n )\n model.add(\n tf.keras.layers.Dropout(0.1)\n )\n model.add(\n tf.keras.layers.Dense(32, activation=tf.keras.activations.relu)\n )\n model.add(\n tf.keras.layers.Dropout(0.05)\n )\n model.add(\n tf.keras.layers.Dense(8, activation=tf.keras.activations.relu)\n )\n model.add(\n tf.keras.layers.Dense(4)\n )\n\n model.compile(\n optimizer=tf.keras.optimizers.Adam(lr=0.001, decay=0.0001),\n loss=tf.keras.losses.mean_squared_error\n )\n return model\n\n\ntau_model = create_model()\nantitau_model = create_model()\n\ntau_model.fit(\n tau_features_train,\n tau_labels_train,\n batch_size=20,\n epochs=400,\n validation_data=(tau_features_test, tau_labels_test)\n)\ntau_model.evaluate(\n tau_features_test,\n tau_labels_test,\n batch_size=10\n)\nantitau_model.fit(\n antitau_features_train,\n antitau_labels_train,\n batch_size=20,\n epochs=400,\n validation_data=(antitau_features_test, antitau_labels_test)\n)\nantitau_model.evaluate(\n antitau_features_test,\n antitau_labels_test,\n batch_size=10\n)\n\npred = tau_model.predict(\n tau_features_test\n)\nanti_pred = antitau_model.predict(\n antitau_features_test\n)\n\ntau_model.save('tau_model_reproduce_WSO_my_own_try_no_norm.hdf5')\nantitau_model.save('antitau_model_reproduce_WSO_my_own_no_norm.hdf5')\nprint ('tau_model summary is:', tau_model.summary())\nprint ('antitau_model summary is:', antitau_model.summary())\n"
] | [
[
"numpy.array",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Dropout",
"tensorflow.python.client.device_lib.list_local_devices",
"tensorflow.keras.Sequential",
"tensorflow.keras.optimizers.Adam"
]
] |