repo_name (string, 6-130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
CameleoGrey/ProfitTM | [
"d5af3f3b9a3a6c278316089331f981e77fb81ebc"
]
| [
"profittm/TreeProfitTM.py"
]
| [
"from profittm.ProfitTM import ProfitTM\nimport numpy as np\nimport pandas as pd\nfrom profittm.save_load import save, load\nimport networkx as nx\nimport uuid\nfrom profittm.graph_draw import plotGraph\nfrom sklearn.preprocessing import OneHotEncoder\n\n\nclass TreeProfitTM():\n\n def __init__(self, maxDepth=None, curLevel=0, parentsName=None):\n self.node = None\n self.childs = {}\n self.topicNames = None\n self.topicCount = None\n self.maxDepth = maxDepth\n self.curLevel = curLevel\n self.treeName = str(uuid.uuid4())\n\n if curLevel == 0 and parentsName is None:\n self.isRoot = True\n else:\n self.isRoot = False\n\n pass\n\n def fit(self, x):\n\n if self.curLevel == 0:\n #x = np.array(x)\n self.node = ProfitTM()\n self.node.fitTextVectorizer(x)\n self.node.cacheTextVectors(x)\n self.node.fit(x)\n self.topicNames = self.node.getTopicNames(x)\n self.topicCount = self.node.topicCount\n else:\n self.node.cacheTextVectors(x)\n self.node.fit(x)\n self.topicNames = self.node.getTopicNames(x)\n self.topicCount = self.node.topicCount\n\n if self.curLevel + 1 < self.maxDepth:\n y = self.node.predict(x)\n #self.node.cleanCache()\n uniqY = np.unique(y)\n topicDocs = {}\n for topic in uniqY:\n #topicDocs[topic] = x[y == topic]\n ########\n topicDocs[topic] = []\n for i in range(len(y)):\n if y[i] == topic:\n topicDocs[topic].append(x[i])\n ########\n self.childs[topic] = TreeProfitTM(self.maxDepth, self.curLevel + 1)\n self.childs[topic].node = ProfitTM()\n self.childs[topic].node.setVectorizer(self.node.vectorizer)\n #if self.isRoot is False:\n # self.node.vectorizer = None\n\n self.node.cleanCache()\n\n if self.curLevel + 1 < self.maxDepth:\n for topic in topicDocs.keys():\n self.childs[topic].fit(topicDocs[topic])\n pass\n\n def predict(self, x, returnVectors=False):\n\n sharedPredicts = self.prepareToPredict(x)\n self.hPredict(sharedPredicts, predInds=None)\n\n if returnVectors:\n sharedPredicts = self.convertPredictsToVectors(sharedPredicts)\n\n return sharedPredicts\n\n def prepareToPredict(self, x):\n sharedPredicts = pd.DataFrame({\"docs\": x})\n for i in range(self.maxDepth):\n sharedPredicts[i] = None\n return sharedPredicts\n\n def hPredict(self, sharedPredicts, predInds=None):\n\n if self.curLevel < self.maxDepth:\n\n if predInds is None:\n predInds = sharedPredicts.index\n\n docs = sharedPredicts[\"docs\"]\n y = self.node.predict(docs[predInds].values)\n sharedPredicts.iloc[predInds, self.curLevel + 1] = y\n uniqY = np.unique(y)\n for topic in uniqY:\n nextInds = docs[predInds][y == topic].index\n if len(self.childs.keys()) == 0:\n self.leafPredict(sharedPredicts, nextInds)\n else:\n self.childs[topic].hPredict(sharedPredicts, nextInds)\n pass\n\n def leafPredict(self, sharedPredicts, predInds):\n docs = sharedPredicts[\"docs\"]\n y = self.node.predict(docs[predInds].values)\n sharedPredicts.iloc[predInds, self.curLevel + 1] = y\n pass\n\n def plotTopicTree(self):\n topicGraph = nx.OrderedGraph()\n self.buildTopicGraph(topicGraph)\n plotGraph(topicGraph)\n pass\n\n def buildTopicGraph(self, graph):\n\n if self.isRoot:\n for parentTopic in self.topicNames.keys():\n parentTopicName = self.extractTopicName(self.topicNames[parentTopic])\n graph.add_edge(\"docs\", parentTopicName)\n\n if len(self.childs.keys()) != 0:\n for parentTopic in self.topicNames.keys():\n parentTopicName = self.extractTopicName(self.topicNames[parentTopic])\n for childTopic in self.childs[parentTopic].topicNames.keys():\n childTopicName = self.extractTopicName(self.childs[parentTopic].topicNames[childTopic])\n 
graph.add_edge(parentTopicName, childTopicName)\n for key in self.childs.keys():\n self.childs[key].buildTopicGraph(graph)\n pass\n\n def extractTopicName(self, topicTuple):\n topic = []\n for i in range(len(topicTuple)):\n topic.append(topicTuple[i][0])\n topic = \"\\n\".join(topic)\n return topic\n\n def buildTreeGraph(self, graph):\n if len(self.childs.keys()) != 0:\n for key in self.childs.keys():\n graph.add_edge(self.treeName, self.childs[key].treeName, weight=key)\n for key in self.childs.keys():\n self.childs[key].buildTreeGraph(graph)\n pass\n\n def getTopicDict(self):\n\n sharedTopicDict = {}\n self.enrichTopicDict(sharedTopicDict)\n return sharedTopicDict\n\n def enrichTopicDict(self, sharedTopicDict, parentTopicID=None):\n if self.isRoot:\n for parentTopic in self.topicNames.keys():\n sharedTopicDict[parentTopic] = self.extractTopicName(self.topicNames[parentTopic])\n\n if len(self.childs.keys()) != 0:\n for parentTopic in self.topicNames.keys():\n for childTopic in self.childs[parentTopic].topicNames.keys():\n if self.isRoot:\n parentTopicID = str(parentTopic)\n childID = str(parentTopicID) + \".\" + str(childTopic)\n sharedTopicDict[childID] = self.extractTopicName(self.childs[parentTopic].topicNames[childTopic])\n self.childs[parentTopic].enrichTopicDict(sharedTopicDict, childID)\n pass\n\n def getTopicVectorsDict(self):\n topicDict = self.getTopicDict()\n topicIds = np.array(list(topicDict.keys())).reshape((-1, 1))\n topicVecs = OneHotEncoder(dtype=int, sparse=False).fit_transform(topicIds)\n topicIds = topicIds.reshape((-1, ))\n\n topicVecsDict = {}\n for i in range(len(topicVecs)):\n topicVecsDict[topicIds[i]] = topicVecs[i]\n\n return topicVecsDict\n\n def convertPredictsToVectors(self, sharedPredicts):\n labels = np.zeros((len(sharedPredicts), self.maxDepth), dtype=int)\n\n labels = labels.T\n for i in range(self.maxDepth):\n labels[i] = sharedPredicts[i].values\n labels = labels.T\n print(labels[:10])\n\n labels = list(labels)\n for i in range(len(labels)):\n labels[i] = list(labels[i])\n for j in range(len(labels[i])):\n labels[i][j] = str(labels[i][j])\n print(labels[:10])\n\n ids = []\n for i in range(len(labels)):\n ids.append( \".\".join(labels[i]) )\n print(ids[:10])\n\n topicVectorsDict = self.getTopicVectorsDict()\n vectors = []\n for i in range(len(ids)):\n vectors.append( topicVectorsDict[ids[i]] )\n vectors = np.array(vectors)\n print(vectors[:10])\n\n return vectors\n\n def save(self, name, dir):\n print(\"Saving whole hierarchy topic model to {}...\".format(dir + name))\n treeGraph = nx.OrderedGraph()\n self.buildTreeGraph(treeGraph)\n save(dir + name + \"_treegraph.pkl\", treeGraph)\n vectorizer = self.node.vectorizer\n save(dir + name + \"_vectorizer.pkl\", vectorizer)\n self.removeVectorizers()\n self.saveTrees(name, dir)\n self.placeBackVectorizers(vectorizer)\n print(\"The whole hierarchy topic model saved.\")\n pass\n\n def removeVectorizers(self):\n self.node.vectorizer = None\n if len(self.childs.keys()) != 0:\n for key in self.childs.keys():\n self.childs[key].removeVectorizers()\n\n def placeBackVectorizers(self, vectorizer):\n self.node.vectorizer = vectorizer\n if len(self.childs.keys()) != 0:\n for key in self.childs.keys():\n self.childs[key].node.vectorizer = vectorizer\n\n\n def saveTrees(self, name, dir):\n\n print(\"Saving tree {}\".format(dir + name + \"_{}\".format(self.treeName)))\n\n self.node.save(name + \"_{}\".format(self.treeName), dir)\n node = self.node\n self.node = None\n\n childs = self.childs\n self.childs = None\n save(dir 
+ name + \"_{}_metadata.pkl\".format(self.treeName), self)\n self.node = node\n self.childs = childs\n\n for key in self.childs.keys():\n self.childs[key].saveTrees(name, dir)\n\n def load(self, name, dir):\n\n print(\"Loading whole hierarchy topic model from {}...\".format(dir + name))\n treeGraph = load(dir + name + \"_treegraph.pkl\")\n vectorizer = load(dir + name + \"_vectorizer.pkl\")\n\n trees = {}\n for node in treeGraph.nodes:\n treeName = node\n trees[node] = self.loadTree(name, dir, treeName)\n\n edges = list(treeGraph.edges.data(\"weight\"))\n for edge in edges:\n trees[edge[0]].childs[edge[2]] = trees[edge[1]]\n\n loadedTree = None\n for key in trees.keys():\n if trees[key].isRoot:\n loadedTree = trees[key]\n break\n\n loadedTree.placeBackVectorizers(vectorizer)\n print(\"The whole hierarchy topic model loaded.\")\n return loadedTree\n\n def loadTree(self, name, dir, treeName):\n\n tree = load(dir + name + \"_{}_metadata.pkl\".format(treeName))\n tree.node = ProfitTM()\n tree.node.load(name + \"_{}\".format(treeName), dir)\n tree.childs = {}\n return tree"
]
| [
[
"pandas.DataFrame",
"numpy.array",
"sklearn.preprocessing.OneHotEncoder",
"numpy.unique"
]
]
|
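A minimal usage sketch for the `TreeProfitTM` class in this record, assuming the `profittm` package from CameleoGrey/ProfitTM is importable as its own imports suggest; the `docs` corpus and the chosen `maxDepth` are placeholders.

```python
# Hypothetical usage of the hierarchical topic model recorded above.
# Assumes the profittm package (CameleoGrey/ProfitTM) is installed.
from profittm.TreeProfitTM import TreeProfitTM

docs = ["first document text ...", "second document text ..."]  # placeholder corpus

model = TreeProfitTM(maxDepth=2)                    # two-level topic hierarchy
model.fit(docs)                                     # fits one ProfitTM node per level
predictions = model.predict(docs)                   # DataFrame with one topic column per level
vectors = model.predict(docs, returnVectors=True)   # one-hot vectors over topic paths
model.plotTopicTree()                               # needs a networkx version that still has OrderedGraph
```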
anahm/inferring-population-preferences | [
"1eec9c6966e65c615f3cf5bd769ab121369b926d"
]
| [
"val_code/bimixture_mh.py"
]
| [
"\r\nimport numpy as np\r\nimport scipy\r\nimport scipy.stats\r\n\r\n\r\ndef run(data, n_iter = 1000):\r\n\r\n samples = [None for i in range(n_iter)]\r\n values = [None for i in range(n_iter)]\r\n\r\n samples[0] = np.random.normal(size = 3)\r\n values[0] = log_like(samples[0], data)\r\n\r\n for i in range(1, n_iter):\r\n\r\n par = np.random.choice(len(samples[i-1]))\r\n\r\n samples[i] = np.copy(samples[i-1])\r\n\r\n samples[i][par] = proposal(samples[i][par])\r\n\r\n old_like = values[i-1]\r\n new_like = log_like(samples[i], data)\r\n\r\n if np.log(np.random.random()) < new_like - old_like:\r\n values[i] = new_like\r\n else:\r\n samples[i] = np.copy(samples[i-1])\r\n values[i] = old_like\r\n\r\n final = samples[-1]\r\n return {'mean0':final[0], 'mean1':final[1], 'sd':np.exp(final[2])}\r\n\r\ndef proposal(value):\r\n return value + np.random.normal()*0.1\r\n\r\ndef log_like(pars, data):\r\n mu0, mu1, log_sd = pars\r\n sd = np.exp(log_sd)\r\n\r\n comp0_loglike = np.log(0.5) + scipy.stats.norm.logpdf(data, mu0, sd)\r\n comp1_loglike = np.log(0.5) + scipy.stats.norm.logpdf(data, mu1, sd)\r\n\r\n return sum(scipy.misc.logsumexp([comp0_loglike, comp1_loglike], axis = 0))\r\n\r\n\r\n# if __name__ == \"__main__\":\r\n#\r\n# pars = [-5,10,0.1]\r\n#\r\n# data = []\r\n# for i in range(1000):\r\n# if np.random.random() < 0.5:\r\n# data += [np.random.normal()*pars[2] + pars[0]]\r\n# else:\r\n# data += [np.random.normal()*pars[2] + pars[1]]\r\n#\r\n# results = run(data)\r\n# print results\r\n"
]
| [
[
"numpy.random.normal",
"scipy.stats.norm.logpdf",
"numpy.log",
"numpy.copy",
"numpy.exp",
"numpy.random.random",
"scipy.misc.logsumexp"
]
]
|
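A short driver sketch for the Metropolis-Hastings mixture sampler in this record, adapted from the commented-out `__main__` block in the same file; the module import name is assumed from the file path. Note that `scipy.misc.logsumexp`, which `log_like` calls, has been removed from SciPy and now lives at `scipy.special.logsumexp`, so recent SciPy versions need that call updated.

```python
# Hypothetical driver for bimixture_mh.run (val_code/bimixture_mh.py).
import numpy as np
import bimixture_mh  # assumed import name for the module above

true_pars = [-5.0, 10.0, 0.1]  # mean0, mean1, sd

# Simulate a balanced two-component Gaussian mixture.
data = []
for _ in range(1000):
    mean = true_pars[0] if np.random.random() < 0.5 else true_pars[1]
    data.append(np.random.normal() * true_pars[2] + mean)

results = bimixture_mh.run(np.array(data), n_iter=2000)
print(results)  # {'mean0': ..., 'mean1': ..., 'sd': ...}
```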
vankhoa21991/selfsup | [
"34b21264db719265bb073441867d1a4f14836ce9"
]
| [
"libs/model/backbone/.ipynb_checkpoints/model_AE-checkpoint.py"
]
| [
"# Torch\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision.models as models\n\nclass Autoencoder(nn.Module):\n def __init__(self):\n super(Autoencoder, self).__init__()\n # Input size: [batch, 3, 512, 512]\n # Output size: [batch, 3, 512, 512]\n\n self.encoder = models.resnet50(True)\n self.encoder.fc = nn.Linear(self.encoder.fc.in_features, 2048)\n\n self.decoder = nn.Sequential(\n nn.ConvTranspose2d(64, 52, 4, stride=2, padding=1), # [batch, 48, 4, 4]\n nn.ReLU(),\n\t\t\tnn.ConvTranspose2d(52, 48, 4, stride=2, padding=1), # [batch, 24, 8, 8]\n nn.ReLU(),\n\t\t\tnn.ConvTranspose2d(48, 36, 4, stride=2, padding=1), # [batch, 12, 16, 16]\n nn.ReLU(),\n nn.ConvTranspose2d(36, 24, 4, stride=2, padding=1), # [batch, 3, 32, 32]\n nn.ReLU(),\n nn.ConvTranspose2d(24, 12, 4, stride=2, padding=1), # [batch, 12, 16, 16]\n nn.ReLU(),\n nn.ConvTranspose2d(12, 6, 4, stride=2, padding=1), # [batch, 12, 16, 16]\n nn.ReLU(),\n nn.ConvTranspose2d(6, 3, 4, stride=2, padding=1), # [batch, 3, 32, 32]\n nn.Sigmoid(),\n )\n\n def forward(self, x):\n encoded = self.encoder(x)\n # print(x.size())\n # print(encoded.size())\n\n encoded_reform = torch.reshape(encoded, (-1, 64, 4, 4))\n decoded = self.decoder(encoded_reform)\n # print(decoded.size())\n return decoded, encoded"
]
| [
[
"torch.nn.Linear",
"torch.nn.Sigmoid",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.reshape"
]
]
|
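A forward-pass smoke test for the `Autoencoder` in this record; the import name is assumed from the checkpoint file name. As written, the 2048-dimensional ResNet-50 encoding is reshaped to `(-1, 64, 4, 4)` (1024 values per sample), so the decoder output carries twice the input batch size.

```python
# Hypothetical smoke test for the Autoencoder class shown above.
import torch
from model_AE import Autoencoder  # assumed import name

model = Autoencoder()  # resnet50(True) downloads pretrained ImageNet weights
model.eval()

x = torch.randn(2, 3, 512, 512)  # [batch, 3, 512, 512], per the class comment
with torch.no_grad():
    decoded, encoded = model(x)

print(encoded.shape)  # torch.Size([2, 2048]) from the replaced fc head
print(decoded.shape)  # torch.Size([4, 3, 512, 512]); the reshape doubles the batch
```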
mrbot-ai/deep_qa | [
"a1731331e12b921b4dbb43433f9c028b362495e8"
]
| [
"deep_qa/contrib/background_search/retrieval_encoders.py"
]
| [
"from collections import OrderedDict\nimport gzip\nimport logging\nfrom typing import List\n\nimport numpy\nfrom overrides import overrides\nimport pyhocon\nimport spacy\nimport tqdm\n\nfrom ...common.models import get_submodel\nfrom ...common.params import replace_none, Params\nfrom ...common import util\nfrom ...data.instances.sentence_selection.sentence_selection_instance import SentenceSelectionInstance\nfrom ...models import concrete_models\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\nclass RetrievalEncoder:\n \"\"\"\n An abstract base class for algorithms that encode queries and passages into a vector space, so\n that we can perform vector-based retrieval over the passages given the queries.\n\n We provide four methods: ``encode_query``, ``encode_passage``, and plural versions of these two\n that handle lists. Depending on the encoding algorithm, encoding the query and the passage\n might be the same, but we give different methods to allow subclasses to have different\n encodings if they wish. The default implementation of the plural version of these ``encode``\n methods just calls the singular version in a list comprehension, but a subclass could override\n this to, e.g., make use of batching on a GPU.\n \"\"\"\n def encode_query(self, query: str) -> numpy.array:\n \"\"\"\n Converts the query string into a vector.\n \"\"\"\n raise NotImplementedError\n\n def encode_passage(self, passage: str) -> numpy.array:\n \"\"\"\n Converts the passage string into a vector.\n \"\"\"\n raise NotImplementedError\n\n def encode_queries(self, queries: List[str]) -> List[numpy.array]:\n \"\"\"\n Converts the query strings into vectors.\n \"\"\"\n return [self.encode_query(query) for query in tqdm.tqdm(queries)]\n\n def encode_passages(self, passages: List[str]) -> List[numpy.array]:\n \"\"\"\n Converts the passage strings into vectors.\n \"\"\"\n return [self.encode_passage(passage) for passage in tqdm.tqdm(passages)]\n\n\nclass BagOfWordsRetrievalEncoder(RetrievalEncoder):\n \"\"\"\n A ``RetrievalEncoder`` that encodes both queries and passages as a bag of pre-trained word\n embeddings.\n\n We use spacy to tokenize the sentence.\n\n The ``type`` of this model to use in a parameter file is ``\"bow\"``.\n\n Parameters\n ----------\n embeddings_file: str\n A GloVe-formatted gzipped file containing pre-trained word embeddings.\n\n TODO(matt): I wrote this from an earlier version of ``bow_lsh.py``, before Pradeep implemented\n his IDF feature. 
We should update this class to also have an option for IDF encoding, and then\n we can officially retire ``bow_lsh.py``.\n \"\"\"\n def __init__(self, params: Params):\n embeddings_file = params.pop('embeddings_file')\n self.en_nlp = spacy.load('en')\n\n # These fields will get set in the call to `read_embeddings_file`.\n self.vector_max = -float(\"inf\")\n self.vector_min = float(\"inf\")\n self.embeddings = {}\n self.embedding_dim = None\n self.read_embeddings_file(embeddings_file)\n\n def read_embeddings_file(self, embeddings_file: str):\n logger.info(\"Reading embeddings file: %s\", embeddings_file)\n with gzip.open(embeddings_file, 'rb') as embeddings_file:\n for line in embeddings_file:\n fields = line.decode('utf-8').strip().split(' ')\n self.embedding_dim = len(fields) - 1\n word = fields[0]\n vector = numpy.asarray(fields[1:], dtype='float32')\n vector_min = min(vector)\n vector_max = max(vector)\n if vector_min < self.vector_min:\n self.vector_min = vector_min\n if vector_max > self.vector_max:\n self.vector_max = vector_max\n self.embeddings[word] = vector\n\n @overrides\n def encode_query(self, query: str) -> numpy.array:\n return self._encode_sentence(query, for_background=False)\n\n @overrides\n def encode_passage(self, passage: str) -> numpy.array:\n return self._encode_sentence(passage, for_background=True)\n\n def _encode_sentence(self, sentence: str, for_background=False):\n words = [str(w.lower_) for w in self.en_nlp.tokenizer(sentence)]\n return numpy.mean(numpy.asarray([self._get_word_vector(word, for_background) for word in words]), axis=0)\n\n def _get_word_vector(self, word, random_for_unk=False):\n if word in self.embeddings:\n return self.embeddings[word]\n else:\n # If this is for the background data, we'd want to make new vectors (uniformly sampling\n # from the range (vector_min, vector_max)). If this is for the queries, we'll return a zero vector\n # for UNK because this word doesn't exist in the background data either.\n if random_for_unk:\n vector = numpy.random.uniform(low=self.vector_min, high=self.vector_max,\n size=(self.embedding_dim,))\n self.embeddings[word] = vector\n else:\n vector = numpy.zeros((self.embedding_dim,))\n return vector\n\n\nclass SentenceSelectionRetrievalEncoder(RetrievalEncoder):\n \"\"\"\n This class takes a trained sentence selection model and uses it to encode passages and queries.\n\n We make a few assumptions here:\n\n (1) The sentence selection model must have as its final layer a simple dot product, so that we\n can actually fit the model into this vector-based retrieval paradigm.\n (2) The sentence selection model needs to have its last encoder layers named\n ``question_encoder`` and ``sentences_encoder``. That is, we'll pull out submodels from the\n sentence selection model using those names, so they need to be present, and should be the\n last thing before doing the similarity computation. 
Similarly, the corresponding ``Input``\n layers must have names ``question_input`` and ``sentences_input``, where\n ``sentences_input`` has shape ``(batch_size, num_sentences, sentence_shape)``, and\n ``question_input`` has shape ``(batch_size, sentence_shape)``.\n\n The ``type`` of this model to use in a parameter file is ``\"sentence selection\"``.\n\n Parameters\n ----------\n model_param_file: str\n This is the parameter file used to train the sentence selection\n model with :func:`~deep_qa.run.run_model()`.\n \"\"\"\n def __init__(self, params: Params):\n model_param_file = params.pop('model_param_file')\n model_params = pyhocon.ConfigFactory.parse_file(model_param_file)\n model_params = replace_none(model_params)\n model_type = params.pop_choice('model_class', concrete_models.keys())\n model_class = concrete_models[model_type]\n self.model = model_class(model_params)\n self.model.load_model()\n # Ok, this is pretty hacky, but calling `self._get_encoder(name)` on a TextTrainer with\n # \"use default encoder\" as the fallback behavior could give you an encoder that doesn't\n # have the name you expect.\n # pylint: disable=protected-access\n question_encoder_name = self.model._get_encoder(name=\"question\",\n fallback_behavior=\"use default encoder\").name\n self.query_encoder_model = get_submodel(self.model.model,\n ['question_input'],\n [question_encoder_name],\n train_model=False,\n name='query_encoder_model')\n self.passage_encoder_model = get_submodel(self.model.model,\n ['sentences_input'],\n ['sentences_encoder'],\n train_model=False,\n name='passage_encoder_model')\n\n @overrides\n def encode_query(self, query: str) -> numpy.array:\n raise RuntimeError(\"You shouldn't use this method; use the batched version instead\")\n\n @overrides\n def encode_passage(self, passage: str) -> numpy.array:\n raise RuntimeError(\"You shouldn't use this method; use the batched version instead\")\n\n @overrides\n def encode_queries(self, queries: List[str]) -> List[numpy.array]:\n query_instances = [SentenceSelectionInstance(query, [], None) for query in queries]\n logger.info(\"Indexing queries\")\n indexed_instances = [instance.to_indexed_instance(self.model.data_indexer)\n for instance in tqdm.tqdm(query_instances)]\n logger.info(\"Padding queries\")\n for instance in tqdm.tqdm(indexed_instances):\n instance.pad(self.model._get_max_lengths()) # pylint: disable=protected-access\n query_arrays = numpy.asarray([instance.as_training_data()[0][0] for instance in indexed_instances])\n logger.info(\"Getting query vectors\")\n return self.query_encoder_model.predict(query_arrays)\n\n @overrides\n def encode_passages(self, passages: List[str]) -> List[numpy.array]:\n grouped_passages = util.group_by_count(passages, self.model.num_sentences, '')\n passage_instances = [SentenceSelectionInstance('', passage_group, None)\n for passage_group in grouped_passages]\n logger.info(\"Indexing passages\")\n indexed_instances = [instance.to_indexed_instance(self.model.data_indexer)\n for instance in tqdm.tqdm(passage_instances)]\n logger.info(\"Padding passages\")\n for instance in tqdm.tqdm(indexed_instances):\n instance.pad(self.model._get_max_lengths()) # pylint: disable=protected-access\n grouped_passage_arrays = numpy.asarray([instance.as_training_data()[0][1]\n for instance in indexed_instances])\n logger.info(\"Getting passage vectors\")\n grouped_passage_vectors = self.passage_encoder_model.predict(grouped_passage_arrays)\n shape = grouped_passage_vectors.shape\n new_shape = (shape[0] * shape[1], shape[2])\n 
passage_vectors = grouped_passage_vectors.reshape(new_shape)\n return passage_vectors[:len(passages)]\n\n\nretrieval_encoders = OrderedDict() # pylint: disable=invalid-name\nretrieval_encoders['bow'] = BagOfWordsRetrievalEncoder\nretrieval_encoders['sentence selection'] = SentenceSelectionRetrievalEncoder\n"
]
| [
[
"numpy.random.uniform",
"numpy.asarray",
"numpy.zeros"
]
]
|
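A hedged usage sketch for the `BagOfWordsRetrievalEncoder` in this record, assuming deep_qa is installed, that `Params` accepts a plain dict as it does elsewhere in the library, and that `glove.txt.gz` is a placeholder path to a GloVe-formatted gzipped embeddings file.

```python
# Hypothetical usage of the bag-of-words retrieval encoder defined above.
# Requires the spacy English model loaded by spacy.load('en').
from deep_qa.common.params import Params
from deep_qa.contrib.background_search.retrieval_encoders import retrieval_encoders

params = Params({'embeddings_file': 'glove.txt.gz'})  # placeholder embeddings path
encoder = retrieval_encoders['bow'](params)           # BagOfWordsRetrievalEncoder

query_vectors = encoder.encode_queries(['who wrote hamlet'])
passage_vectors = encoder.encode_passages(['Hamlet was written by Shakespeare.'])
```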
Spenhouet/MONAI | [
"fb66ba0625c3a64ba7cdba9811a9997b336e3702"
]
| [
"monai/data/test_time_augmentation.py"
]
| [
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom copy import deepcopy\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\n\nfrom monai.data.dataloader import DataLoader\nfrom monai.data.dataset import Dataset\nfrom monai.data.utils import list_data_collate, pad_list_data_collate\nfrom monai.transforms.compose import Compose\nfrom monai.transforms.inverse import InvertibleTransform\nfrom monai.transforms.inverse_batch_transform import BatchInverseTransform\nfrom monai.transforms.transform import Randomizable\nfrom monai.transforms.utils import allow_missing_keys_mode, convert_inverse_interp_mode\nfrom monai.utils.enums import CommonKeys, InverseKeys\nfrom monai.utils.module import optional_import\n\nif TYPE_CHECKING:\n from tqdm import tqdm\n\n has_tqdm = True\nelse:\n tqdm, has_tqdm = optional_import(\"tqdm\", name=\"tqdm\")\n\n__all__ = [\"TestTimeAugmentation\"]\n\n\ndef _identity(x):\n return x\n\n\nclass TestTimeAugmentation:\n \"\"\"\n Class for performing test time augmentations. This will pass the same image through the network multiple times.\n\n The user passes transform(s) to be applied to each realisation, and provided that at least one of those transforms\n is random, the network's output will vary. Provided that inverse transformations exist for all supplied spatial\n transforms, the inverse can be applied to each realisation of the network's output. Once in the same spatial\n reference, the results can then be combined and metrics computed.\n\n Test time augmentations are a useful feature for computing network uncertainty, as well as observing the network's\n dependency on the applied random transforms.\n\n Reference:\n Wang et al.,\n Aleatoric uncertainty estimation with test-time augmentation for medical image segmentation with convolutional\n neural networks,\n https://doi.org/10.1016/j.neucom.2019.01.103\n\n Args:\n transform: transform (or composed) to be applied to each realisation. At least one transform must be of type\n `Randomizable`. All random transforms must be of type `InvertibleTransform`.\n batch_size: number of realisations to infer at once.\n num_workers: how many subprocesses to use for data.\n inferrer_fn: function to use to perform inference.\n device: device on which to perform inference.\n image_key: key used to extract image from input dictionary.\n orig_key: the key of the original input data in the dict. 
will get the applied transform information\n for this input data, then invert them for the expected data with `image_key`.\n orig_meta_keys: the key of the meta data of original input data, will get the `affine`, `data_shape`, etc.\n the meta data is a dictionary object which contains: filename, original_shape, etc.\n if None, will try to construct meta_keys by `{orig_key}_{meta_key_postfix}`.\n meta_key_postfix: use `key_{postfix}` to to fetch the meta data according to the key data,\n default is `meta_dict`, the meta data is a dictionary object.\n For example, to handle key `image`, read/write affine matrices from the\n metadata `image_meta_dict` dictionary's `affine` field.\n this arg only works when `meta_keys=None`.\n return_full_data: normally, metrics are returned (mode, mean, std, vvc). Setting this flag to `True` will return the\n full data. Dimensions will be same size as when passing a single image through `inferrer_fn`, with a dimension appended\n equal in size to `num_examples` (N), i.e., `[N,C,H,W,[D]]`.\n progress: whether to display a progress bar.\n\n Example:\n .. code-block:: python\n\n transform = RandAffined(keys, ...)\n post_trans = Compose([Activations(sigmoid=True), AsDiscrete(threshold_values=True)])\n\n tt_aug = TestTimeAugmentation(\n transform, batch_size=5, num_workers=0, inferrer_fn=lambda x: post_trans(model(x)), device=device\n )\n mode, mean, std, vvc = tt_aug(test_data)\n \"\"\"\n\n def __init__(\n self,\n transform: InvertibleTransform,\n batch_size: int,\n num_workers: int = 0,\n inferrer_fn: Callable = _identity,\n device: Union[str, torch.device] = \"cpu\",\n image_key=CommonKeys.IMAGE,\n orig_key=CommonKeys.LABEL,\n nearest_interp: bool = True,\n orig_meta_keys: Optional[str] = None,\n meta_key_postfix=\"meta_dict\",\n return_full_data: bool = False,\n progress: bool = True,\n ) -> None:\n self.transform = transform\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.inferrer_fn = inferrer_fn\n self.device = device\n self.image_key = image_key\n self.orig_key = orig_key\n self.nearest_interp = nearest_interp\n self.orig_meta_keys = orig_meta_keys\n self.meta_key_postfix = meta_key_postfix\n self.return_full_data = return_full_data\n self.progress = progress\n\n # check that the transform has at least one random component, and that all random transforms are invertible\n self._check_transforms()\n\n def _check_transforms(self):\n \"\"\"Should be at least 1 random transform, and all random transforms should be invertible.\"\"\"\n ts = [self.transform] if not isinstance(self.transform, Compose) else self.transform.transforms\n randoms = np.array([isinstance(t, Randomizable) for t in ts])\n invertibles = np.array([isinstance(t, InvertibleTransform) for t in ts])\n # check at least 1 random\n if sum(randoms) == 0:\n raise RuntimeError(\n \"Requires a `Randomizable` transform or a `Compose` containing at least one `Randomizable` transform.\"\n )\n # check that whenever randoms is True, invertibles is also true\n for r, i in zip(randoms, invertibles):\n if r and not i:\n warnings.warn(\n f\"Not all applied random transform(s) are invertible. Problematic transform: {type(r).__name__}\"\n )\n\n def __call__(\n self, data: Dict[str, Any], num_examples: int = 10\n ) -> Union[Tuple[np.ndarray, np.ndarray, np.ndarray, float], np.ndarray]:\n \"\"\"\n Args:\n data: dictionary data to be processed.\n num_examples: number of realisations to be processed and results combined.\n\n Returns:\n - if `return_full_data==False`: mode, mean, std, vvc. 
The mode, mean and standard deviation are calculated across\n `num_examples` outputs at each voxel. The volume variation coefficient (VVC) is `std/mean` across the whole output,\n including `num_examples`. See original paper for clarification.\n - if `return_full_data==False`: data is returned as-is after applying the `inferrer_fn` and then concatenating across\n the first dimension containing `num_examples`. This allows the user to perform their own analysis if desired.\n \"\"\"\n d = dict(data)\n\n # check num examples is multiple of batch size\n if num_examples % self.batch_size != 0:\n raise ValueError(\"num_examples should be multiple of batch size.\")\n\n # generate batch of data of size == batch_size, dataset and dataloader\n data_in = [deepcopy(d) for _ in range(num_examples)]\n ds = Dataset(data_in, self.transform)\n dl = DataLoader(ds, num_workers=self.num_workers, batch_size=self.batch_size, collate_fn=pad_list_data_collate)\n\n transform_key = self.orig_key + InverseKeys.KEY_SUFFIX\n\n # create inverter\n inverter = BatchInverseTransform(self.transform, dl, collate_fn=list_data_collate)\n\n outputs: List[np.ndarray] = []\n\n for batch_data in tqdm(dl) if has_tqdm and self.progress else dl:\n\n batch_images = batch_data[self.image_key].to(self.device)\n\n # do model forward pass\n batch_output = self.inferrer_fn(batch_images)\n if isinstance(batch_output, torch.Tensor):\n batch_output = batch_output.detach().cpu()\n if isinstance(batch_output, np.ndarray):\n batch_output = torch.Tensor(batch_output)\n transform_info = batch_data.get(transform_key, None)\n if transform_info is None:\n # no invertible transforms, adding dummy info for identity invertible\n transform_info = [[InverseKeys.NONE] for _ in range(self.batch_size)]\n if self.nearest_interp:\n transform_info = convert_inverse_interp_mode(\n trans_info=deepcopy(transform_info), mode=\"nearest\", align_corners=None\n )\n\n # create a dictionary containing the inferred batch and their transforms\n inferred_dict = {self.orig_key: batch_output, transform_key: transform_info}\n # if meta dict is present, add that too (required for some inverse transforms)\n meta_dict_key = self.orig_meta_keys or f\"{self.orig_key}_{self.meta_key_postfix}\"\n if meta_dict_key in batch_data:\n inferred_dict[meta_dict_key] = batch_data[meta_dict_key]\n\n # do inverse transformation (allow missing keys as only inverting the orig_key)\n with allow_missing_keys_mode(self.transform): # type: ignore\n inv_batch = inverter(inferred_dict)\n\n # append\n outputs.append(inv_batch[self.orig_key])\n\n # output\n output: np.ndarray = np.concatenate(outputs)\n\n if self.return_full_data:\n return output\n\n # calculate metrics\n mode = np.array(torch.mode(torch.Tensor(output.astype(np.int64)), dim=0).values)\n mean: np.ndarray = np.mean(output, axis=0) # type: ignore\n std: np.ndarray = np.std(output, axis=0) # type: ignore\n vvc: float = (np.std(output) / np.mean(output)).item()\n return mode, mean, std, vvc\n"
]
| [
[
"numpy.concatenate",
"numpy.std",
"torch.Tensor",
"numpy.mean"
]
]
|
anywayTsao/dicom_project | [
"69991d371e5605dfa36c04393b78737f829285a2"
]
| [
"utils/categorical_focal_loss.py"
]
| [
"\"\"\"Multiclass focal loss implementation.\"\"\"\n# __ _ _\n# / _| | | | |\n# | |_ ___ ___ __ _ | | | | ___ ___ ___\n# | _| / _ \\ / __| / _` | | | | | / _ \\ / __| / __|\n# | | | (_) | | (__ | (_| | | | | | | (_) | \\__ \\ \\__ \\\n# |_| \\___/ \\___| \\__,_| |_| |_| \\___/ |___/ |___/\n\nimport itertools\n\nimport tensorflow as tf\n\n_EPSILON = tf.keras.backend.epsilon()\n\n\ndef sparse_categorical_focal_loss(y_true, y_pred, gamma, *,\n from_logits: bool = False, axis: int = -1\n ) -> tf.Tensor:\n r\"\"\"Focal loss function for multiclass classification with integer labels.\n This loss function generalizes multiclass softmax cross-entropy by\n introducing a hyperparameter called the *focusing parameter* that allows\n hard-to-classify examples to be penalized more heavily relative to\n easy-to-classify examples.\n See :meth:`~focal_loss.binary_focal_loss` for a description of the focal\n loss in the binary setting, as presented in the original work [1]_.\n In the multiclass setting, with integer labels :math:`y`, focal loss is\n defined as\n .. math::\n L(y, \\hat{\\mathbf{p}})\n = -\\left(1 - \\hat{p}_y\\right)^\\gamma \\log(\\hat{p}_y)\n where\n * :math:`y \\in \\{0, \\ldots, K - 1\\}` is an integer class label (:math:`K`\n denotes the number of classes),\n * :math:`\\hat{\\mathbf{p}} = (\\hat{p}_0, \\ldots, \\hat{p}_{K-1})\n \\in [0, 1]^K` is a vector representing an estimated probability\n distribution over the :math:`K` classes,\n * :math:`\\gamma` (gamma, not :math:`y`) is the *focusing parameter* that\n specifies how much higher-confidence correct predictions contribute to\n the overall loss (the higher the :math:`\\gamma`, the higher the rate at\n which easy-to-classify examples are down-weighted).\n The usual multiclass softmax cross-entropy loss is recovered by setting\n :math:`\\gamma = 0`.\n Parameters\n ----------\n y_true : tensor-like\n Integer class labels.\n y_pred : tensor-like\n Either probabilities or logits, depending on the `from_logits`\n parameter.\n gamma : float or tensor-like of shape (K,)\n The focusing parameter :math:`\\gamma`. Higher values of `gamma` make\n easy-to-classify examples contribute less to the loss relative to\n hard-to-classify examples. Must be non-negative. This can be a\n one-dimensional tensor, in which case it specifies a focusing parameter\n for each class.\n from_logits : bool, optional\n Whether `y_pred` contains logits or probabilities.\n axis : int, optional\n Channel axis in the `y_pred` tensor.\n Returns\n -------\n :class:`tf.Tensor`\n The focal loss for each example.\n Examples\n --------\n This function computes the per-example focal loss between a one-dimensional\n integer label vector and a two-dimensional prediction matrix:\n >>> import numpy as np\n >>> from focal_loss import sparse_categorical_focal_loss\n >>> y_true = [0, 1, 2]\n >>> y_pred = [[0.8, 0.1, 0.1], [0.2, 0.7, 0.1], [0.2, 0.2, 0.6]]\n >>> loss = sparse_categorical_focal_loss(y_true, y_pred, gamma=2)\n >>> np.set_printoptions(precision=3)\n >>> print(loss.numpy())\n [0.009 0.032 0.082]\n Warnings\n --------\n This function does not reduce its output to a scalar, so it cannot be passed\n to :meth:`tf.keras.Model.compile` as a `loss` argument. Instead, use the\n wrapper class :class:`~focal_loss.SparseCategoricalFocalLoss`.\n References\n ----------\n .. [1] T. Lin, P. Goyal, R. Girshick, K. He and P. Dollár. Focal loss for\n dense object detection. 
IEEE Transactions on Pattern Analysis and\n Machine Intelligence, 2018.\n (`DOI <https://doi.org/10.1109/TPAMI.2018.2858826>`__)\n (`arXiv preprint <https://arxiv.org/abs/1708.02002>`__)\n See Also\n --------\n :meth:`~focal_loss.SparseCategoricalFocalLoss`\n A wrapper around this function that makes it a\n :class:`tf.keras.losses.Loss`.\n \"\"\"\n # Process focusing parameter\n gamma = tf.convert_to_tensor(gamma, dtype=tf.dtypes.float32)\n gamma_rank = gamma.shape.rank\n scalar_gamma = gamma_rank == 0\n\n # Process prediction tensor\n y_pred = tf.convert_to_tensor(y_pred)\n y_pred_rank = y_pred.shape.rank\n if y_pred_rank is not None:\n axis %= y_pred_rank\n if axis != y_pred_rank - 1:\n # Put channel axis last for sparse_softmax_cross_entropy_with_logits\n perm = list(itertools.chain(range(axis),\n range(axis + 1, y_pred_rank), [axis]))\n y_pred = tf.transpose(y_pred, perm=perm)\n elif axis != -1:\n raise ValueError(\n f'Cannot compute sparse categorical focal loss with axis={axis} on '\n 'a prediction tensor with statically unknown rank.')\n y_pred_shape = tf.shape(y_pred)\n\n # Process ground truth tensor\n y_true = tf.dtypes.cast(y_true, dtype=tf.dtypes.int64)\n y_true_rank = y_true.shape.rank\n\n if y_true_rank is None:\n raise NotImplementedError('Sparse categorical focal loss not supported '\n 'for target/label tensors of unknown rank')\n\n reshape_needed = (y_true_rank is not None and y_pred_rank is not None and\n y_pred_rank != y_true_rank + 1)\n if reshape_needed:\n y_true = tf.reshape(y_true, [-1])\n y_pred = tf.reshape(y_pred, [-1, y_pred_shape[-1]])\n\n if from_logits:\n logits = y_pred\n probs = tf.nn.softmax(y_pred, axis=-1)\n else:\n probs = y_pred\n logits = tf.math.log(tf.clip_by_value(y_pred, _EPSILON, 1 - _EPSILON))\n\n xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=y_true,\n logits=logits,\n )\n\n y_true_rank = y_true.shape.rank\n probs = tf.gather(probs, y_true, axis=-1, batch_dims=y_true_rank)\n if not scalar_gamma:\n gamma = tf.gather(gamma, y_true, axis=0, batch_dims=y_true_rank)\n focal_modulation = (1 - probs) ** gamma\n loss = focal_modulation * xent_loss\n\n if reshape_needed:\n loss = tf.reshape(loss, y_pred_shape[:-1])\n\n return loss\n\n\[email protected]_keras_serializable()\nclass SparseCategoricalFocalLoss(tf.keras.losses.Loss):\n r\"\"\"Focal loss function for multiclass classification with integer labels.\n This loss function generalizes multiclass softmax cross-entropy by\n introducing a hyperparameter :math:`\\gamma` (gamma), called the\n *focusing parameter*, that allows hard-to-classify examples to be penalized\n more heavily relative to easy-to-classify examples.\n This class is a wrapper around\n :class:`~focal_loss.sparse_categorical_focal_loss`. See the documentation\n there for details about this loss function.\n Parameters\n ----------\n gamma : float or tensor-like of shape (K,)\n The focusing parameter :math:`\\gamma`. Higher values of `gamma` make\n easy-to-classify examples contribute less to the loss relative to\n hard-to-classify examples. Must be non-negative. 
This can be a\n one-dimensional tensor, in which case it specifies a focusing parameter\n for each class.\n from_logits : bool, optional\n Whether model prediction will be logits or probabilities.\n **kwargs : keyword arguments\n Other keyword arguments for :class:`tf.keras.losses.Loss` (e.g., `name`\n or `reduction`).\n Examples\n --------\n An instance of this class is a callable that takes a rank-one tensor of\n integer class labels `y_true` and a tensor of model predictions `y_pred` and\n returns a scalar tensor obtained by reducing the per-example focal loss (the\n default reduction is a batch-wise average).\n >>> from focal_loss import SparseCategoricalFocalLoss\n >>> loss_func = SparseCategoricalFocalLoss(gamma=2)\n >>> y_true = [0, 1, 2]\n >>> y_pred = [[0.8, 0.1, 0.1], [0.2, 0.7, 0.1], [0.2, 0.2, 0.6]]\n >>> loss_func(y_true, y_pred)\n <tf.Tensor: shape=(), dtype=float32, numpy=0.040919524>\n Use this class in the :mod:`tf.keras` API like any other multiclass\n classification loss function class that accepts integer labels found in\n :mod:`tf.keras.losses` (e.g.,\n :class:`tf.keras.losses.SparseCategoricalCrossentropy`:\n .. code-block:: python\n # Typical usage\n model = tf.keras.Model(...)\n model.compile(\n optimizer=...,\n loss=SparseCategoricalFocalLoss(gamma=2), # Used here like a tf.keras loss\n metrics=...,\n )\n history = model.fit(...)\n See Also\n --------\n :meth:`~focal_loss.sparse_categorical_focal_loss`\n The function that performs the focal loss computation, taking a label\n tensor and a prediction tensor and outputting a loss.\n \"\"\"\n\n def __init__(self, gamma, from_logits: bool = False, **kwargs):\n super().__init__(**kwargs)\n self.gamma = gamma\n self.from_logits = from_logits\n\n def get_config(self):\n \"\"\"Returns the config of the layer.\n A layer config is a Python dictionary containing the configuration of a\n layer. The same layer can be re-instantiated later (without its trained\n weights) from this configuration.\n Returns\n -------\n dict\n This layer's config.\n \"\"\"\n config = super().get_config()\n config.update(gamma=self.gamma, from_logits=self.from_logits)\n return config\n\n def call(self, y_true, y_pred):\n \"\"\"Compute the per-example focal loss.\n This method simply calls\n :meth:`~focal_loss.sparse_categorical_focal_loss` with the appropriate\n arguments.\n Parameters\n ----------\n y_true : tensor-like, shape (N,)\n Integer class labels.\n y_pred : tensor-like, shape (N, K)\n Either probabilities or logits, depending on the `from_logits`\n parameter.\n Returns\n -------\n :class:`tf.Tensor`\n The per-example focal loss. Reduction to a scalar is handled by\n this layer's\n :meth:`~focal_loss.SparseCateogiricalFocalLoss.__call__` method.\n \"\"\"\n return sparse_categorical_focal_loss(y_true=y_true, y_pred=y_pred,\n gamma=self.gamma,\n from_logits=self.from_logits)"
]
| [
[
"tensorflow.dtypes.cast",
"tensorflow.convert_to_tensor",
"tensorflow.shape",
"tensorflow.keras.backend.epsilon",
"tensorflow.reshape",
"tensorflow.transpose",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.clip_by_value",
"tensorflow.gather",
"tensorflow.nn.softmax",
"tensorflow.keras.utils.register_keras_serializable"
]
]
|
juls-dotcom/python_workshop | [
"ba4b8061910490f4e1c1236ce2c0ad12ef02a671"
]
| [
"notebooks/lecture/02-pandas/display_functions.py"
]
| [
"import pandas as pd\nfrom itertools import islice\nfrom ipywidgets import HBox\nfrom ipywidgets import HTML\nfrom IPython.display import display\n\npd.set_option('max_colwidth', 20)\n\n\ndef df_HBox(objects, names=None, px=50):\n children = []\n space_html = HTML('<span style=\"padding-left:{}px\">'.format(px))\n for i, obj in enumerate(objects):\n name = names[i] if names is not None else ''\n if isinstance(obj, pd.Series):\n info = ((\"Name: {}, \".format(obj.name) if obj.name else \"\") +\n \"dtype: {}\".format(obj.dtype))\n html_str = (\n '<div class=\"rendered_html\"><pre>{}</pre></div>'.format(obj.to_string())\n )\n obj_html = HTML(name+html_str+info)\n else:\n html_str = obj.to_html(classes='rendered_html')\n obj_html = HTML(name+html_str)\n\n children.append(obj_html)\n if i != (len(objects)-1):\n children.append(space_html)\n display(HBox(children=children))\n\n\ndef unpack_groups(grouped, prefix='group: '):\n names, groups = list(zip(*list(grouped)))\n names = [\"{}{}\".format(prefix, name) for name in names]\n return list(names), list(groups)\n\n\ndef head(path, n=5):\n \"\"\"Read first lines of a file.\"\"\"\n try:\n with open(path, 'r') as f:\n first_n = islice(f, n)\n for line in first_n:\n print(line, end=\"\")\n except UnicodeDecodeError:\n # Pray it's in latin-1.\n with open(path, 'r', encoding='latin-1') as f:\n first_n = islice(f, n)\n for line in first_n:\n print(line, end=\"\")\n\n\n"
]
| [
[
"pandas.set_option"
]
]
|
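A notebook-style usage sketch for the display helpers in this record; the module import name is assumed from the file path, and the DataFrame and file path are placeholders.

```python
# Hypothetical Jupyter usage of the helpers defined above.
import pandas as pd
from display_functions import df_HBox, unpack_groups, head  # assumed import name

df = pd.DataFrame({'city': ['Amsterdam', 'Utrecht', 'Amsterdam'], 'n': [1, 2, 3]})

# Render a DataFrame and a Series side by side.
df_HBox([df, df['n']], names=['frame: ', 'series: '])

# Show each groupby group next to the others, labelled by group key.
names, groups = unpack_groups(df.groupby('city'))
df_HBox(groups, names=names)

# Print the first three lines of a text file.
head('some_file.csv', n=3)
```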
deepcam-cn/3D-CNN-BERT-COVID19 | [
"58adc7d570a016b48c48829e26b1f326bba91d4a"
]
| [
"BERT/video_transforms.py"
]
| [
"from __future__ import division\nimport torch\nimport random\nimport numpy as np\nimport numbers\nimport types\nimport cv2\nimport math\nimport os, sys\nimport collections\nfrom skimage.transform import warp, AffineTransform\nfrom PIL import Image\n\nimport torchvision.transforms.functional as TF\n\ncv2.setNumThreads(0) #this solve the problem that cv2 freezes when resize is used \n\nclass Compose(object):\n \"\"\"Composes several video_transforms together.\n\n Args:\n transforms (List[Transform]): list of transforms to compose.\n\n Example:\n >>> video_transforms.Compose([\n >>> video_transforms.CenterCrop(10),\n >>> video_transforms.ToTensor(),\n >>> ])\n \"\"\"\n\n def __init__(self, video_transforms):\n self.video_transforms = video_transforms\n\n def __call__(self, clips):\n for t in self.video_transforms:\n clips = t(clips)\n return clips\n\nclass Lambda(object):\n \"\"\"Applies a lambda as a transform\"\"\"\n def __init__(self, lambd):\n assert type(lambd) is types.LambdaType\n self.lambd = lambd\n\n def __call__(self, clips):\n return self.lambd(clips)\n\nclass ToTensor(object):\n \"\"\"Converts a numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].\n \"\"\"\n\n def __call__(self, clips):\n if isinstance(clips, np.ndarray):\n # handle numpy array\n clips = torch.from_numpy(clips.transpose((2, 0, 1)))\n # backward compatibility\n return clips.float().div(255.0)\n \nclass ToTensor3(object):\n \"\"\"Converts a numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].\n \"\"\"\n\n def __call__(self, clips):\n if isinstance(clips, np.ndarray):\n # handle numpy array\n clips = torch.from_numpy(clips.transpose((3, 2, 0, 1)))\n # backward compatibility\n return clips.float().div(255.0)\n \nclass ToTensor2(object):\n\n def __call__(self, clips):\n if isinstance(clips, np.ndarray):\n # handle numpy array\n clips = torch.from_numpy(clips.transpose((2, 0, 1)))\n # backward compatibility\n return clips.float().div(1.0)\n \nclass Reset(object):\n def __init__(self, mask_prob, num_seg):\n self.mask_prob = mask_prob\n self.num_seg =num_seg\n \n def __call__(self, clips):\n mask=np.random.binomial(1, self.mask_prob, self.num_seg).repeat(3)\n return clips*mask\n \nclass Normalize(object):\n \"\"\"Given mean: (R, G, B) and std: (R, G, B),\n will normalize each channel of the torch.*Tensor, i.e.\n channel = (channel - mean) / std\n Here, the input is a clip, not a single image. (multi-channel data)\n The dimension of mean and std depends on parameter: new_length\n If new_length = 1, it falls back to single image case (3 channel)\n \"\"\"\n\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, tensor):\n # TODO: make efficient\n torch_mean = torch.tensor([[self.mean]]).view(-1,1,1).float()\n torch_std = torch.tensor([[self.std]]).view(-1,1,1).float()\n tensor2 = (tensor - torch_mean) / torch_std\n # for t, m, s in zip(tensor, self.mean, self.std):\n # t.sub_(m).div_(s)\n return tensor2\nclass DeNormalize(object):\n \"\"\"Given mean: (R, G, B) and std: (R, G, B),\n will normalize each channel of the torch.*Tensor, i.e.\n channel = (channel - mean) / std\n Here, the input is a clip, not a single image. 
(multi-channel data)\n The dimension of mean and std depends on parameter: new_length\n If new_length = 1, it falls back to single image case (3 channel)\n \"\"\"\n\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, tensor):\n # TODO: make efficient\n torch_mean = torch.tensor([[self.mean]]).view(-1,1,1).float()\n torch_std = torch.tensor([[self.std]]).view(-1,1,1).float()\n tensor2 = (tensor * torch_std) + torch_mean\n # for t, m, s in zip(tensor, self.mean, self.std):\n # t.sub_(m).div_(s)\n return tensor2\n \nclass Normalize3(object):\n \"\"\"Given mean: (R, G, B) and std: (R, G, B),\n will normalize each channel of the torch.*Tensor, i.e.\n channel = (channel - mean) / std\n Here, the input is a clip, not a single image. (multi-channel data)\n The dimension of mean and std depends on parameter: new_length\n If new_length = 1, it falls back to single image case (3 channel)\n \"\"\"\n\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, tensor):\n torch_mean = torch.tensor([[self.mean]]).view(1,-1,1,1)\n torch_std = torch.tensor([[self.std]]).view(1,-1,1,1)\n tensor2 = (tensor - torch_mean) / torch_std\n return tensor2\n\nclass Normalize2(object):\n\n def __init__(self, mean, std, num_seg):\n self.mean = mean\n self.std = std\n self.num_seg = num_seg\n\n def __call__(self, tensor, num_seg):\n # TODO: make efficient\n mean = self.mean * self.num_seg\n std = self.std * self.num_seg\n for t, m, s in zip(tensor, mean, std):\n t.sub_(m).div_(s)\n return tensor\n \nclass Scale(object):\n \"\"\" Rescales the input numpy array to the given 'size'.\n 'size' will be the size of the smaller edge.\n For example, if height > width, then image will be\n rescaled to (size * height / width, size)\n size: size of the smaller edge\n interpolation: Default: cv2.INTER_LINEAR\n \"\"\"\n def __init__(self, size, interpolation=cv2.INTER_LINEAR):\n self.size = size\n self.interpolation = interpolation\n\n def __call__(self, clips):\n\n h, w, c = clips.shape\n new_w = 0\n new_h = 0\n if isinstance(self.size, int):\n if (w <= h and w == self.size) or (h <= w and h == self.size):\n return clips\n if w < h:\n new_w = self.size\n new_h = int(self.size * h / w)\n else:\n new_w = int(self.size * w / h)\n new_h = self.size\n else:\n new_w = self.size[0]\n new_h = self.size[1]\n\n is_color = False\n if c % 3 == 0:\n is_color = True\n\n if is_color:\n num_imgs = int(c / 3)\n scaled_clips = np.zeros((new_h,new_w,c))\n for frame_id in range(num_imgs):\n cur_img = clips[:,:,frame_id*3:frame_id*3+3]\n scaled_clips[:,:,frame_id*3:frame_id*3+3] = cv2.resize(cur_img, (new_w, new_h), self.interpolation)\n else:\n num_imgs = int(c / 1)\n scaled_clips = np.zeros((new_h,new_w,c))\n for frame_id in range(num_imgs):\n cur_img = clips[:,:,frame_id:frame_id+1]\n scaled_clips[:,:,frame_id:frame_id+1] = cv2.resize(cur_img, (new_w, new_h), self.interpolation)\n return scaled_clips\n\n\nclass RandomAffineTransform(object):\n def __init__(self,\n scale_range,\n rotation_range,\n shear_range,\n translation_range\n ):\n self.scale_range = scale_range\n self.rotation_range = rotation_range\n self.shear_range = shear_range\n self.translation_range = translation_range\n\n def __call__(self, clips):\n\n h, w, c = clips.shape\n num_imgs = int(c / 3)\n\n scaled_clips = np.zeros((h,w,c))\n\n scale_x = np.random.uniform(*self.scale_range)\n scale_y = np.random.uniform(*self.scale_range)\n scale = (scale_x, scale_y)\n rotation = np.random.uniform(*self.rotation_range)\n 
shear = np.random.uniform(*self.shear_range)\n translation = (\n np.random.uniform(*self.translation_range) * w,\n np.random.uniform(*self.translation_range) * h\n )\n af = AffineTransform(scale=scale, rotation=rotation, translation=translation)\n\n for frame_id in range(num_imgs):\n img = clips[:,:,frame_id*3:frame_id*3+3]\n\n img_data = np.array(img)\n\n #print(\"Before affine transform = \",np.max(img_data),np.min(img_data)) \n #h, w, n_chan = img_data.shape\n img_data1 = warp(img_data, af.inverse)\n img1 = np.uint8(img_data1 * 255)\n #print(\"After affine transform = \", np.max(img1),np.min(img1)) \n\n scaled_clips[:,:,frame_id*3:frame_id*3+3] = img1 \n\n return scaled_clips\n\n\n#random brightness and contrast \n\n # Affine Transforms\ndef affop(img, angle, translate, scale, shear):\n _img = TF.affine(img, angle, translate, scale, shear, resample=Image.BILINEAR)\n return _img\n\n # Color Transforms\ndef colorop(img, bright, contrast):\n _img = TF.adjust_brightness(img, bright)\n _img = TF.adjust_contrast(_img, contrast)\n return _img\n\n\nclass RandomTransforms(object):\n def __init__(self,\n ANGLE_R=10, TRANS_R=0.1, \n SCALE_R=0.2, SHEAR_R=10,\n BRIGHT_R=0.5, CONTRAST_R=0.3):\n\n self.ANGLE_R = ANGLE_R\n self.TRANS_R = TRANS_R\n self.SCALE_R = SCALE_R\n self.SHEAR_R = SHEAR_R\n self.BRIGHT_R = BRIGHT_R\n self.CONTRAST_R = CONTRAST_R\n\n def __call__(self, clips):\n\n h, w, c = clips.shape\n num_imgs = int(c / 3)\n\n scaled_clips = np.zeros((h,w,c))\n angle = random.randint(-self.ANGLE_R, self.ANGLE_R)\n translate = (random.randint(int(-w*self.TRANS_R), int(w*self.TRANS_R)), \n random.randint(int(-h*self.TRANS_R), int(h* self.TRANS_R))) # x, y axis\n scale = 1 + round(random.uniform(-self.SCALE_R, self.SCALE_R), 1)\n shear = random.randint(- self.SHEAR_R, self.SHEAR_R)\n bright = 1 + round(random.uniform(-self.BRIGHT_R, self.BRIGHT_R), 1)\n contrast = 1 + round(random.uniform(-self.CONTRAST_R, self.CONTRAST_R), 1)\n\n #print(\"angle = {}, translate = {}, scale = {}, shear = {}, bright = {}, contrast = {}\".format(\n # angle, translate, scale, shear, bright,contrast)) \n\n \n for frame_id in range(num_imgs):\n img = clips[:,:,frame_id*3:frame_id*3+3] \n #print(\"before = \", np.max(img), np.min(img)) \n img = Image.fromarray(img)\n img = affop(img, angle, translate, scale, shear)\n img = colorop(img, bright, contrast) \n\n img = np.array(img)\n #print(\"after = \", np.max(img), np.min(img)) \n scaled_clips[:,:,frame_id*3:frame_id*3+3] = img \n \n\n return scaled_clips\n\n\n\n\nclass CenterCrop(object):\n \"\"\"Crops the given numpy array at the center to have a region of\n the given size. 
size can be a tuple (target_height, target_width)\n or an integer, in which case the target will be of a square shape (size, size)\n \"\"\"\n\n def __init__(self, size, c=3):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n self.c = c \n\n def __call__(self, clips):\n h, w, c = clips.shape\n th, tw = self.size\n x1 = int(round((w - tw) / 2.))\n y1 = int(round((h - th) / 2.))\n\n is_color = False\n if c % self.c == 0:\n is_color = True\n\n if is_color:\n num_imgs = int(c / self.c)\n scaled_clips = np.zeros((th,tw,c))\n for frame_id in range(num_imgs):\n cur_img = clips[:,:,frame_id*self.c:frame_id*self.c+self.c]\n crop_img = cur_img[y1:y1+th, x1:x1+tw, :]\n assert(crop_img.shape == (th, tw, self.c))\n scaled_clips[:,:,frame_id*self.c:frame_id*self.c+self.c] = crop_img\n return scaled_clips\n else:\n num_imgs = int(c / 1)\n scaled_clips = np.zeros((th,tw,c))\n for frame_id in range(num_imgs):\n cur_img = clips[:,:,frame_id:frame_id+1]\n crop_img = cur_img[y1:y1+th, x1:x1+tw, :]\n assert(crop_img.shape == (th, tw, 1))\n scaled_clips[:,:,frame_id:frame_id+1] = crop_img\n return scaled_clips\n\nclass RandomHorizontalFlip(object):\n \"\"\"Randomly horizontally flips the given numpy array with a probability of 0.5\n \"\"\"\n def __call__(self, clips):\n if random.random() < 0.5:\n clips = np.fliplr(clips)\n clips = np.ascontiguousarray(clips)\n return clips\n\nclass RandomVerticalFlip(object):\n \"\"\"Randomly vertically flips the given numpy array with a probability of 0.5\n \"\"\"\n def __call__(self, clips):\n if random.random() < 0.5:\n clips = np.flipud(clips)\n clips = np.ascontiguousarray(clips)\n return clips\n\n\nclass RandomSizedCrop(object):\n \"\"\"Random crop the given numpy array to a random size of (0.08 to 1.0) of the original size\n and and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio\n This is popularly used to train the Inception networks\n size: size of the smaller edge\n interpolation: Default: cv2.INTER_LINEAR\n \"\"\"\n\n def __init__(self, size, interpolation=cv2.INTER_LINEAR,c=3):\n self.size = size\n self.interpolation = interpolation\n self.c = c \n\n def __call__(self, clips):\n h, w, c = clips.shape\n is_color = False\n if c % self.c == 0:\n is_color = True\n\n for attempt in range(10):\n area = w * h\n target_area = random.uniform(0.08, 1.0) * area\n aspect_ratio = random.uniform(3. / 4, 4. 
/ 3)\n\n new_w = int(round(math.sqrt(target_area * aspect_ratio)))\n new_h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if random.random() < 0.5:\n new_w, new_h = new_h, new_w\n\n if new_w <= w and new_h <= h:\n x1 = random.randint(0, w - new_w)\n y1 = random.randint(0, h - new_h)\n\n scaled_clips = np.zeros((self.size,self.size,c))\n if is_color:\n num_imgs = int(c / self.c)\n for frame_id in range(num_imgs):\n cur_img = clips[:,:,frame_id*self.c:frame_id*self.c+self.c]\n crop_img = cur_img[y1:y1+new_h, x1:x1+new_w, :]\n assert(crop_img.shape == (new_h, new_w, self.c))\n scaled_clips[:,:,frame_id*self.c:frame_id*self.c+self.c] = cv2.resize(crop_img, (self.size, self.size), self.interpolation)\n return scaled_clips\n else:\n num_imgs = int(c / 1)\n for frame_id in range(num_imgs):\n cur_img = clips[:,:,frame_id:frame_id+1]\n crop_img = cur_img[y1:y1+new_h, x1:x1+new_w, :]\n assert(crop_img.shape == (new_h, new_w, 1))\n scaled_clips[:,:,frame_id:frame_id+1] = cv2.resize(crop_img, (self.size, self.size), self.interpolation)\n return scaled_clips\n\n # Fallback\n scale = Scale(self.size, interpolation=self.interpolation)\n crop = CenterCrop(self.size)\n return crop(scale(clips))\n\nclass MultiScaleCrop(object):\n \"\"\"\n Description: Corner cropping and multi-scale cropping. Two data augmentation techniques introduced in:\n Towards Good Practices for Very Deep Two-Stream ConvNets,\n http://arxiv.org/abs/1507.02159\n Limin Wang, Yuanjun Xiong, Zhe Wang and Yu Qiao\n\n Parameters:\n size: height and width required by network input, e.g., (224, 224)\n scale_ratios: efficient scale jittering, e.g., [1.0, 0.875, 0.75, 0.66]\n fix_crop: use corner cropping or not. Default: True\n more_fix_crop: use more corners or not. Default: True\n max_distort: maximum distortion. 
Default: 1\n interpolation: Default: cv2.INTER_LINEAR\n \"\"\"\n\n def __init__(self, size, scale_ratios, fix_crop=True, more_fix_crop=True, max_distort=1, interpolation=cv2.INTER_LINEAR,c=3):\n self.height = size[0]\n self.width = size[1]\n self.scale_ratios = scale_ratios\n self.fix_crop = fix_crop\n self.more_fix_crop = more_fix_crop\n self.max_distort = max_distort\n self.interpolation = interpolation\n self.c = c \n\n def fillFixOffset(self, datum_height, datum_width):\n h_off = int((datum_height - self.height) / 4)\n w_off = int((datum_width - self.width) / 4)\n\n offsets = []\n offsets.append((0, 0)) # upper left\n offsets.append((0, 4*w_off)) # upper right\n offsets.append((4*h_off, 0)) # lower left\n offsets.append((4*h_off, 4*w_off)) # lower right\n offsets.append((2*h_off, 2*w_off)) # center\n\n if self.more_fix_crop:\n offsets.append((0, 2*w_off)) # top center\n offsets.append((4*h_off, 2*w_off)) # bottom center\n offsets.append((2*h_off, 0)) # left center\n offsets.append((2*h_off, 4*w_off)) # right center\n\n offsets.append((1*h_off, 1*w_off)) # upper left quarter\n offsets.append((1*h_off, 3*w_off)) # upper right quarter\n offsets.append((3*h_off, 1*w_off)) # lower left quarter\n offsets.append((3*h_off, 3*w_off)) # lower right quarter\n\n return offsets\n\n def fillCropSize(self, input_height, input_width):\n crop_sizes = []\n base_size = np.min((input_height, input_width))\n scale_rates = self.scale_ratios\n for h in range(len(scale_rates)):\n crop_h = int(base_size * scale_rates[h])\n for w in range(len(scale_rates)):\n crop_w = int(base_size * scale_rates[w])\n # append this cropping size into the list\n if (np.absolute(h-w) <= self.max_distort):\n crop_sizes.append((crop_h, crop_w))\n\n return crop_sizes\n\n def __call__(self, clips, selectedRegionOutput=False):\n\n\n h, w, c = clips.shape\n is_color = False\n if c % self.c == 0:\n is_color = True\n\n crop_size_pairs = self.fillCropSize(h, w)\n size_sel = random.randint(0, len(crop_size_pairs)-1)\n crop_height = crop_size_pairs[size_sel][0]\n crop_width = crop_size_pairs[size_sel][1]\n\n if self.fix_crop:\n offsets = self.fillFixOffset(h, w)\n off_sel = random.randint(0, len(offsets)-1)\n h_off = offsets[off_sel][0]\n w_off = offsets[off_sel][1]\n else:\n h_off = random.randint(0, h - self.height)\n w_off = random.randint(0, w - self.width)\n\n scaled_clips = np.zeros((self.height,self.width,c))\n if is_color:\n num_imgs = int(c / self.c)\n for frame_id in range(num_imgs):\n cur_img = clips[:,:,frame_id*self.c:frame_id*self.c+self.c]\n crop_img = cur_img[h_off:h_off+crop_height, w_off:w_off+crop_width, :]\n crop_img_resized = cv2.resize(crop_img, (self.width, self.height), self.interpolation) \n scaled_clips[:,:,frame_id*self.c:frame_id*self.c+self.c] = crop_img_resized \n\n if not selectedRegionOutput:\n return scaled_clips\n else:\n return scaled_clips, off_sel\n else:\n num_imgs = int(c / 1)\n for frame_id in range(num_imgs):\n cur_img = clips[:,:,frame_id:frame_id+1]\n crop_img = cur_img[h_off:h_off+crop_height, w_off:w_off+crop_width, :]\n scaled_clips[:,:,frame_id:frame_id+1] = np.expand_dims(cv2.resize(crop_img, (self.width, self.height), self.interpolation), axis=2)\n if not selectedRegionOutput:\n return scaled_clips\n else:\n return scaled_clips, off_sel\n\n\nclass MultiScaleFixedCrop(object):\n\n def __init__(self, size, interpolation=cv2.INTER_LINEAR):\n self.height = size[0]\n self.width = size[1]\n self.interpolation = interpolation\n\n def fillFixOffset(self, datum_height, datum_width):\n h_off = 
int((datum_height - self.height) / 4)\n w_off = int((datum_width - self.width) / 4)\n\n offsets = []\n offsets.append((0, 0)) # upper left\n offsets.append((0, 4*w_off)) # upper right\n offsets.append((4*h_off, 0)) # lower left\n offsets.append((4*h_off, 4*w_off)) # lower right\n offsets.append((2*h_off, 2*w_off)) # center\n\n return offsets\n\n\n def __call__(self, clips, selectedRegionOutput=False):\n h, w, c = clips.shape\n is_color = False\n if c % 3 == 0:\n is_color = True\n\n crop_height = 224\n crop_width = 224\n\n\n offsets = self.fillFixOffset(h, w)\n scaled_clips_list = []\n for offset in offsets:\n h_off = offset[0]\n w_off = offset[1]\n \n \n scaled_clips = np.zeros((self.height,self.width,c))\n scaled_clips_flips = np.zeros((self.height,self.width,c))\n if is_color:\n num_imgs = int(c / 3)\n for frame_id in range(num_imgs):\n cur_img = clips[:,:,frame_id*3:frame_id*3+3]\n crop_img = cur_img[h_off:h_off+crop_height, w_off:w_off+crop_width, :]\n scaled_clips[:,:,frame_id*3:frame_id*3+3] = cv2.resize(crop_img, (self.width, self.height), self.interpolation)\n scaled_clips_flips = scaled_clips[:,::-1,:].copy()\n else:\n num_imgs = int(c / 1)\n for frame_id in range(num_imgs):\n cur_img = clips[:,:,frame_id:frame_id+1]\n crop_img = cur_img[h_off:h_off+crop_height, w_off:w_off+crop_width, :]\n scaled_clips[:,:,frame_id:frame_id+1] = np.expand_dims(cv2.resize(crop_img, (self.width, self.height), self.interpolation), axis=2)\n scaled_clips_flips = scaled_clips[:,::-1,:].copy()\n \n scaled_clips_list.append(np.expand_dims(scaled_clips,-1))\n scaled_clips_list.append(np.expand_dims(scaled_clips_flips,-1))\n return np.concatenate(scaled_clips_list,axis=-1)\n\nclass rawPoseAugmentation(object):\n def __init__(self, scale_ratios):\n self.possible_scale_tuples = []\n self.scale_ratios = scale_ratios\n for i in range(len(scale_ratios)):\n for j in range(len(scale_ratios)):\n if np.abs(i-j) < 2:\n scale_ration_height = self.scale_ratios[i]\n scale_ration_width = self.scale_ratios[j]\n self.possible_scale_tuples.append((scale_ration_height, scale_ration_width))\n self.length_possible_scale_tuples = len(self.possible_scale_tuples)\n def __call__(self, poses):\n selected_random_scale_tuple_index = np.random.randint(self.length_possible_scale_tuples)\n selected_scale_height = self.possible_scale_tuples[selected_random_scale_tuple_index][0]\n selected_scale_width = self.possible_scale_tuples[selected_random_scale_tuple_index][1]\n random_crop_height_start = np.random.uniform(0,1-selected_scale_height)\n random_crop_width_start = np.random.uniform(0,1-selected_scale_width)\n# pos_not_touched = poses.copy()\n check_width = poses[:,:,0,:] > random_crop_width_start + selected_scale_width\n check_height = poses[:,:,1,:] > random_crop_height_start + selected_scale_height\n check = np.logical_or(check_width,check_height)\n check = np.expand_dims(check, 2)\n check = np.concatenate((check,check),2)\n poses[check] = 0\n poses[:,:,0,:] -= random_crop_width_start\n poses[:,:,1,:] -= random_crop_height_start\n poses[poses < 0] = None\n poses[:,:,0,:] /= selected_scale_width\n poses[:,:,1,:] /= selected_scale_height\n if len(poses[poses>1]) > 0:\n print('basdasd')\n return poses\n \nclass pose_one_hot_decoding(object):\n def __init__(self,length):\n self.space = 0.1\n self.number_of_people = 1\n self.total_bins = self.number_of_people * 25\n self.one_hot_vector_length_per_joint = (1/self.space ) ** 2 \n self.one_hot_vector_length = int(self.total_bins * self.one_hot_vector_length_per_joint + 1)\n self.one_hot = 
np.zeros(self.one_hot_vector_length)\n self.length = length\n self.onehot_multiplication = np.repeat(range(self.total_bins), length).reshape(self.total_bins,length)\n def __call__(self, poses):\n poses = poses.reshape(-1,2,self.length)\n dim1 = np.floor(poses[:,0,:] / self.space)\n dim2 = np.floor(poses[:,1,:] / self.space)\n one_hot_values = (1/self.space ) * dim1 + dim2\n one_hot_values[np.isnan(one_hot_values)] = self.one_hot_vector_length_per_joint\n one_hot_values = one_hot_values * self.onehot_multiplication + one_hot_values\n one_hot_values[np.isnan(one_hot_values)] = self.one_hot_vector_length + 1\n \n return poses\n \nclass pose_one_hot_decoding2(object):\n def __init__(self,length):\n self.space = 1/32\n self.bin_number = int((1/self.space))\n self.number_of_people = 1\n self.total_bins = self.number_of_people * 25\n self.one_hot_vector_length = self.bin_number ** 2\n self.one_hot = np.zeros(self.one_hot_vector_length)\n self.length = length\n self.position_matrix = np.zeros([self.bin_number + 1, self.bin_number + 1, self.length])\n def __call__(self, poses):\n poses = poses.reshape(-1,2,self.length)\n dim1 = np.floor(poses[:,0,:] / self.space)\n dim2 = np.floor(poses[:,1,:] / self.space)\n dim1[np.isnan(dim1)] = self.bin_number\n dim2[np.isnan(dim2)] = self.bin_number\n dim1 = dim1.astype(np.int)\n dim2 = dim2.astype(np.int)\n for i in range(self.length):\n try:\n self.position_matrix[dim1[:,i], dim2[:,i], i] = 1\n except:\n print('hasdasd')\n one_hot_encoding = self.position_matrix[:self.bin_number, :self.bin_number, :]\n one_hot_encoding = one_hot_encoding.reshape(-1,self.length)\n one_hot_encoding_torch = torch.from_numpy(one_hot_encoding.transpose((1,0))).float()\n \n \n return one_hot_encoding_torch\n \nclass ToTensorPose(object):\n\n def __call__(self, clips):\n if isinstance(clips, np.ndarray):\n # handle numpy ar\n clips = clips - 0.5\n clips[np.isnan(clips)] = 0\n clips = torch.from_numpy(clips.transpose((3,0,1,2))).float()\n # backward compatibility\n return clips\n \n \n"
]
| [
[
"numpy.concatenate",
"numpy.logical_or",
"numpy.array",
"numpy.uint8",
"numpy.random.binomial",
"numpy.isnan",
"numpy.zeros",
"numpy.ascontiguousarray",
"numpy.min",
"numpy.flipud",
"numpy.fliplr",
"numpy.random.uniform",
"numpy.random.randint",
"torch.tensor",
"numpy.abs",
"numpy.absolute",
"numpy.expand_dims",
"numpy.floor"
]
]
|
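
Illustrative sketch (not part of the dataset record above): the corner-cropping scheme documented by `MultiScaleCrop` picks one of a fixed set of offsets and applies the same crop to every stacked frame of a clip. The clip shape and the helper name below are made up; only numpy and the standard library are assumed.

```python
import random
import numpy as np

def corner_offsets(h, w, crop_h, crop_w, more=False):
    """The 5 (or 13, with more=True) fixed offsets used by corner cropping."""
    h_off, w_off = (h - crop_h) // 4, (w - crop_w) // 4
    offs = [(0, 0), (0, 4 * w_off), (4 * h_off, 0),
            (4 * h_off, 4 * w_off), (2 * h_off, 2 * w_off)]
    if more:
        offs += [(0, 2 * w_off), (4 * h_off, 2 * w_off),
                 (2 * h_off, 0), (2 * h_off, 4 * w_off),
                 (h_off, w_off), (h_off, 3 * w_off),
                 (3 * h_off, w_off), (3 * h_off, 3 * w_off)]
    return offs

clip = np.zeros((256, 340, 3 * 10), dtype=np.uint8)  # 10 stacked RGB frames
h, w, c = clip.shape
crop_h = crop_w = 224
y0, x0 = random.choice(corner_offsets(h, w, crop_h, crop_w, more=True))
crop = clip[y0:y0 + crop_h, x0:x0 + crop_w, :]       # same crop for all frames
print(crop.shape)                                    # (224, 224, 30)
```
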
mitming/mmselfsup | [
"5b5cb474776291cfcb9a1140afd11b696e11fcab"
]
| [
"tests/test_models/test_algorithms/test_cae.py"
]
| [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport platform\n\nimport pytest\nimport torch\n\nfrom mmselfsup.models.algorithms import CAE\n\n# model settings\nbackbone = dict(type='CAEViT', arch='b', patch_size=16, init_values=0.1)\nneck = dict(\n type='CAENeck',\n patch_size=16,\n embed_dims=768,\n num_heads=12,\n regressor_depth=4,\n decoder_depth=4,\n mlp_ratio=4,\n init_values=0.1,\n)\nhead = dict(\n type='CAEHead', tokenizer_path='cae_ckpt/encoder_stat_dict.pth', lambd=2)\n\n\[email protected](platform.system() == 'Windows', reason='Windows mem limit')\ndef test_cae():\n with pytest.raises(AssertionError):\n model = CAE(backbone=None, neck=neck, head=head)\n with pytest.raises(AssertionError):\n model = CAE(backbone=backbone, neck=None, head=head)\n with pytest.raises(AssertionError):\n model = CAE(backbone=backbone, neck=neck, head=None)\n\n model = CAE(backbone=backbone, neck=neck, head=head)\n model.init_weights()\n\n fake_input = torch.rand((1, 3, 224, 224))\n fake_target = torch.rand((1, 3, 112, 112))\n fake_mask = torch.zeros((1, 196)).bool()\n fake_mask[:, 75:150] = 1\n\n inputs = (fake_input, fake_target, fake_mask)\n\n fake_loss = model.forward_train(inputs)\n fake_feat = model.extract_feat(fake_input, fake_mask)\n assert isinstance(fake_loss['loss'].item(), float)\n assert list(fake_feat.shape) == [1, 122, 768]\n"
]
| [
[
"torch.zeros",
"torch.rand"
]
]
|
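
A small side sketch (synthetic shapes only, not taken from the mmselfsup codebase): the boolean patch mask built in the test above marks 75 of the 196 tokens of a 224x224, patch-16 ViT input.

```python
import torch

num_patches = (224 // 16) ** 2            # 196 patches for a ViT-B/16 input
fake_mask = torch.zeros((1, num_patches)).bool()
fake_mask[:, 75:150] = 1                  # mark patches 75..149 as masked
print(num_patches, int(fake_mask.sum()))  # 196, 75
```
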
Freakwill/pyrimidine | [
"ff05998f110a69a002180d0dae2ae514a5807cfb"
]
| [
"pyrimidine/benchmarks/cluster.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport numpy.linalg as LA\n\nclass KMeans:\n \"\"\"KMeans clustering Problem\n\n min J(c,mu)=sum_i ||xi-mu_ci||\n where xi in ci\n \"\"\"\n def __init__(self, X, n_components=2):\n self.X = X\n self.n_components = n_components\n\n @staticmethod\n def random(N, p=2):\n X1 = np.random.normal(1, 1, (N, p))\n X2 = np.random.normal(2, 1, (N, p))\n X = np.vstack((X1, X2))\n return KMeans(X, n_components=2)\n\n\n def __call__(self, x):\n cs = set(x)\n xs = {c:[self.X[i] for i, k in enumerate(x) if k==c] for c in cs}\n # mus = {c:np.mean(x, axis=0) for c, x in xs.items()}\n J = np.mean([np.sum([LA.norm(xi - np.mean(x, axis=0)) for xi in x]) for c, x in xs.items()])\n return J\n\n# from scipy.stats import norm\n\n# class MixGaussian:\n# \"\"\"Mix Gaussian clustering Problem\n# X ~ sum a_i p(x|mu_i, S_i)\n\n# max L(ci,{mu_i,S_i})= prod_k p(xk|ci, {mu_i, S_i}) = prod_k sum_i a_i p(xk|mu_i, S_i)\n# \"\"\"\n# def __init__(self, X, n_components=2):\n# self.X = X\n# self.n_components = n_components\n\n# @staticmethod\n# def random(N, p=2):\n# X1 = norm.rvs(size=(N, p))\n# X2 = norm.rvs(loc=2, size=(N, p))\n# X = np.vstack((X1, X2))\n# return MixGaussian(X, n_components=2)\n\n\n# def __call__(self, t):\n# cs = set(t)\n# xs = {c:[self.X[i] for i, k in enumerate(x) if k==c] for c in cs}\n# # mus = {c:np.mean(x, axis=0) for c, x in xs.items()}\n# J = np.sum([np.prod([norm.pdf(xi, ti) for xi in x]) for c, x in xs.items()])\n# return J\n"
]
| [
[
"numpy.random.normal",
"numpy.mean",
"numpy.vstack"
]
]
|
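
Worked sketch (standalone, synthetic data; the variable names are made up): evaluating the K-means objective J for one candidate label assignment, mirroring `KMeans.__call__` above, which averages the within-cluster sums of distances to each cluster centroid.

```python
import numpy as np

rng = np.random.default_rng(0)
X = np.vstack((rng.normal(1, 1, (50, 2)), rng.normal(2, 1, (50, 2))))
labels = rng.integers(0, 2, size=len(X))      # one candidate assignment

# mean over clusters of the within-cluster sum of distances to the centroid
J = np.mean([
    np.linalg.norm(X[labels == c] - X[labels == c].mean(axis=0), axis=1).sum()
    for c in np.unique(labels)
])
print(J)
```
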
ishine/Cross-Speaker-Emotion-Transfer | [
"9d38e8058f5abc06167bac244d8ace083e2a6220"
]
| [
"model/loss.py"
]
| [
"import torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass XSpkEmoTransLoss(nn.Module):\r\n \"\"\" XSpkEmoTrans Loss \"\"\"\r\n\r\n def __init__(self, preprocess_config, model_config, train_config):\r\n super(XSpkEmoTransLoss, self).__init__()\r\n self.alpha = train_config[\"loss\"][\"alpha\"]\r\n self.beta = train_config[\"loss\"][\"beta\"]\r\n self.mse_loss = nn.MSELoss()\r\n self.mae_loss = nn.L1Loss()\r\n self.bce_loss = nn.BCELoss()\r\n\r\n def forward(self, inputs, predictions):\r\n (\r\n mel_targets,\r\n _,\r\n _,\r\n _,\r\n _,\r\n duration_targets,\r\n *_,\r\n ) = inputs[6:]\r\n (\r\n mel_iters,\r\n score_hard,\r\n score_soft,\r\n log_duration_predictions,\r\n _,\r\n src_masks,\r\n mel_masks,\r\n _,\r\n mel_lens,\r\n ) = predictions\r\n src_masks = ~src_masks\r\n mel_masks = ~mel_masks\r\n log_duration_targets = torch.log(duration_targets.float() + 1)\r\n mel_targets = mel_targets[:, : mel_masks.shape[1], :]\r\n mel_masks = mel_masks[:, :mel_masks.shape[1]]\r\n\r\n log_duration_targets.requires_grad = False\r\n mel_targets.requires_grad = False\r\n score_hard.requires_grad = False\r\n mel_lens.requires_grad = False\r\n\r\n log_duration_predictions = log_duration_predictions.masked_select(\r\n src_masks)\r\n log_duration_targets = log_duration_targets.masked_select(src_masks)\r\n\r\n mel_targets = mel_targets.masked_select(mel_masks.unsqueeze(-1))\r\n\r\n # Iterative Loss\r\n mel_iter_loss = torch.zeros_like(mel_lens, dtype=mel_targets.dtype)\r\n for mel_iter in mel_iters:\r\n mel_iter_loss += self.mae_loss(mel_iter.masked_select(\r\n mel_masks.unsqueeze(-1)), mel_targets)\r\n mel_loss = (mel_iter_loss / (len(mel_iters) * mel_lens)).mean()\r\n\r\n emotion_classifier_loss = self.bce_loss(score_soft, score_hard)\r\n\r\n duration_loss = self.mse_loss(\r\n log_duration_predictions, log_duration_targets)\r\n\r\n total_loss = (\r\n mel_loss + self.alpha * emotion_classifier_loss + self.beta * duration_loss\r\n )\r\n\r\n return (\r\n total_loss,\r\n mel_loss,\r\n emotion_classifier_loss,\r\n duration_loss,\r\n )\r\n"
]
| [
[
"torch.zeros_like",
"torch.nn.MSELoss",
"torch.nn.BCELoss",
"torch.nn.L1Loss"
]
]
|
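
Minimal sketch (synthetic tensors, hypothetical sequence lengths): the masking pattern used in `forward()` above, where padded positions are dropped with `masked_select` before the duration loss is computed.

```python
import torch
import torch.nn as nn

log_d_pred = torch.randn(2, 5)            # (batch, max_src_len)
log_d_tgt = torch.randn(2, 5)
src_lens = torch.tensor([5, 3])           # true length of each utterance
src_masks = torch.arange(5)[None, :] >= src_lens[:, None]   # True = padding
keep = ~src_masks                         # analogous to src_masks = ~src_masks

loss = nn.MSELoss()(log_d_pred.masked_select(keep),
                    log_d_tgt.masked_select(keep))
print(loss)
```
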
DeepHiveMind/autokeras | [
"735fce7f5b21befc5d20b768df628951c619a086"
]
| [
"autokeras/tasks/time_series_forecaster.py"
]
| [
"import pandas as pd\n\nfrom autokeras import auto_model\nfrom autokeras import blocks\nfrom autokeras import nodes as input_module\nfrom autokeras.tasks.structured_data_mixin import StructuredDataMixin\nfrom autokeras.tuners import greedy\n\n\nclass SupervisedTimeseriesDataPipeline(StructuredDataMixin, auto_model.AutoModel):\n\n def __init__(self,\n outputs,\n column_names=None,\n column_types=None,\n lookback=None,\n predict_from=1,\n predict_until=None,\n **kwargs):\n inputs = input_module.TimeseriesInput(lookback=lookback,\n column_names=column_names,\n column_types=column_types)\n self.check(column_names, column_types)\n super().__init__(inputs=inputs,\n outputs=outputs,\n **kwargs)\n self.predict_from = predict_from\n self.predict_until = predict_until\n self._target_col_name = None\n self.train_len = 0\n\n @staticmethod\n def _read_from_csv(x, y):\n df = pd.read_csv(x)\n target = df.pop(y).dropna().to_numpy()\n return df, target\n\n def fit(self,\n x=None,\n y=None,\n epochs=None,\n callbacks=None,\n validation_split=0.2,\n validation_data=None,\n **kwargs):\n # x is file path of training data\n if isinstance(x, str):\n self._target_col_name = y\n x, y = self._read_from_csv(x, y)\n if validation_data:\n x_val, y_val = validation_data\n if isinstance(x_val, str):\n validation_data = self._read_from_csv(x_val, y_val)\n\n self.train_len = len(y)\n\n super().fit(x=x[:self.train_len],\n y=y[self.lookback-1:],\n epochs=epochs,\n callbacks=callbacks,\n validation_split=validation_split,\n validation_data=validation_data,\n **kwargs)\n\n def predict(self, x, batch_size=32, **kwargs):\n x = self.read_for_predict(x)\n y_pred = super().predict(x=x,\n batch_size=batch_size,\n **kwargs)\n lower_bound = self.train_len + self.predict_from\n if self.predict_until is None:\n self.predict_until = len(y_pred)\n upper_bound = min(self.train_len + self.predict_until + 1, len(y_pred))\n return y_pred[lower_bound:upper_bound]\n\n def evaluate(self, x, y=None, batch_size=32, **kwargs):\n \"\"\"Evaluate the best model for the given data.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Testing data x. If the data is from a csv file, it should be a\n string specifying the path of the csv file of the testing data.\n y: String, numpy.ndarray, or tensorflow.Dataset. Testing data y.\n If the data is from a csv file, it should be a string corresponding\n to the label column.\n batch_size: Int. Defaults to 32.\n **kwargs: Any arguments supported by keras.Model.evaluate.\n\n # Returns\n Scalar test loss (if the model has a single output and no metrics) or\n list of scalars (if the model has multiple outputs and/or metrics).\n The attribute model.metrics_names will give you the display labels for\n the scalar outputs.\n \"\"\"\n if isinstance(x, str):\n x, y = self._read_from_csv(x, y)\n return super().evaluate(x=x[:len(y)],\n y=y[self.lookback-1:],\n batch_size=batch_size,\n **kwargs)\n\n\nclass TimeseriesForecaster(SupervisedTimeseriesDataPipeline):\n \"\"\"AutoKeras time series data forecast class.\n\n # Arguments\n column_names: A list of strings specifying the names of the columns. The\n length of the list should be equal to the number of columns of the data.\n Defaults to None. If None, it will be obtained from the header of the csv\n file or the pandas.DataFrame.\n column_types: Dict. The keys are the column names. The values should either\n be 'numerical' or 'categorical', indicating the type of that column.\n Defaults to None. 
If not None, the column_names need to be specified.\n If None, it will be inferred from the data.\n lookback: Int. The range of history steps to consider for each prediction.\n For example, if lookback=n, the data in the range of [i - n, i - 1]\n is used to predict the value of step i. If unspecified, it will be tuned\n automatically.\n predict_from: Int. The starting point of the forecast for each sample (in\n number of steps) after the last time step in the input. If N is the last\n step in the input, then the first step of the predicted output will be\n N + predict_from. Defaults to 1 (which corresponds to starting the\n forecast immediately after the last step in the input).\n predict_until: Int. The end point of the forecast for each sample (in number\n of steps) after the last time step in the input. If N is the last step in\n the input, then the last step of the predicted output will be\n N + predict_until. If unspecified, it will predict till end of dataset.\n Defaults to None.\n loss: A Keras loss function. Defaults to use 'mean_squared_error'.\n metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.\n project_name: String. The name of the AutoModel. Defaults to\n 'time_series_forecaster'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.\n overwrite: Boolean. Defaults to `True`. If `False`, reloads an existing\n project of the same name if one is found. Otherwise, overwrites the\n project.\n seed: Int. Random seed.\n **kwargs: Any arguments supported by AutoModel.\n \"\"\"\n\n def __init__(self,\n output_dim=None,\n column_names=None,\n column_types=None,\n lookback=None,\n predict_from=1,\n predict_until=None,\n loss='mean_squared_error',\n metrics=None,\n project_name='time_series_forecaster',\n max_trials=100,\n directory=None,\n objective='val_loss',\n overwrite=True,\n seed=None,\n **kwargs):\n super().__init__(outputs=blocks.RegressionHead(output_dim=output_dim,\n loss=loss,\n metrics=metrics),\n column_names=column_names,\n column_types=column_types,\n lookback=lookback,\n predict_from=predict_from,\n predict_until=predict_until,\n project_name=project_name,\n max_trials=max_trials,\n directory=directory,\n objective=objective,\n tuner=greedy.Greedy,\n overwrite=overwrite,\n seed=seed,\n **kwargs)\n self.lookback = lookback\n self.predict_from = predict_from\n self.predict_until = predict_until\n\n def fit(self,\n x=None,\n y=None,\n validation_split=0.2,\n validation_data=None,\n **kwargs):\n \"\"\"Search for the best model and hyperparameters for the task.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Training data x. If the data is from a csv file, it should be a\n string specifying the path of the csv file of the training data.\n y: String, a list of string(s), numpy.ndarray, pandas.DataFrame or\n tensorflow.Dataset. Training data y.\n If the data is from a csv file, it should be a list of string(s)\n specifying the name(s) of the column(s) need to be forecasted.\n If it is multivariate forecasting, y should be a list of more than\n one column names. 
If it is univariate forecasting, y should be a\n string or a list of one string.\n validation_split: Float between 0 and 1. Defaults to 0.2.\n Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data,\n will not train on it, and will evaluate\n the loss and any model metrics\n on this data at the end of each epoch.\n The validation data is selected from the last samples\n in the `x` and `y` data provided, before shuffling. This argument is\n not supported when `x` is a dataset.\n The best model found would be fit on the entire dataset including the\n validation data.\n validation_data: Data on which to evaluate the loss and any model metrics\n at the end of each epoch. The model will not be trained on this data.\n `validation_data` will override `validation_split`. The type of the\n validation data should be the same as the training data.\n The best model found would be fit on the training dataset without the\n validation data.\n **kwargs: Any arguments supported by keras.Model.fit.\n \"\"\"\n super().fit(x=x,\n y=y,\n validation_split=validation_split,\n validation_data=validation_data,\n **kwargs)\n\n def predict(self, x=None, batch_size=32, **kwargs):\n \"\"\"Predict the output for a given testing data.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Testing data x. If the data is from a csv file, it should be a\n string specifying the path of the csv file of the testing data.\n batch_size: Int. Defaults to 32.\n **kwargs: Any arguments supported by keras.Model.predict.\n\n # Returns\n A list of numpy.ndarray objects or a single numpy.ndarray.\n The predicted results.\n \"\"\"\n return super().predict(x=x, batch_size=batch_size, **kwargs)\n\n def fit_and_predict(self,\n x=None,\n y=None,\n validation_split=0.2,\n validation_data=None,\n batch_size=32,\n **kwargs):\n \"\"\"Search for the best model and then predict for remaining data points.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Training data x. If the data is from a csv file, it should be a\n string specifying the path of the csv file of the training data.\n y: String, a list of string(s), numpy.ndarray, pandas.DataFrame or\n tensorflow.Dataset. Training data y.\n If the data is from a csv file, it should be a list of string(s)\n specifying the name(s) of the column(s) need to be forecasted.\n If it is multivariate forecasting, y should be a list of more than\n one column names. If it is univariate forecasting, y should be a\n string or a list of one string.\n validation_split: Float between 0 and 1. Defaults to 0.2.\n Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data,\n will not train on it, and will evaluate\n the loss and any model metrics\n on this data at the end of each epoch.\n The validation data is selected from the last samples\n in the `x` and `y` data provided, before shuffling. This argument is\n not supported when `x` is a dataset.\n The best model found would be fit on the entire dataset including the\n validation data.\n validation_data: Data on which to evaluate the loss and any model metrics\n at the end of each epoch. The model will not be trained on this data.\n `validation_data` will override `validation_split`. The type of the\n validation data should be the same as the training data.\n The best model found would be fit on the training dataset without the\n validation data.\n batch_size: Int. 
Defaults to 32.\n **kwargs: Any arguments supported by keras.Model.fit.\n \"\"\"\n self.fit(x=x,\n y=y,\n validation_split=validation_split,\n validation_data=validation_data,\n **kwargs)\n\n return self.predict(x=x, batch_size=batch_size)\n\n\nclass TimeseriesClassifier(SupervisedTimeseriesDataPipeline):\n \"\"\"\"AutoKeras time series data classification class.\n\n # Arguments\n column_names: A list of strings specifying the names of the columns. The\n length of the list should be equal to the number of columns of the data.\n Defaults to None. If None, it will be obtained from the header of the csv\n file or the pandas.DataFrame.\n column_types: Dict. The keys are the column names. The values should either\n be 'numerical' or 'categorical', indicating the type of that column.\n Defaults to None. If not None, the column_names need to be specified.\n If None, it will be inferred from the data.\n lookback: Int. The range of history steps to consider for each prediction.\n For example, if lookback=n, the data in the range of [i - n, i - 1]\n is used to predict the value of step i. If unspecified, it will be tuned\n automatically.\n predict_from: Int. The starting point of the forecast for each sample (in\n number of steps) after the last time step in the input. If N is the last\n step in the input, then the first step of the predicted output will be\n N + predict_from. Defaults to 1 (which corresponds to starting the\n forecast immediately after the last step in the input).\n predict_until: Int. The end point of the forecast for each sample (in number\n of steps) after the last time step in the input. If N is the last step in\n the input, then the last step of the predicted output will be\n N + predict_until. If unspecified, it will predict till end of dataset.\n Defaults to None.\n loss: A Keras loss function. Defaults to use 'mean_squared_error'.\n metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.\n project_name: String. The name of the AutoModel. Defaults to\n 'time_series_forecaster'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.\n overwrite: Boolean. Defaults to `True`. If `False`, reloads an existing\n project of the same name if one is found. Otherwise, overwrites the\n project.\n seed: Int. Random seed.\n **kwargs: Any arguments supported by AutoModel.\n \"\"\"\n\n def __init__(self,\n output_dim=None,\n column_names=None,\n column_types=None,\n lookback=None,\n predict_from=1,\n predict_until=None,\n loss='mean_squared_error',\n metrics=None,\n project_name='time_series_classifier',\n max_trials=100,\n directory=None,\n objective='val_loss',\n overwrite=True,\n seed=None,\n **kwargs):\n raise NotImplementedError\n\n def fit(self,\n x=None,\n y=None,\n validation_split=0.2,\n validation_data=None,\n **kwargs):\n \"\"\"Search for the best model and hyperparameters for the task.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Training data x. 
If the data is from a csv file, it should be a\n string specifying the path of the csv file of the training data.\n y: String, a list of string(s), numpy.ndarray, pandas.DataFrame or\n tensorflow.Dataset. Training data y.\n If the data is from a csv file, it should be a list of string(s)\n specifying the name(s) of the column(s) need to be forecasted.\n If it is multivariate forecasting, y should be a list of more than\n one column names. If it is univariate forecasting, y should be a\n string or a list of one string.\n validation_split: Float between 0 and 1. Defaults to 0.2.\n Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data,\n will not train on it, and will evaluate\n the loss and any model metrics\n on this data at the end of each epoch.\n The validation data is selected from the last samples\n in the `x` and `y` data provided, before shuffling. This argument is\n not supported when `x` is a dataset.\n The best model found would be fit on the entire dataset including the\n validation data.\n validation_data: Data on which to evaluate the loss and any model metrics\n at the end of each epoch. The model will not be trained on this data.\n `validation_data` will override `validation_split`. The type of the\n validation data should be the same as the training data.\n The best model found would be fit on the training dataset without the\n validation data.\n **kwargs: Any arguments supported by keras.Model.fit.\n \"\"\"\n raise NotImplementedError\n\n def predict(self, x=None, batch_size=32, **kwargs):\n \"\"\"Predict the output for a given testing data.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Testing data x, it should also contain the training data used as,\n subsequent predictions depend on them. If the data is from a csv\n file, it should be a string specifying the path of the csv file\n of the testing data.\n batch_size: Int. Defaults to 32.\n **kwargs: Any arguments supported by keras.Model.predict.\n\n # Returns\n A list of numpy.ndarray objects or a single numpy.ndarray.\n The predicted results.\n \"\"\"\n raise NotImplementedError\n\n def fit_and_predict(self,\n x=None,\n y=None,\n validation_split=0.2,\n validation_data=None,\n batch_size=32,\n **kwargs):\n \"\"\"Search for the best model and then predict for remaining data points.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Training and Test data x. If the data is from a csv file, it\n should be a string specifying the path of the csv file of the\n training data.\n y: String, a list of string(s), numpy.ndarray, pandas.DataFrame or\n tensorflow.Dataset. Training data y.\n If the data is from a csv file, it should be a list of string(s)\n specifying the name(s) of the column(s) need to be forecasted.\n If it is multivariate forecasting, y should be a list of more than\n one column names. If it is univariate forecasting, y should be a\n string or a list of one string.\n validation_split: Float between 0 and 1. Defaults to 0.2.\n Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data,\n will not train on it, and will evaluate\n the loss and any model metrics\n on this data at the end of each epoch.\n The validation data is selected from the last samples\n in the `x` and `y` data provided, before shuffling. 
This argument is\n not supported when `x` is a dataset.\n The best model found would be fit on the entire dataset including the\n validation data.\n validation_data: Data on which to evaluate the loss and any model metrics\n at the end of each epoch. The model will not be trained on this data.\n `validation_data` will override `validation_split`. The type of the\n validation data should be the same as the training data.\n The best model found would be fit on the training dataset without the\n validation data.\n batch_size: Int. Defaults to 32.\n **kwargs: Any arguments supported by keras.Model.fit.\n \"\"\"\n raise NotImplementedError\n"
]
| [
[
"pandas.read_csv"
]
]
|
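
Hypothetical usage sketch for the `TimeseriesForecaster` defined above (assumes autokeras is installed; `sales.csv` and the `sales` target column are placeholder names, and the hyperparameter values are arbitrary).

```python
import autokeras as ak

forecaster = ak.TimeseriesForecaster(
    lookback=10,        # use the previous 10 steps for each prediction
    predict_from=1,     # start forecasting right after the last input step
    predict_until=10,   # forecast 10 steps ahead
    max_trials=1,
    objective="val_loss",
    overwrite=True,
)
# "sales.csv" / "sales" are placeholders for a CSV file and its target column.
forecaster.fit(x="sales.csv", y="sales", validation_split=0.2, epochs=10)
predictions = forecaster.predict(x="sales.csv")
```
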
walkerasindave/spotmidify | [
"f83bca08881bdf48e5e583e8931890e9eaf4069f"
]
| [
"spotify.py"
]
| [
"import time\nimport asyncio\nfrom typing import List\nimport threading\nimport numpy as np\n\n\nimport spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\n\nfrom eventhook import EventHook\n\n\nclass NotPlayingError(Exception):\n def __init__(self):\n self.message = \"Spotify not playing\"\n\n\nclass MonitorConfig:\n def __init__(\n self,\n refresh_accuracy_seconds: float = 1.0,\n refresh_max_delay_seconds: float = 30.0,\n refresh_next_event_divisor: float = 1.5,\n not_playing_refresh_seconds: float = 5.0,\n tick_accuracy_seconds: float = 0.25,\n tick_max_delay_seconds: float = 10.0,\n tick_next_event_divisor: float = 2.0,\n section_offset_seconds: float = 0.25,\n ):\n self.refresh_accuracy_seconds = refresh_accuracy_seconds\n self.refresh_max_delay_seconds = refresh_max_delay_seconds\n self.refresh_next_event_divisor = refresh_next_event_divisor\n self.not_playing_refresh_seconds = not_playing_refresh_seconds\n self.tick_accuracy_seconds = tick_accuracy_seconds\n self.tick_max_delay_seconds = tick_max_delay_seconds\n self.tick_next_event_divisor = tick_next_event_divisor\n self.section_offset_seconds = section_offset_seconds\n\n\nclass spotifyMonitor:\n def __init__(\n self,\n config: MonitorConfig = MonitorConfig(),\n debug: bool = False,\n ) -> None:\n self.config = config\n self.sp = self._generate_spotify_auth()\n self.on_track_change = EventHook()\n self.on_section_change = EventHook()\n self.on_stop = EventHook()\n self.current_track = {\"id\": None, \"progress\": 0.0, \"sections\": []}\n self.current_section = {\"id\": None, \"track_id\": None}\n self.next_section = {\"id\": None, \"track_id\": None}\n self._loop = asyncio.get_event_loop()\n self._last_tick = self._get_tick_time()\n self.debug = debug\n self._ticking = False\n self._playing = True\n\n def start(self):\n try:\n self._loop.call_soon(self._refresh)\n self._loop.run_forever()\n finally:\n self._loop.run_until_complete(self._loop.shutdown_asyncgens())\n self._loop.close()\n\n def stop(self):\n self._loop.stop()\n\n def _generate_spotify_auth(self) -> spotipy.Spotify:\n scope = \"user-read-playback-state\"\n return spotipy.Spotify(\n auth_manager=SpotifyOAuth(\n scope=scope,\n client_id=\"397df7bde7e64245bf93014ce0d36b4f\",\n client_secret=\"5d7d498988714957990b45afa47fdd36\",\n redirect_uri=\"http://127.0.0.1:9090\",\n )\n )\n\n def _refresh(self):\n try:\n self._refresh_track_status()\n self._playing = True\n if self.debug:\n print(\" Refresh {}\".format(self.current_track[\"progress\"]))\n if self._ticking == False:\n self._last_tick = self._get_tick_time()\n self._ticking = True\n self._loop.call_soon(self._tick)\n\n delay = (\n self.current_track[\"duration\"] - self.current_track[\"progress\"]\n ) / self.config.refresh_next_event_divisor\n if delay > self.config.refresh_max_delay_seconds:\n delay = self.config.refresh_max_delay_seconds\n elif delay < self.config.refresh_accuracy_seconds:\n delay = self.config.refresh_accuracy_seconds\n except NotPlayingError:\n if self._playing:\n self._playing = False\n self.on_stop.fire()\n\n delay = 5\n if self.debug:\n print(\" Refresh (not playing)\")\n\n self._loop.call_later(delay=delay, callback=self._refresh)\n\n def _tick(self):\n if self._playing:\n this_tick = self._get_tick_time()\n self.current_track[\"progress\"] += (this_tick - self._last_tick) / 1000\n self._last_tick = this_tick\n if self.debug:\n print(\" Tick {}\".format(self.current_track[\"progress\"]))\n\n current_section_id = self._calculate_current_section_id(self.current_track)\n\n if current_section_id != 
self.current_section[\"id\"]:\n section_info = self._calculate_section_info(\n self.current_track, current_section_id\n )\n self._trigger_section_change(self.current_track, section_info)\n self.current_section = section_info[\"current_section\"]\n self.next_section = section_info[\"next_section\"]\n\n delay = (\n self.next_section[\"start\"] - self.current_track[\"progress\"]\n ) / self.config.tick_next_event_divisor\n if delay > self.config.tick_max_delay_seconds:\n delay = self.config.tick_max_delay_seconds\n elif delay < self.config.tick_accuracy_seconds:\n delay = self.next_section[\"start\"] - self.current_track[\"progress\"]\n\n if delay < 0:\n delay = self.config.tick_accuracy_seconds\n self._loop.call_later(delay=delay, callback=self._tick)\n else:\n self._ticking = False\n\n def _get_tick_time(self) -> float:\n return time.time_ns() // 1000000\n\n def _refresh_track_status(self):\n current_track = self._get_current_track_status()\n track_change = self.current_track[\"id\"] != current_track[\"id\"]\n\n section_info = self._calculate_section_info(current_track)\n section_change = (\n self.current_section[\"id\"] != section_info[\"current_section\"][\"id\"]\n or self.current_section[\"track_id\"]\n != section_info[\"current_section\"][\"track_id\"]\n )\n\n if track_change:\n self._trigger_track_change(current_track, section_info)\n elif section_change:\n self._trigger_section_change(current_track, section_info)\n\n self.current_track = current_track\n self._last_tick = self._get_tick_time()\n self.current_section = section_info[\"current_section\"]\n self.next_section = section_info[\"next_section\"]\n\n def _trigger_track_change(self, track, section_info):\n nth = threading.Thread(\n target=self.on_track_change.fire(\n previous_track=self.current_track,\n current_track=track,\n current_section=section_info[\"current_section\"],\n next_section=section_info[\"next_section\"],\n )\n )\n nth.start()\n\n def _trigger_section_change(self, track, section_info):\n nth = threading.Thread(\n target=self.on_section_change.fire(\n current_track=track,\n current_section=section_info[\"current_section\"],\n next_section=section_info[\"next_section\"],\n )\n )\n nth.start()\n\n def _get_current_track_status(self) -> dict:\n track = self._get_spotify_currently_playing()\n\n if track[\"id\"] != self.current_track[\"id\"]:\n track_info = self._get_spotify_track_info(track_id=track[\"id\"])\n track_features = self._get_spotify_track_features(track_id=track[\"id\"])\n current_track = {**track, **track_info, **track_features}\n else:\n current_track = self.current_track\n current_track[\"progress\"] = track[\"progress\"]\n\n return current_track\n\n def _calculate_section_info(self, track, current_section_id: int = None) -> dict:\n if not current_section_id:\n current_section_id = self._calculate_current_section_id(track)\n track_sections = track[\"sections\"]\n\n section = {\n **{\"id\": current_section_id, \"track_id\": track[\"id\"]},\n **track_sections[current_section_id],\n }\n if current_section_id + 1 < len(track_sections):\n next_section = track_sections[current_section_id + 1]\n else:\n next_section = {\n \"id\": 0,\n \"track_id\": None,\n \"tempo\": None,\n \"loudness\": None,\n \"start\": track[\"duration\"],\n }\n return {\"current_section\": section, \"next_section\": next_section}\n\n def _calculate_current_section_id(self, track) -> int:\n current_section_id = 0\n for index, section in enumerate(track[\"sections\"]):\n if section[\"start\"] < track[\"progress\"]:\n current_section_id = 
index\n if section[\"start\"] > track[\"progress\"]:\n break\n\n return current_section_id\n\n def _get_spotify_currently_playing(self) -> dict:\n # print(\" CALL to currently_playing\")\n try:\n result = self.sp.currently_playing()\n if result:\n if result[\"is_playing\"]:\n return {\n \"id\": result[\"item\"][\"id\"],\n \"name\": result[\"item\"][\"name\"],\n \"artist\": result[\"item\"][\"artists\"][0][\"name\"],\n \"duration\": result[\"item\"][\"duration_ms\"] / 1000,\n \"progress\": result[\"progress_ms\"] / 1000,\n }\n else:\n raise NotPlayingError\n else:\n raise NotPlayingError\n # FIXME - Add 401 error here\n except ValueError:\n return {\n \"id\": None,\n \"name\": None,\n \"artist\": None,\n \"duration\": None,\n \"progress\": None,\n }\n\n def _get_spotify_track_info(self, track_id) -> dict:\n # print(\" CALL to audio_analysis\")\n try:\n result = self.sp.audio_analysis(track_id=track_id)\n for section in result[\"sections\"]:\n section[\"start\"] = section[\"start\"] - self.config.section_offset_seconds\n\n loudnesses = [\n section[\"loudness\"]\n for section in result[\"sections\"]\n if \"loudness\" in section\n ]\n return {\n \"id\": track_id,\n \"duration\": result[\"track\"][\"duration\"],\n \"tempo\": result[\"track\"][\"tempo\"],\n \"loudness\": result[\"track\"][\"loudness\"],\n \"key\": result[\"track\"][\"key\"],\n \"sections\": result[\"sections\"],\n \"sections_loudness_mean\": np.mean(loudnesses),\n \"sections_loudness_upperq\": np.quantile(loudnesses, 0.75),\n }\n # FIXME - Add 401 error here\n except ValueError:\n return {\"tempo\": None, \"loudness\": None, \"sections\": List()}\n\n def _get_spotify_track_features(self, track_id) -> dict:\n try:\n result = self.sp.audio_features(tracks=[track_id])\n return {\n \"danceability\": result[0][\"danceability\"],\n \"energy\": result[0][\"energy\"],\n \"key\": result[0][\"key\"],\n \"loudness\": result[0][\"loudness\"],\n \"speechiness\": result[0][\"speechiness\"],\n \"acousticness\": result[0][\"acousticness\"],\n \"instrumentalness\": result[0][\"instrumentalness\"],\n \"liveness\": result[0][\"liveness\"],\n \"valence\": result[0][\"valence\"],\n \"tempo\": result[0][\"tempo\"],\n \"time_signature\": result[0][\"time_signature\"],\n }\n # FIXME - Add 401 error here\n except ValueError:\n return {\"tempo\": None, \"loudness\": None, \"sections\": List()}\n\n def _get_playlist(self, playlist_id) -> dict:\n try:\n result = self.sp.playlist(playlist_id=playlist_id)\n tracks = []\n for item in result[\"tracks\"][\"items\"]:\n tracks.append(\n {\n \"playlist_name\": result[\"name\"],\n \"playlist_id\": result[\"id\"],\n \"id\": item[\"track\"][\"id\"],\n \"name\": item[\"track\"][\"name\"],\n \"duration\": item[\"track\"][\"duration_ms\"] / 1000,\n }\n )\n return tracks\n except ValueError:\n return []\n"
]
| [
[
"numpy.quantile",
"numpy.mean"
]
]
|
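
Standalone sketch (made-up track data): the section lookup that `spotifyMonitor._calculate_current_section_id` performs above, returning the last section whose start precedes the current playback progress.

```python
track = {
    "progress": 95.0,
    "sections": [{"start": 0.0}, {"start": 42.5}, {"start": 90.0}, {"start": 130.0}],
}

current_section_id = 0
for index, section in enumerate(track["sections"]):
    if section["start"] < track["progress"]:
        current_section_id = index
    if section["start"] > track["progress"]:
        break
print(current_section_id)   # 2, i.e. the section starting at 90.0s
```
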
Anysomeday/FDSSC | [
"c924ea06c7853bf4e66e04c115fd51e0441500ff"
]
| [
"evaluate_model.py"
]
| [
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport scipy.io as sio\nfrom keras.utils.np_utils import to_categorical\nfrom keras.optimizers import Adam, SGD, Adadelta, RMSprop, Nadam\nimport time, datetime\nimport collections\nfrom sklearn import metrics, preprocessing\nfrom operator import truediv\nfrom Utils import fdssc_model, record, extract_samll_cubic\n\n\ndef sampling(proportion, ground_truth):\n train = {}\n test = {}\n labels_loc = {}\n m = max(ground_truth)\n for i in range(m):\n indexes = [j for j, x in enumerate(ground_truth.ravel().tolist()) if x == i + 1]\n np.random.shuffle(indexes)\n labels_loc[i] = indexes\n nb_val = int(proportion * len(indexes))\n train[i] = indexes[:-nb_val]\n test[i] = indexes[-nb_val:]\n train_indexes = []\n test_indexes = []\n for i in range(m):\n train_indexes += train[i]\n test_indexes += test[i]\n np.random.shuffle(train_indexes)\n np.random.shuffle(test_indexes)\n return train_indexes, test_indexes\n\n\ndef our_model():\n model = fdssc_model.fdssc_model.build((1, img_rows, img_cols, img_channels), nb_classes)\n rms = RMSprop(lr=0.0003)\n model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])\n\n return model\n\n\ndef aa_and_each_accuracy(confusion_matrix):\n list_diag = np.diag(confusion_matrix)\n list_raw_sum = np.sum(confusion_matrix, axis=1)\n each_acc = np.nan_to_num(truediv(list_diag, list_raw_sum))\n average_acc = np.mean(each_acc)\n return each_acc, average_acc\n\n\nglobal Dataset\ndataset = input('please input the name of Dataset(IN, UP or KSC):')\nDataset = dataset.upper()\nif Dataset == 'IN':\n mat_data = sio.loadmat('datasets/Indian_pines_corrected.mat')\n data_hsi = mat_data['indian_pines_corrected']\n mat_gt = sio.loadmat('datasets/Indian_pines_gt.mat')\n gt_hsi = mat_gt['indian_pines_gt']\n TOTAL_SIZE = 10249\n TRAIN_SIZE = 2055\n VALIDATION_SPLIT = 0.8\n\n\nif Dataset == 'UP':\n uPavia = sio.loadmat('datasets/PaviaU.mat')\n gt_uPavia = sio.loadmat('datasets/PaviaU_gt.mat')\n data_hsi = uPavia['paviaU']\n gt_hsi = gt_uPavia['paviaU_gt']\n TOTAL_SIZE = 42776\n TRAIN_SIZE = 4281\n VALIDATION_SPLIT = 0.9\n\nif Dataset == 'KSC':\n KSC = sio.loadmat('datasets/KSC.mat')\n gt_KSC = sio.loadmat('datasets/KSC_gt.mat')\n data_hsi = KSC['KSC']\n gt_hsi = gt_KSC['KSC_gt']\n TOTAL_SIZE = 5211\n TRAIN_SIZE = 1048\n VALIDATION_SPLIT = 0.8\n\n\nprint(data_hsi.shape)\ndata = data_hsi.reshape(np.prod(data_hsi.shape[:2]), np.prod(data_hsi.shape[2:]))\ngt = gt_hsi.reshape(np.prod(gt_hsi.shape[:2]),)\nnb_classes = max(gt)\nprint('the class numbers of the HSI data is:', nb_classes)\n\nprint('-----Importing Setting Parameters-----')\nbatch_size = 32\nnb_epoch = 80\nITER = 10\nPATCH_LENGTH = 4\n\nimg_rows = 2*PATCH_LENGTH+1\nimg_cols = 2*PATCH_LENGTH+1\nimg_channels = data_hsi.shape[2]\nINPUT_DIMENSION = data_hsi.shape[2]\n\nVAL_SIZE = int(0.5*TRAIN_SIZE)\nTEST_SIZE = TOTAL_SIZE - TRAIN_SIZE\n\ndata = preprocessing.scale(data)\ndata_ = data.reshape(data_hsi.shape[0], data_hsi.shape[1], data_hsi.shape[2])\nwhole_data = data_\npadded_data = np.lib.pad(whole_data, ((PATCH_LENGTH, PATCH_LENGTH), (PATCH_LENGTH, PATCH_LENGTH), (0, 0)),\n 'constant', constant_values=0)\n\nCATEGORY = nb_classes\nday_str = input('please input the number of model:')\n\nKAPPA = []\nOA = []\nAA = []\nTRAINING_TIME = []\nTESTING_TIME = []\nELEMENT_ACC = np.zeros((ITER, CATEGORY))\n\nseeds = [1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340]\n\nfor index_iter in range(ITER):\n print(\"-----Starting the %d Iteration-----\" % (index_iter + 1))\n 
best_weights_path = 'models/'+Dataset+'_FDSSC_'+day_str+'@'+str(index_iter+1)+'.hdf5'\n\n np.random.seed(seeds[index_iter])\n train_indices, test_indices = sampling(VALIDATION_SPLIT, gt)\n\n TRAIN_SIZE = len(train_indices)\n print('Train size: ', TRAIN_SIZE)\n TEST_SIZE = TOTAL_SIZE - TRAIN_SIZE\n print('Test size: ', TEST_SIZE)\n VAL_SIZE = int(0.5*TRAIN_SIZE)\n print('Validation size: ', VAL_SIZE)\n\n y_train = gt[train_indices]-1\n y_train = to_categorical(np.asarray(y_train))\n\n y_test = gt[test_indices]-1\n y_test = to_categorical(np.asarray(y_test))\n\n print('-----Selecting Small Pieces from the Original Cube Data-----')\n train_data = extract_samll_cubic.select_small_cubic(TRAIN_SIZE, train_indices, whole_data,\n PATCH_LENGTH, padded_data, INPUT_DIMENSION)\n test_data = extract_samll_cubic.select_small_cubic(TEST_SIZE, test_indices, whole_data,\n PATCH_LENGTH, padded_data, INPUT_DIMENSION)\n\n x_train = train_data.reshape(train_data.shape[0], train_data.shape[1], train_data.shape[2], INPUT_DIMENSION)\n x_test_all = test_data.reshape(test_data.shape[0], test_data.shape[1], test_data.shape[2], INPUT_DIMENSION)\n\n x_val = x_test_all[-VAL_SIZE:]\n y_val = y_test[-VAL_SIZE:]\n\n x_test = x_test_all[:-VAL_SIZE]\n y_test = y_test[:-VAL_SIZE]\n\n model_fdssc = our_model()\n\n model_fdssc.load_weights(best_weights_path)\n\n pred_test_fdssc = model_fdssc.predict(x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3], 1)).argmax(axis=1)\n collections.Counter(pred_test_fdssc)\n gt_test = gt[test_indices] - 1\n\n overall_acc_fdssc = metrics.accuracy_score(pred_test_fdssc, gt_test[:-VAL_SIZE])\n confusion_matrix_fdssc = metrics.confusion_matrix(pred_test_fdssc, gt_test[:-VAL_SIZE])\n each_acc_fdssc, average_acc_fdssc = aa_and_each_accuracy(confusion_matrix_fdssc)\n kappa = metrics.cohen_kappa_score(pred_test_fdssc, gt_test[:-VAL_SIZE])\n\n OA.append(overall_acc_fdssc)\n AA.append(average_acc_fdssc)\n KAPPA.append(kappa)\n ELEMENT_ACC[index_iter, :] = each_acc_fdssc\n\nprint(\"--------FDSSC Evaluation Finished-----------\")\nrecord.record_output(OA, AA, KAPPA, ELEMENT_ACC,TRAINING_TIME, TESTING_TIME,\n 'records/' + Dataset + '_fdssc_' + day_str + '.txt')\n"
]
| [
[
"sklearn.metrics.confusion_matrix",
"numpy.asarray",
"numpy.zeros",
"numpy.lib.pad",
"numpy.sum",
"numpy.random.seed",
"scipy.io.loadmat",
"numpy.random.shuffle",
"numpy.mean",
"sklearn.preprocessing.scale",
"sklearn.metrics.accuracy_score",
"numpy.prod",
"numpy.diag",
"sklearn.metrics.cohen_kappa_score"
]
]
|
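
Minimal sketch (synthetic labels, not the HSI datasets): the OA / AA / kappa metrics that the evaluation loop above collects, with AA computed from the confusion-matrix diagonal as in `aa_and_each_accuracy`.

```python
import numpy as np
from sklearn import metrics

y_true = np.array([0, 0, 1, 1, 2, 2, 2, 1, 0, 2])
y_pred = np.array([0, 1, 1, 1, 2, 2, 0, 1, 0, 2])

cm = metrics.confusion_matrix(y_true, y_pred)
overall_acc = metrics.accuracy_score(y_true, y_pred)     # OA
each_acc = np.diag(cm) / cm.sum(axis=1)                  # per-class recall
average_acc = each_acc.mean()                            # AA
kappa = metrics.cohen_kappa_score(y_true, y_pred)
print(cm, overall_acc, average_acc, kappa)
```
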
lizhangjie316/ComputerVision | [
"86d82358bd160074d154773df0284e1154a6d077"
]
| [
"01-python/source code/05_matplotlib/01.py"
]
| [
"import numpy as np\n\nimport matplotlib.pyplot as plt\n\nx = np.linspace(-1,1,50)\n#y = 2*x+1\ny = x**2\n\nprint(x)\n\nplt.plot(x,y)\nplt.show()\n\n"
]
| [
[
"matplotlib.pyplot.show",
"numpy.linspace",
"matplotlib.pyplot.plot"
]
]
|
tillenius/superglue | [
"4f4fe917b4d7a11cb971f349a885bef4e869ae75"
]
| [
"scripts/drawsched.py"
]
| [
"#!/usr/bin/env python\n\nimport sys\nimport re\n\n#from matplotlib import rc\n#\n#rc('text', usetex=True)\n#rc('font', **{'family':'serif', 'serif':['Computer Modern Roman'], \n# 'monospace': ['Computer Modern Typewriter']})\n\n##################################################\n# CONFIG\n##################################################\n\n# which tasks to show (return true for desired tasks)\ndef myfilter(x):\n return True\n\nfillcols = ['#ff0000', '#00ff00', '#0000ff', '#ffff00', '#ff00ff', '#00ffff']\nedgecols = ['#000000', '#000000', '#000000', '#000000', '#000000', '#000000']\ncoldict = dict()\nnextcol = 0\n\n# which color to use for each task (fillcolor, edgecolor)\ndef getColor(text):\n global nextcol\n\n spl = text.split();\n if len(spl) > 1:\n text = spl[0]\n\n # hard-code task names to colors\n if text.startswith(\"read\"): return ['#000000', '#000000']\n\n # distribute colors cyclically\n if text in coldict:\n colidx = coldict[text]\n else:\n colidx = nextcol\n nextcol = (nextcol + 1) % len(fillcols)\n coldict[text] = colidx\n return [fillcols[colidx], edgecols[colidx]]\n\n# if any text is to be attached to the task\ndef getText(text):\n return []\n\ndef getExtra(text):\n m = re.match(r'.* tasks: ([0-9]+)', text)\n if m:\n return m.group(1)\n return []\n\n##################################################\ndef load_file(filename):\n fh = open(filename, \"r\")\n out = list()\n pattern = re.compile('([0-9]+): ([0-9]+) ([0-9]+) (.*)')\n mpipattern = re.compile('([0-9]+) ([0-9]+): ([0-9]+) ([0-9]+) (.*)')\n cpattern = re.compile('(.*) \\((.*)\\)')\n while True:\n line = fh.readline()\n if not line:\n break\n if line.startswith('LOG 2'):\n continue\n # [thread id]: [start] [length] [name [perf]]\n g = pattern.match(line)\n if g != None:\n name = g.group(4)\n gg = cpattern.match(name)\n cache = 0\n if gg != None:\n name = gg.group(1)\n cache = int(gg.group(2))\n out.append({'name': name.strip(),\n 'procid': 0,\n 'threadid': int(g.group(1)),\n 'start': int(g.group(2)),\n 'length': int(g.group(3)),\n 'end': int(g.group(2)) + int(g.group(3)),\n 'cache': cache})\n continue\n\n # [node number] [thread id]: [start] [length] [name [perf]]\n g = mpipattern.match(line)\n if g != None:\n name = g.group(5)\n gg = cpattern.match(name)\n cache = 0\n if gg != None:\n name = gg.group(1)\n cache = int(gg.group(2))\n out.append({'name': name.strip(),\n 'procid': int(g.group(1)),\n 'threadid': int(g.group(2)),\n 'start': int(g.group(3)),\n 'length': int(g.group(4)),\n 'end': int(g.group(3)) + int(g.group(4)),\n 'cache': cache})\n continue\n\n # [thread id] [start] [length] [name]\n w = line.split()\n if len(w) == 4:\n out.append({'name': w[3],\n 'procid': 0,\n 'threadid': int(w[0]),\n 'start': float(w[1])*1e6,\n 'length': float(w[2])*1e6,\n 'end': (float(w[1])+float(w[2]))*1e6,\n 'cache': 0})\n continue\n\n # parse error\n print(\"Error parsing line: \", line)\n fh.close()\n return out\n\ndef getMedian(numericValues):\n theValues = sorted(numericValues)\n\n if len(theValues) % 2 == 1:\n return theValues[(len(theValues)+1)//2-1]\n else:\n lower = theValues[len(theValues)//2-1]\n upper = theValues[len(theValues)//2]\n\n return (float(lower + upper)) / 2\n\n\n##################################################\n# plot\n\nimport matplotlib.pyplot as plt\n\ndef drawBox(x0,x1,y0,y1,col):\n plt.fill([x0, x1, x0], [y0, (y0+y1)/2, y1], fc=col[0], ec=col[1], linewidth=.5)\n\ndef drawText(x,y,text):\n plt.text(x,y,text,horizontalalignment='center',verticalalignment='center',fontsize=taskfontsize)\n\ndef 
drawTask(x0,x1,y0,y1,orgtext):\n drawBox(x0,x1,y0,y1,getColor(orgtext))\n text = getText(orgtext)\n if len(text) == 2:\n drawText((x0+x1)/2,(y0+y1)/2 + barHeight/4,text[0])\n drawText((x0+x1)/2,(y0+y1)/2 - barHeight/4,text[1])\n elif len(text) == 3:\n drawText((x0+x1)/2,(y0+y1)/2 + barHeight/4,text[0])\n drawText((x0+x1)/2,(y0+y1)/2,text[1])\n drawText((x0+x1)/2,(y0+y1)/2 - barHeight/4,text[2])\n elif len(text) == 1:\n drawText((x0+x1)/2,(y0+y1)/2,text[0])\n extra = getExtra(orgtext)\n if len(extra) > 0:\n drawText(x0, y1+height*0.1, extra)\n\ndef drawPlot(tasks):\n height = 1.0\n barHeight = height * 0.8\n\n for task in tasks:\n x0 = task['start']\n x1 = x0 + task['length']\n y0 = task['threadid'] * height - barHeight / 2.0\n y1 = y0 + barHeight\n drawTask(x0, x1, y0, y1, task['name'])\n\n padding = barHeight/2\n plt.ylim([ -barHeight/2 - padding, (numThreads-1)*height + barHeight/2 + padding]);\n yticks=range(0, numThreads)\n plt.yticks(yticks, yticks);\n plt.xlabel(r'Time')#,fontsize=labelfontsize)\n plt.ylabel(r'Thread')#,fontsize=labelfontsize)\n\n\n##################################################\n\nfilename = \"schedule.dat\"\nif len(sys.argv) > 1:\n filename = sys.argv[1]\n\n#taskfontsize = 16\n#labelfontsize = 16\n#tickfontsize = 16\ntimeScale = 1.0/1000000.0; # cycles -> Mcycles\n\ntasks = load_file(filename)\n\n# filter out uninteresting tasks\n\ntasks = [x for x in tasks if myfilter(x)]\n\n\n# normalize start time\n\nstarttime = min([x['start'] for x in tasks])\nfor task in tasks:\n task['start'] = task['start']-starttime\n task['end'] = task['end']-starttime\n\n# scale time\n\nfor task in tasks:\n task['start'] = task['start']*timeScale\n task['length'] = task['length']*timeScale\n task['end'] = task['end']*timeScale\n\n# sort by processor (must be numbered properly)\n\nprocids = dict(zip([x['procid'] for x in tasks], [x['procid'] for x in tasks]))\nnumProcs = len(procids)\ntasksperproc=[]\nfor i in sorted(procids):\n tasksperproc.append( [t for t in tasks if t['procid'] == i] )\n\n# true threadids -> logical threadids\n\nthreadsPerProc = 0\n\nfor i in procids:\n ltasks = tasksperproc[i]\n threadids = dict(zip([x['threadid'] for x in ltasks], [x['threadid'] for x in ltasks]))\n numThreads = len(threadids)\n threadsPerProc = max(numThreads, threadsPerProc)\n threadidmap = dict(zip(sorted(threadids.keys()), range(0, len(threadids))))\n for task in ltasks:\n task['threadid'] = threadidmap[task['threadid']]\n\n# calculate time wasted between tasks\n\nmtd = list()\n\nfor p in procids:\n for i in range(0, numThreads):\n t = [ x for x in tasksperproc[p] if x['threadid'] == i ];\n for j in range(1, len(t)):\n mtd.append( t[j]['start']-(t[j-1]['start']+t[j-1]['length']) )\n\ntotaltime = sum([x['length'] for x in tasks])\nendtime = max([x['end'] for x in tasks])\nprint('N= ', len(tasks), \\\n ' Total= ', totaltime, \\\n ' End= ', endtime, \\\n ' Par= ', \"{0:.2f}\".format(totaltime/float(endtime)), \\\n ' DistMin= ', min(mtd), \\\n ' DistMed= ', getMedian(mtd), \\\n ' perf= ', sum([x['cache'] for x in tasks]))\n\ndrawPlot(tasks)\nplt.show()\n"
]
| [
[
"matplotlib.pyplot.text",
"matplotlib.pyplot.fill",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
]
|
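
Illustrative sketch: parsing one "[thread id]: [start] [length] [name]" log line with the same regular expression that `load_file` uses above; the sample line is invented.

```python
import re

pattern = re.compile(r'([0-9]+): ([0-9]+) ([0-9]+) (.*)')
line = "3: 1200 450 gemm (17)"
g = pattern.match(line)
task = {
    'threadid': int(g.group(1)),
    'start': int(g.group(2)),
    'length': int(g.group(3)),
    'name': g.group(4),
}
print(task)
```
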
ttyskg/ProgrammingCompetition | [
"65fb9e131803e4f1a1a6369e68ed1b504f08b00f"
]
| [
"AtCoder/AGC/033/a.py"
]
| [
"#ref: https://atcoder.jp/contests/agc033/submissions/5260580\n\nimport sys\nimport numpy as np\n\ndef main():\n input = sys.stdin.readline\n H, W = map(int, input().split())\n INF = H * W\n dp = [[INF if c == '.' else 0 for c in input().strip()] for _ in range(H)]\n dp = np.array(dp)\n\n for i in range(1, H):\n dp[i, :] = np.minimum(dp[i, :], dp[i-1, :] + 1)\n\n for i in range(H-2, -1, -1):\n dp[i, :] = np.minimum(dp[i, :], dp[i+1, :] + 1)\n\n for i in range(1, W):\n dp[:, i] = np.minimum(dp[:, i], dp[:, i-1] + 1)\n\n for i in range(W-2, -1, -1):\n dp[:, i] = np.minimum(dp[:, i], dp[:, i+1] + 1)\n\n return np.max(dp)\n\nif __name__ == '__main__':\n print(main())\n"
]
| [
[
"numpy.max",
"numpy.array",
"numpy.minimum"
]
]
|
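
Sanity-check sketch (tiny synthetic grid): the four directional sweeps in the solution above form a separable L1 distance transform, so `dp` ends up holding each cell's distance to the nearest `#`, and the answer is its maximum.

```python
import numpy as np

grid = ["...#",
        "....",
        "#..."]
H, W = len(grid), len(grid[0])
INF = H * W
dp = np.array([[0 if c == '#' else INF for c in row] for row in grid])

for i in range(1, H):
    dp[i, :] = np.minimum(dp[i, :], dp[i - 1, :] + 1)
for i in range(H - 2, -1, -1):
    dp[i, :] = np.minimum(dp[i, :], dp[i + 1, :] + 1)
for j in range(1, W):
    dp[:, j] = np.minimum(dp[:, j], dp[:, j - 1] + 1)
for j in range(W - 2, -1, -1):
    dp[:, j] = np.minimum(dp[:, j], dp[:, j + 1] + 1)

print(dp)          # per-cell distance to the nearest '#'
print(dp.max())    # the answer for this grid
```
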
CirQ/DCGAN-tensorflow | [
"d58c84b9631b7364abb76274bfa1a06ea132f5c8"
]
| [
"model.py"
]
| [
"from __future__ import division\nimport os\nimport time\nimport math\nfrom glob import glob\nimport tensorflow as tf\nimport numpy as np\nfrom six.moves import xrange\n\nfrom ops import *\nfrom utils import *\n\ndef conv_out_size_same(size, stride):\n return int(math.ceil(float(size) / float(stride)))\n\nclass DCGAN(object):\n def __init__(self, sess, input_height=108, input_width=108, crop=True,\n batch_size=64, sample_num = 64, output_height=64, output_width=64,\n y_dim=None, z_dim=100, gf_dim=64, df_dim=64,\n gfc_dim=1024, dfc_dim=1024, c_dim=3, dataset_name='default',\n input_fname_pattern='*.jpg', checkpoint_dir=None, sample_dir=None):\n \"\"\"\n\n Args:\n sess: TensorFlow session\n batch_size: The size of batch. Should be specified before training.\n y_dim: (optional) Dimension of dim for y. [None]\n z_dim: (optional) Dimension of dim for Z. [100]\n gf_dim: (optional) Dimension of gen filters in first conv layer. [64]\n df_dim: (optional) Dimension of discrim filters in first conv layer. [64]\n gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]\n dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]\n c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]\n \"\"\"\n self.sess = sess\n self.crop = crop\n\n self.batch_size = batch_size\n self.sample_num = sample_num\n\n self.input_height = input_height\n self.input_width = input_width\n self.output_height = output_height\n self.output_width = output_width\n\n self.y_dim = y_dim\n self.z_dim = z_dim\n\n self.gf_dim = gf_dim\n self.df_dim = df_dim\n\n self.gfc_dim = gfc_dim\n self.dfc_dim = dfc_dim\n\n # batch normalization : deals with poor initialization helps gradient flow\n self.d_bn1 = batch_norm(name='d_bn1')\n self.d_bn2 = batch_norm(name='d_bn2')\n\n if not self.y_dim:\n self.d_bn3 = batch_norm(name='d_bn3')\n\n self.g_bn0 = batch_norm(name='g_bn0')\n self.g_bn1 = batch_norm(name='g_bn1')\n self.g_bn2 = batch_norm(name='g_bn2')\n\n if not self.y_dim:\n self.g_bn3 = batch_norm(name='g_bn3')\n\n self.dataset_name = dataset_name\n self.input_fname_pattern = input_fname_pattern\n self.checkpoint_dir = checkpoint_dir\n\n if self.dataset_name == 'mnist':\n self.data_X, self.data_y = self.load_mnist()\n self.c_dim = self.data_X[0].shape[-1]\n else:\n self.data = glob(os.path.join(\"./data\", self.dataset_name, self.input_fname_pattern))\n imreadImg = imread(self.data[0])\n if len(imreadImg.shape) >= 3: #check if image is a non-grayscale image by checking channel number\n self.c_dim = imread(self.data[0]).shape[-1]\n else:\n self.c_dim = 1\n\n self.grayscale = (self.c_dim == 1)\n\n self.build_model()\n\n def build_model(self):\n if self.y_dim:\n self.y = tf.placeholder(tf.float32, [self.batch_size, self.y_dim], name='y')\n else:\n self.y = None\n\n if self.crop:\n image_dims = [self.output_height, self.output_width, self.c_dim]\n else:\n image_dims = [self.input_height, self.input_width, self.c_dim]\n\n self.inputs = tf.placeholder(\n tf.float32, [self.batch_size] + image_dims, name='real_images')\n\n inputs = self.inputs\n\n self.z = tf.placeholder(\n tf.float32, [None, self.z_dim], name='z')\n self.z_sum = histogram_summary(\"z\", self.z)\n\n self.G = self.generator(self.z, self.y)\n self.D, self.D_logits = self.discriminator(inputs, self.y, reuse=False)\n self.sampler = self.sampler(self.z, self.y)\n self.D_, self.D_logits_ = self.discriminator(self.G, self.y, reuse=True)\n \n self.d_sum = histogram_summary(\"d\", self.D)\n self.d__sum = 
histogram_summary(\"d_\", self.D_)\n self.G_sum = image_summary(\"G\", self.G)\n\n def sigmoid_cross_entropy_with_logits(x, y):\n try:\n return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)\n except:\n return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, targets=y)\n\n self.d_loss_real = tf.reduce_mean(\n sigmoid_cross_entropy_with_logits(self.D_logits, tf.ones_like(self.D)))\n self.d_loss_fake = tf.reduce_mean(\n sigmoid_cross_entropy_with_logits(self.D_logits_, tf.zeros_like(self.D_)))\n self.g_loss = tf.reduce_mean(\n sigmoid_cross_entropy_with_logits(self.D_logits_, tf.ones_like(self.D_)))\n\n self.d_loss_real_sum = scalar_summary(\"d_loss_real\", self.d_loss_real)\n self.d_loss_fake_sum = scalar_summary(\"d_loss_fake\", self.d_loss_fake)\n \n self.d_loss = self.d_loss_real + self.d_loss_fake\n\n self.g_loss_sum = scalar_summary(\"g_loss\", self.g_loss)\n self.d_loss_sum = scalar_summary(\"d_loss\", self.d_loss)\n\n t_vars = tf.trainable_variables()\n\n self.d_vars = [var for var in t_vars if 'd_' in var.name]\n self.g_vars = [var for var in t_vars if 'g_' in var.name]\n\n self.saver = tf.train.Saver()\n\n def train(self, config):\n d_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \\\n .minimize(self.d_loss, var_list=self.d_vars)\n g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \\\n .minimize(self.g_loss, var_list=self.g_vars)\n try:\n tf.global_variables_initializer().run()\n except:\n tf.initialize_all_variables().run()\n\n self.g_sum = merge_summary([self.z_sum, self.d__sum,\n self.G_sum, self.d_loss_fake_sum, self.g_loss_sum])\n self.d_sum = merge_summary(\n [self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])\n self.writer = SummaryWriter(\"./logs\", self.sess.graph)\n\n sample_z = np.random.uniform(-1, 1, size=(self.sample_num , self.z_dim))\n \n if config.dataset == 'mnist':\n sample_inputs = self.data_X[0:self.sample_num]\n sample_labels = self.data_y[0:self.sample_num]\n else:\n sample_files = self.data[0:self.sample_num]\n sample = [\n get_image(sample_file,\n input_height=self.input_height,\n input_width=self.input_width,\n resize_height=self.output_height,\n resize_width=self.output_width,\n crop=self.crop,\n grayscale=self.grayscale) for sample_file in sample_files]\n if (self.grayscale):\n sample_inputs = np.array(sample).astype(np.float32)[:, :, :, None]\n else:\n sample_inputs = np.array(sample).astype(np.float32)\n \n counter = 1\n start_time = time.time()\n could_load, checkpoint_counter = self.load(self.checkpoint_dir)\n if could_load:\n counter = checkpoint_counter\n print(\" [*] Load SUCCESS\")\n else:\n print(\" [!] 
Load failed...\")\n\n for epoch in xrange(config.epoch):\n if config.dataset == 'mnist':\n batch_idxs = min(len(self.data_X), config.train_size) // config.batch_size\n else: \n self.data = glob(os.path.join(\n \"./data\", config.dataset, self.input_fname_pattern))\n batch_idxs = min(len(self.data), config.train_size) // config.batch_size\n\n for idx in xrange(0, batch_idxs):\n if config.dataset == 'mnist':\n batch_images = self.data_X[idx*config.batch_size:(idx+1)*config.batch_size]\n batch_labels = self.data_y[idx*config.batch_size:(idx+1)*config.batch_size]\n else:\n batch_files = self.data[idx*config.batch_size:(idx+1)*config.batch_size]\n batch = [\n get_image(batch_file,\n input_height=self.input_height,\n input_width=self.input_width,\n resize_height=self.output_height,\n resize_width=self.output_width,\n crop=self.crop,\n grayscale=self.grayscale) for batch_file in batch_files]\n if self.grayscale:\n batch_images = np.array(batch).astype(np.float32)[:, :, :, None]\n else:\n batch_images = np.array(batch).astype(np.float32)\n\n batch_z = np.random.uniform(-1, 1, [config.batch_size, self.z_dim]) \\\n .astype(np.float32)\n\n if config.dataset == 'mnist':\n # Update D network\n _, summary_str = self.sess.run([d_optim, self.d_sum],\n feed_dict={ \n self.inputs: batch_images,\n self.z: batch_z,\n self.y:batch_labels,\n })\n self.writer.add_summary(summary_str, counter)\n\n # Update G network\n _, summary_str = self.sess.run([g_optim, self.g_sum],\n feed_dict={\n self.z: batch_z, \n self.y:batch_labels,\n })\n self.writer.add_summary(summary_str, counter)\n\n # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)\n _, summary_str = self.sess.run([g_optim, self.g_sum],\n feed_dict={ self.z: batch_z, self.y:batch_labels })\n self.writer.add_summary(summary_str, counter)\n \n errD_fake = self.d_loss_fake.eval({\n self.z: batch_z, \n self.y:batch_labels\n })\n errD_real = self.d_loss_real.eval({\n self.inputs: batch_images,\n self.y:batch_labels\n })\n errG = self.g_loss.eval({\n self.z: batch_z,\n self.y: batch_labels\n })\n else:\n # Update D network\n _, summary_str = self.sess.run([d_optim, self.d_sum],\n feed_dict={ self.inputs: batch_images, self.z: batch_z })\n self.writer.add_summary(summary_str, counter)\n\n # Update G network\n _, summary_str = self.sess.run([g_optim, self.g_sum],\n feed_dict={ self.z: batch_z })\n self.writer.add_summary(summary_str, counter)\n\n # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)\n _, summary_str = self.sess.run([g_optim, self.g_sum],\n feed_dict={ self.z: batch_z })\n self.writer.add_summary(summary_str, counter)\n \n errD_fake = self.d_loss_fake.eval({ self.z: batch_z })\n errD_real = self.d_loss_real.eval({ self.inputs: batch_images })\n errG = self.g_loss.eval({self.z: batch_z})\n\n counter += 1\n print(\"Epoch: [%2d/%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f\" \\\n % (epoch, config.epoch, idx, batch_idxs,\n time.time() - start_time, errD_fake+errD_real, errG))\n\n if np.mod(counter, 100) == 1:\n if config.dataset == 'mnist':\n samples, d_loss, g_loss = self.sess.run(\n [self.sampler, self.d_loss, self.g_loss],\n feed_dict={\n self.z: sample_z,\n self.inputs: sample_inputs,\n self.y:sample_labels,\n }\n )\n save_images(samples, image_manifold_size(samples.shape[0]),\n './{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, idx))\n print(\"[Sample] d_loss: %.8f, g_loss: %.8f\" % (d_loss, g_loss)) \n else:\n try:\n samples, d_loss, g_loss = self.sess.run(\n 
[self.sampler, self.d_loss, self.g_loss],\n feed_dict={\n self.z: sample_z,\n self.inputs: sample_inputs,\n },\n )\n save_images(samples, image_manifold_size(samples.shape[0]),\n './{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, idx))\n print(\"[Sample] d_loss: %.8f, g_loss: %.8f\" % (d_loss, g_loss)) \n except:\n print(\"one pic error!...\")\n\n if np.mod(counter, 500) == 2:\n self.save(config.checkpoint_dir, counter)\n\n def discriminator(self, image, y=None, reuse=False):\n with tf.variable_scope(\"discriminator\") as scope:\n if reuse:\n scope.reuse_variables()\n\n if not self.y_dim:\n h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))\n h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))\n h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))\n h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))\n h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h4_lin')\n\n return tf.nn.sigmoid(h4), h4\n else:\n yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])\n x = conv_cond_concat(image, yb)\n\n h0 = lrelu(conv2d(x, self.c_dim + self.y_dim, name='d_h0_conv'))\n h0 = conv_cond_concat(h0, yb)\n\n h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim + self.y_dim, name='d_h1_conv')))\n h1 = tf.reshape(h1, [self.batch_size, -1]) \n h1 = concat([h1, y], 1)\n \n h2 = lrelu(self.d_bn2(linear(h1, self.dfc_dim, 'd_h2_lin')))\n h2 = concat([h2, y], 1)\n\n h3 = linear(h2, 1, 'd_h3_lin')\n \n return tf.nn.sigmoid(h3), h3\n\n def generator(self, z, y=None):\n with tf.variable_scope(\"generator\") as scope:\n if not self.y_dim:\n s_h, s_w = self.output_height, self.output_width\n s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)\n s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)\n s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)\n s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)\n\n # project `z` and reshape\n self.z_, self.h0_w, self.h0_b = linear(\n z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin', with_w=True)\n\n self.h0 = tf.reshape(\n self.z_, [-1, s_h16, s_w16, self.gf_dim * 8])\n h0 = tf.nn.relu(self.g_bn0(self.h0))\n\n self.h1, self.h1_w, self.h1_b = deconv2d(\n h0, [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='g_h1', with_w=True)\n h1 = tf.nn.relu(self.g_bn1(self.h1))\n\n h2, self.h2_w, self.h2_b = deconv2d(\n h1, [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='g_h2', with_w=True)\n h2 = tf.nn.relu(self.g_bn2(h2))\n\n h3, self.h3_w, self.h3_b = deconv2d(\n h2, [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='g_h3', with_w=True)\n h3 = tf.nn.relu(self.g_bn3(h3))\n\n h4, self.h4_w, self.h4_b = deconv2d(\n h3, [self.batch_size, s_h, s_w, self.c_dim], name='g_h4', with_w=True)\n\n return tf.nn.tanh(h4)\n else:\n s_h, s_w = self.output_height, self.output_width\n s_h2, s_h4 = int(s_h/2), int(s_h/4)\n s_w2, s_w4 = int(s_w/2), int(s_w/4)\n\n # yb = tf.expand_dims(tf.expand_dims(y, 1),2)\n yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])\n z = concat([z, y], 1)\n\n h0 = tf.nn.relu(\n self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin')))\n h0 = concat([h0, y], 1)\n\n h1 = tf.nn.relu(self.g_bn1(\n linear(h0, self.gf_dim*2*s_h4*s_w4, 'g_h1_lin')))\n h1 = tf.reshape(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2])\n\n h1 = conv_cond_concat(h1, yb)\n\n h2 = tf.nn.relu(self.g_bn2(deconv2d(h1,\n [self.batch_size, s_h2, s_w2, self.gf_dim * 2], name='g_h2')))\n h2 = conv_cond_concat(h2, yb)\n\n return tf.nn.sigmoid(\n deconv2d(h2, [self.batch_size, s_h, 
s_w, self.c_dim], name='g_h3'))\n\n def sampler(self, z, y=None):\n with tf.variable_scope(\"generator\") as scope:\n scope.reuse_variables()\n\n if not self.y_dim:\n s_h, s_w = self.output_height, self.output_width\n s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)\n s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)\n s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)\n s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)\n\n # project `z` and reshape\n h0 = tf.reshape(\n linear(z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin'),\n [-1, s_h16, s_w16, self.gf_dim * 8])\n h0 = tf.nn.relu(self.g_bn0(h0, train=False))\n\n h1 = deconv2d(h0, [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='g_h1')\n h1 = tf.nn.relu(self.g_bn1(h1, train=False))\n\n h2 = deconv2d(h1, [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='g_h2')\n h2 = tf.nn.relu(self.g_bn2(h2, train=False))\n\n h3 = deconv2d(h2, [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='g_h3')\n h3 = tf.nn.relu(self.g_bn3(h3, train=False))\n\n h4 = deconv2d(h3, [self.batch_size, s_h, s_w, self.c_dim], name='g_h4')\n\n return tf.nn.tanh(h4)\n else:\n s_h, s_w = self.output_height, self.output_width\n s_h2, s_h4 = int(s_h/2), int(s_h/4)\n s_w2, s_w4 = int(s_w/2), int(s_w/4)\n\n # yb = tf.reshape(y, [-1, 1, 1, self.y_dim])\n yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])\n z = concat([z, y], 1)\n\n h0 = tf.nn.relu(self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin'), train=False))\n h0 = concat([h0, y], 1)\n\n h1 = tf.nn.relu(self.g_bn1(\n linear(h0, self.gf_dim*2*s_h4*s_w4, 'g_h1_lin'), train=False))\n h1 = tf.reshape(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2])\n h1 = conv_cond_concat(h1, yb)\n\n h2 = tf.nn.relu(self.g_bn2(\n deconv2d(h1, [self.batch_size, s_h2, s_w2, self.gf_dim * 2], name='g_h2'), train=False))\n h2 = conv_cond_concat(h2, yb)\n\n return tf.nn.sigmoid(deconv2d(h2, [self.batch_size, s_h, s_w, self.c_dim], name='g_h3'))\n\n def load_mnist(self):\n data_dir = os.path.join(\"./data\", self.dataset_name)\n \n fd = open(os.path.join(data_dir,'train-images-idx3-ubyte'))\n loaded = np.fromfile(file=fd,dtype=np.uint8)\n trX = loaded[16:].reshape((60000,28,28,1)).astype(np.float)\n\n fd = open(os.path.join(data_dir,'train-labels-idx1-ubyte'))\n loaded = np.fromfile(file=fd,dtype=np.uint8)\n trY = loaded[8:].reshape((60000)).astype(np.float)\n\n fd = open(os.path.join(data_dir,'t10k-images-idx3-ubyte'))\n loaded = np.fromfile(file=fd,dtype=np.uint8)\n teX = loaded[16:].reshape((10000,28,28,1)).astype(np.float)\n\n fd = open(os.path.join(data_dir,'t10k-labels-idx1-ubyte'))\n loaded = np.fromfile(file=fd,dtype=np.uint8)\n teY = loaded[8:].reshape((10000)).astype(np.float)\n\n trY = np.asarray(trY)\n teY = np.asarray(teY)\n \n X = np.concatenate((trX, teX), axis=0)\n y = np.concatenate((trY, teY), axis=0).astype(np.int)\n \n seed = 547\n np.random.seed(seed)\n np.random.shuffle(X)\n np.random.seed(seed)\n np.random.shuffle(y)\n \n y_vec = np.zeros((len(y), self.y_dim), dtype=np.float)\n for i, label in enumerate(y):\n y_vec[i,y[i]] = 1.0\n \n return X/255.,y_vec\n\n @property\n def model_dir(self):\n return \"{}_{}_{}_{}\".format(\n self.dataset_name, self.batch_size,\n self.output_height, self.output_width)\n \n def save(self, checkpoint_dir, step):\n model_name = \"DCGAN.model\"\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n self.saver.save(self.sess,\n 
os.path.join(checkpoint_dir, model_name),\n global_step=step)\n\n def load(self, checkpoint_dir):\n import re\n print(\" [*] Reading checkpoints...\")\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\",ckpt_name)).group(0))\n print(\" [*] Success to read {}\".format(ckpt_name))\n return True, counter\n else:\n print(\" [*] Failed to find a checkpoint\")\n return False, 0\n"
]
| [
[
"tensorflow.ones_like",
"tensorflow.train.get_checkpoint_state",
"tensorflow.reshape",
"tensorflow.zeros_like",
"tensorflow.nn.tanh",
"tensorflow.global_variables_initializer",
"numpy.concatenate",
"tensorflow.trainable_variables",
"tensorflow.train.Saver",
"tensorflow.variable_scope",
"tensorflow.nn.sigmoid",
"numpy.mod",
"numpy.array",
"tensorflow.train.AdamOptimizer",
"tensorflow.initialize_all_variables",
"numpy.random.shuffle",
"tensorflow.placeholder",
"numpy.fromfile",
"numpy.asarray",
"numpy.random.seed",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"numpy.random.uniform"
]
]
|
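Each row in this dump pairs a source file (the code column) with the library calls detected in it (the apis column); for the DCGAN model.py row above, that is the list of tensorflow.* and numpy.* entries just before this note. The dump does not include the extraction script itself, so the snippet below is only a minimal sketch, under the assumption that the apis column comes from a plain ast walk over dotted call targets; the function name extract_api_calls and the alias map are illustrative and not part of the dataset.

import ast

# Minimal sketch (not the dataset's actual pipeline): collect dotted call
# targets such as "tf.nn.relu" from a source string and map the leading
# import alias back to a library name, yielding entries in the same style
# as the apis column, e.g. "numpy.zeros".
def extract_api_calls(source: str, alias_map=None) -> set:
    # alias_map translates import aliases to canonical library names; the
    # defaults below are assumptions based on the imports seen in the rows.
    alias_map = alias_map or {"tf": "tensorflow", "np": "numpy"}
    calls = set()
    for node in ast.walk(ast.parse(source)):
        if not isinstance(node, ast.Call):
            continue
        # Rebuild the dotted name of the call target, e.g. tf.nn.relu.
        parts = []
        target = node.func
        while isinstance(target, ast.Attribute):
            parts.append(target.attr)
            target = target.value
        if isinstance(target, ast.Name):
            parts.append(target.id)
            dotted = ".".join(reversed(parts))
            root = parts[-1]
            if root in alias_map:
                calls.add(dotted.replace(root, alias_map[root], 1))
    return calls

if __name__ == "__main__":
    sample = "import numpy as np\nx = np.zeros((2, 2))\ny = np.random.uniform(-1, 1)"
    print(sorted(extract_api_calls(sample)))
    # Under these assumptions: ['numpy.random.uniform', 'numpy.zeros']

Restricting the walk to ast.Call nodes keeps attribute accesses that are never called out of the result, which appears consistent with the entries above, all of which name callables.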
Yash-10/yt | [
"023680e3a7bd1000d601727e02a55e72b4cbdc75"
]
| [
"yt/geometry/coordinates/cylindrical_coordinates.py"
]
| [
"import numpy as np\n\nfrom yt.utilities.lib.pixelization_routines import pixelize_cartesian, pixelize_cylinder\n\nfrom .coordinate_handler import (\n CoordinateHandler,\n _get_coord_fields,\n _setup_dummy_cartesian_coords_and_widths,\n _setup_polar_coordinates,\n cartesian_to_cylindrical,\n cylindrical_to_cartesian,\n)\n\n#\n# Cylindrical fields\n#\n\n\nclass CylindricalCoordinateHandler(CoordinateHandler):\n name = \"cylindrical\"\n\n def __init__(self, ds, ordering=(\"r\", \"z\", \"theta\")):\n super().__init__(ds, ordering)\n self.image_units = {}\n self.image_units[self.axis_id[\"r\"]] = (\"rad\", None)\n self.image_units[self.axis_id[\"theta\"]] = (None, None)\n self.image_units[self.axis_id[\"z\"]] = (None, None)\n\n def setup_fields(self, registry):\n # Missing implementation for x and y coordinates.\n _setup_dummy_cartesian_coords_and_widths(registry, axes=(\"x\", \"y\"))\n _setup_polar_coordinates(registry, self.axis_id)\n\n f1, f2 = _get_coord_fields(self.axis_id[\"z\"])\n registry.add_field(\n (\"index\", \"dz\"),\n sampling_type=\"cell\",\n function=f1,\n display_field=False,\n units=\"code_length\",\n )\n\n registry.add_field(\n (\"index\", \"z\"),\n sampling_type=\"cell\",\n function=f2,\n display_field=False,\n units=\"code_length\",\n )\n\n def _CylindricalVolume(field, data):\n r = data[\"index\", \"r\"]\n dr = data[\"index\", \"dr\"]\n vol = 0.5 * ((r + 0.5 * dr) ** 2 - (r - 0.5 * dr) ** 2)\n vol *= data[\"index\", \"dtheta\"]\n vol *= data[\"index\", \"dz\"]\n return vol\n\n registry.add_field(\n (\"index\", \"cell_volume\"),\n sampling_type=\"cell\",\n function=_CylindricalVolume,\n units=\"code_length**3\",\n )\n registry.alias((\"index\", \"volume\"), (\"index\", \"cell_volume\"))\n\n def _path_z(field, data):\n return data[\"index\", \"dz\"]\n\n registry.add_field(\n (\"index\", \"path_element_z\"),\n sampling_type=\"cell\",\n function=_path_z,\n units=\"code_length\",\n )\n\n def pixelize(\n self,\n dimension,\n data_source,\n field,\n bounds,\n size,\n antialias=True,\n periodic=False,\n ):\n # Note that above, we set periodic by default to be *false*. This is\n # because our pixelizers, at present, do not handle periodicity\n # correctly, and if you change the \"width\" of a cylindrical plot, it\n # double-counts in the edge buffers. 
See, for instance, issue 1669.\n ax_name = self.axis_name[dimension]\n if ax_name in (\"r\", \"theta\"):\n return self._ortho_pixelize(\n data_source, field, bounds, size, antialias, dimension, periodic\n )\n elif ax_name == \"z\":\n return self._cyl_pixelize(data_source, field, bounds, size, antialias)\n else:\n # Pixelizing along a cylindrical surface is a bit tricky\n raise NotImplementedError\n\n def pixelize_line(self, field, start_point, end_point, npoints):\n raise NotImplementedError\n\n def _ortho_pixelize(\n self, data_source, field, bounds, size, antialias, dim, periodic\n ):\n period = self.period[:2].copy() # dummy here\n period[0] = self.period[self.x_axis[dim]]\n period[1] = self.period[self.y_axis[dim]]\n if hasattr(period, \"in_units\"):\n period = period.in_units(\"code_length\").d\n buff = np.zeros(size, dtype=\"f8\")\n pixelize_cartesian(\n buff,\n data_source[\"px\"],\n data_source[\"py\"],\n data_source[\"pdx\"],\n data_source[\"pdy\"],\n data_source[field],\n bounds,\n int(antialias),\n period,\n int(periodic),\n )\n return buff\n\n def _cyl_pixelize(self, data_source, field, bounds, size, antialias):\n buff = np.full((size[1], size[0]), np.nan, dtype=\"f8\")\n pixelize_cylinder(\n buff,\n data_source[\"px\"],\n data_source[\"pdx\"],\n data_source[\"py\"],\n data_source[\"pdy\"],\n data_source[field],\n bounds,\n )\n self.sanitize_buffer_fill_values(buff)\n return buff\n\n _x_pairs = ((\"r\", \"theta\"), (\"z\", \"r\"), (\"theta\", \"r\"))\n _y_pairs = ((\"r\", \"z\"), (\"z\", \"theta\"), (\"theta\", \"z\"))\n\n _image_axis_name = None\n\n @property\n def image_axis_name(self):\n if self._image_axis_name is not None:\n return self._image_axis_name\n # This is the x and y axes labels that get displayed. For\n # non-Cartesian coordinates, we usually want to override these for\n # Cartesian coordinates, since we transform them.\n rv = {\n self.axis_id[\"r\"]: (\"theta\", \"z\"),\n self.axis_id[\"z\"]: (\"x\", \"y\"),\n self.axis_id[\"theta\"]: (\"r\", \"z\"),\n }\n for i in list(rv.keys()):\n rv[self.axis_name[i]] = rv[i]\n rv[self.axis_name[i].upper()] = rv[i]\n self._image_axis_name = rv\n return rv\n\n def convert_from_cartesian(self, coord):\n return cartesian_to_cylindrical(coord)\n\n def convert_to_cartesian(self, coord):\n return cylindrical_to_cartesian(coord)\n\n def convert_to_cylindrical(self, coord):\n return coord\n\n def convert_from_cylindrical(self, coord):\n return coord\n\n def convert_to_spherical(self, coord):\n raise NotImplementedError\n\n def convert_from_spherical(self, coord):\n raise NotImplementedError\n\n @property\n def period(self):\n return np.array([0.0, 0.0, 2.0 * np.pi])\n\n def sanitize_center(self, center, axis):\n center, display_center = super().sanitize_center(center, axis)\n display_center = [\n 0.0 * display_center[0],\n 0.0 * display_center[1],\n 0.0 * display_center[2],\n ]\n ax_name = self.axis_name[axis]\n r_ax = self.axis_id[\"r\"]\n theta_ax = self.axis_id[\"theta\"]\n z_ax = self.axis_id[\"z\"]\n if ax_name == \"r\":\n display_center[theta_ax] = self.ds.domain_center[theta_ax]\n display_center[z_ax] = self.ds.domain_center[z_ax]\n elif ax_name == \"theta\":\n # use existing center value\n for idx in (r_ax, z_ax):\n display_center[idx] = center[idx]\n return center, display_center\n\n def sanitize_width(self, axis, width, depth):\n name = self.axis_name[axis]\n r_ax, theta_ax, z_ax = (\n self.ds.coordinates.axis_id[ax] for ax in (\"r\", \"theta\", \"z\")\n )\n if width is not None:\n width = super().sanitize_width(axis, 
width, depth)\n # Note: regardless of axes, these are set up to give consistent plots\n # when plotted, which is not strictly a \"right hand rule\" for axes.\n elif name == \"r\": # soup can label\n width = [2.0 * np.pi * self.ds.domain_width.uq, self.ds.domain_width[z_ax]]\n elif name == \"theta\":\n width = [self.ds.domain_width[r_ax], self.ds.domain_width[z_ax]]\n elif name == \"z\":\n width = [\n 2.0 * self.ds.domain_right_edge[r_ax],\n 2.0 * self.ds.domain_right_edge[r_ax],\n ]\n return width\n"
]
| [
[
"numpy.full",
"numpy.array",
"numpy.zeros"
]
]
|
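The cylindrical coordinate handler in the row above imports cartesian_to_cylindrical and cylindrical_to_cartesian from .coordinate_handler, but their bodies are not part of this row. As a reference point only, here is a minimal standalone sketch of the underlying transforms (x = r*cos(theta), y = r*sin(theta); r = sqrt(x^2 + y^2), theta = atan2(y, x), z unchanged); the function names to_cartesian and to_cylindrical are hypothetical and are not yt's API.

import numpy as np

# Standalone sketch of the cylindrical <-> cartesian transforms; these
# helpers are illustrative and are not yt's coordinate_handler functions.
def to_cartesian(r, theta, z):
    # x = r*cos(theta), y = r*sin(theta); z passes through unchanged.
    return r * np.cos(theta), r * np.sin(theta), z

def to_cylindrical(x, y, z):
    # r is the distance from the z-axis, theta the azimuthal angle in [-pi, pi].
    return np.hypot(x, y), np.arctan2(y, x), z

if __name__ == "__main__":
    x, y, z = to_cartesian(np.array([1.0]), np.array([np.pi / 2]), np.array([3.0]))
    r, theta, zz = to_cylindrical(x, y, z)
    print(r, theta, zz)  # approximately [1.] [1.5707963] [3.]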
furaga/mmpose | [
"1ec4ff329e6e3d58a2bc221b0b4b70eed6a4cf0e"
]
| [
"tests/test_pipelines/test_bottom_up_pipelines.py"
]
| [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport os.path as osp\n\nimport numpy as np\nimport pytest\nimport xtcocotools\nfrom xtcocotools.coco import COCO\n\nfrom mmpose.datasets.pipelines import (BottomUpGenerateHeatmapTarget,\n BottomUpGeneratePAFTarget,\n BottomUpGenerateTarget,\n BottomUpGetImgSize,\n BottomUpRandomAffine,\n BottomUpRandomFlip, BottomUpResizeAlign,\n LoadImageFromFile)\n\n\ndef _get_mask(coco, anno, img_id):\n img_info = coco.loadImgs(img_id)[0]\n\n m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)\n\n for obj in anno:\n if obj['iscrowd']:\n rle = xtcocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n m += xtcocotools.mask.decode(rle)\n elif obj['num_keypoints'] == 0:\n rles = xtcocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n for rle in rles:\n m += xtcocotools.mask.decode(rle)\n\n return m < 0.5\n\n\ndef _get_joints(anno, ann_info, int_sigma):\n num_people = len(anno)\n\n if ann_info['scale_aware_sigma']:\n joints = np.zeros((num_people, ann_info['num_joints'], 4),\n dtype=np.float32)\n else:\n joints = np.zeros((num_people, ann_info['num_joints'], 3),\n dtype=np.float32)\n\n for i, obj in enumerate(anno):\n joints[i, :ann_info['num_joints'], :3] = \\\n np.array(obj['keypoints']).reshape([-1, 3])\n if ann_info['scale_aware_sigma']:\n # get person box\n box = obj['bbox']\n size = max(box[2], box[3])\n sigma = size / 256 * 2\n if int_sigma:\n sigma = int(np.ceil(sigma))\n assert sigma > 0, sigma\n joints[i, :, 3] = sigma\n\n return joints\n\n\ndef _check_flip(origin_imgs, result_imgs):\n \"\"\"Check if the origin_imgs are flipped correctly.\"\"\"\n h, w, c = origin_imgs.shape\n for i in range(h):\n for j in range(w):\n for k in range(c):\n if result_imgs[i, j, k] != origin_imgs[i, w - 1 - j, k]:\n return False\n return True\n\n\ndef test_bottomup_pipeline():\n\n data_prefix = 'tests/data/coco/'\n ann_file = osp.join(data_prefix, 'test_coco.json')\n coco = COCO(ann_file)\n\n ann_info = {}\n ann_info['flip_pairs'] = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],\n [11, 12], [13, 14], [15, 16]]\n ann_info['flip_index'] = [\n 0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15\n ]\n\n ann_info['use_different_joint_weights'] = False\n ann_info['joint_weights'] = np.array([\n 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5,\n 1.5\n ],\n dtype=np.float32).reshape((17, 1))\n ann_info['image_size'] = np.array(512)\n ann_info['heatmap_size'] = np.array([128, 256])\n ann_info['num_joints'] = 17\n ann_info['num_scales'] = 2\n ann_info['scale_aware_sigma'] = False\n\n ann_ids = coco.getAnnIds(785)\n anno = coco.loadAnns(ann_ids)\n mask = _get_mask(coco, anno, 785)\n\n anno = [\n obj for obj in anno if obj['iscrowd'] == 0 or obj['num_keypoints'] > 0\n ]\n joints = _get_joints(anno, ann_info, False)\n\n mask_list = [mask.copy() for _ in range(ann_info['num_scales'])]\n joints_list = [joints.copy() for _ in range(ann_info['num_scales'])]\n\n results = {}\n results['dataset'] = 'coco'\n results['image_file'] = osp.join(data_prefix, '000000000785.jpg')\n results['mask'] = mask_list\n results['joints'] = joints_list\n results['ann_info'] = ann_info\n\n transform = LoadImageFromFile()\n results = transform(copy.deepcopy(results))\n assert results['img'].shape == (425, 640, 3)\n\n # test HorizontalFlip\n random_horizontal_flip = BottomUpRandomFlip(flip_prob=1.)\n results_horizontal_flip = random_horizontal_flip(copy.deepcopy(results))\n 
assert _check_flip(results['img'], results_horizontal_flip['img'])\n\n random_horizontal_flip = BottomUpRandomFlip(flip_prob=0.)\n results_horizontal_flip = random_horizontal_flip(copy.deepcopy(results))\n assert (results['img'] == results_horizontal_flip['img']).all()\n\n results_copy = copy.deepcopy(results)\n results_copy['mask'] = mask_list[0]\n with pytest.raises(AssertionError):\n results_horizontal_flip = random_horizontal_flip(\n copy.deepcopy(results_copy))\n\n results_copy = copy.deepcopy(results)\n results_copy['joints'] = joints_list[0]\n with pytest.raises(AssertionError):\n results_horizontal_flip = random_horizontal_flip(\n copy.deepcopy(results_copy))\n\n results_copy = copy.deepcopy(results)\n results_copy['joints'] = joints_list[:1]\n with pytest.raises(AssertionError):\n results_horizontal_flip = random_horizontal_flip(\n copy.deepcopy(results_copy))\n\n results_copy = copy.deepcopy(results)\n results_copy['mask'] = mask_list[:1]\n with pytest.raises(AssertionError):\n results_horizontal_flip = random_horizontal_flip(\n copy.deepcopy(results_copy))\n\n # test TopDownAffine\n random_affine_transform = BottomUpRandomAffine(30, [0.75, 1.5], 'short', 0)\n results_affine_transform = random_affine_transform(copy.deepcopy(results))\n assert results_affine_transform['img'].shape == (512, 512, 3)\n\n random_affine_transform = BottomUpRandomAffine(30, [0.75, 1.5], 'short',\n 40)\n results_affine_transform = random_affine_transform(copy.deepcopy(results))\n assert results_affine_transform['img'].shape == (512, 512, 3)\n\n results_copy = copy.deepcopy(results)\n results_copy['ann_info']['scale_aware_sigma'] = True\n joints = _get_joints(anno, results_copy['ann_info'], False)\n results_copy['joints'] = \\\n [joints.copy() for _ in range(results_copy['ann_info']['num_scales'])]\n results_affine_transform = random_affine_transform(results_copy)\n assert results_affine_transform['img'].shape == (512, 512, 3)\n\n results_copy = copy.deepcopy(results)\n results_copy['mask'] = mask_list[0]\n with pytest.raises(AssertionError):\n results_horizontal_flip = random_affine_transform(\n copy.deepcopy(results_copy))\n\n results_copy = copy.deepcopy(results)\n results_copy['joints'] = joints_list[0]\n with pytest.raises(AssertionError):\n results_horizontal_flip = random_affine_transform(\n copy.deepcopy(results_copy))\n\n results_copy = copy.deepcopy(results)\n results_copy['joints'] = joints_list[:1]\n with pytest.raises(AssertionError):\n results_horizontal_flip = random_affine_transform(\n copy.deepcopy(results_copy))\n\n results_copy = copy.deepcopy(results)\n results_copy['mask'] = mask_list[:1]\n with pytest.raises(AssertionError):\n results_horizontal_flip = random_affine_transform(\n copy.deepcopy(results_copy))\n\n random_affine_transform = BottomUpRandomAffine(30, [0.75, 1.5], 'long', 40)\n results_affine_transform = random_affine_transform(copy.deepcopy(results))\n assert results_affine_transform['img'].shape == (512, 512, 3)\n\n with pytest.raises(ValueError):\n random_affine_transform = BottomUpRandomAffine(30, [0.75, 1.5],\n 'short-long', 40)\n results_affine_transform = random_affine_transform(\n copy.deepcopy(results))\n\n # test BottomUpGenerateTarget\n generate_multi_target = BottomUpGenerateTarget(2, 30)\n results_generate_multi_target = generate_multi_target(\n copy.deepcopy(results))\n assert 'targets' in results_generate_multi_target\n assert len(results_generate_multi_target['targets']\n ) == results['ann_info']['num_scales']\n\n # test BottomUpGetImgSize when W > H\n 
get_multi_scale_size = BottomUpGetImgSize([1])\n results_get_multi_scale_size = get_multi_scale_size(copy.deepcopy(results))\n assert 'test_scale_factor' in results_get_multi_scale_size['ann_info']\n assert 'base_size' in results_get_multi_scale_size['ann_info']\n assert 'center' in results_get_multi_scale_size['ann_info']\n assert 'scale' in results_get_multi_scale_size['ann_info']\n assert results_get_multi_scale_size['ann_info']['base_size'][1] == 512\n\n # test BottomUpResizeAlign\n transforms = [\n dict(type='ToTensor'),\n dict(\n type='NormalizeTensor',\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ]\n resize_align_multi_scale = BottomUpResizeAlign(transforms=transforms)\n results_copy = copy.deepcopy(results_get_multi_scale_size)\n results_resize_align_multi_scale = resize_align_multi_scale(results_copy)\n assert 'aug_data' in results_resize_align_multi_scale['ann_info']\n\n # test BottomUpGetImgSize when W < H\n results_copy = copy.deepcopy(results)\n results_copy['img'] = np.random.rand(640, 425, 3)\n results_get_multi_scale_size = get_multi_scale_size(results_copy)\n assert results_get_multi_scale_size['ann_info']['base_size'][0] == 512\n\n\ndef test_BottomUpGenerateHeatmapTarget():\n\n data_prefix = 'tests/data/coco/'\n ann_file = osp.join(data_prefix, 'test_coco.json')\n coco = COCO(ann_file)\n\n ann_info = {}\n ann_info['heatmap_size'] = np.array([128, 256])\n ann_info['num_joints'] = 17\n ann_info['num_scales'] = 2\n ann_info['scale_aware_sigma'] = False\n\n ann_ids = coco.getAnnIds(785)\n anno = coco.loadAnns(ann_ids)\n mask = _get_mask(coco, anno, 785)\n\n anno = [\n obj for obj in anno if obj['iscrowd'] == 0 or obj['num_keypoints'] > 0\n ]\n joints = _get_joints(anno, ann_info, False)\n\n mask_list = [mask.copy() for _ in range(ann_info['num_scales'])]\n joints_list = [joints.copy() for _ in range(ann_info['num_scales'])]\n\n results = {}\n results['dataset'] = 'coco'\n results['image_file'] = osp.join(data_prefix, '000000000785.jpg')\n results['mask'] = mask_list\n results['joints'] = joints_list\n results['ann_info'] = ann_info\n\n generate_heatmap_target = BottomUpGenerateHeatmapTarget(2)\n results_generate_heatmap_target = generate_heatmap_target(results)\n assert 'target' in results_generate_heatmap_target\n assert len(results_generate_heatmap_target['target']\n ) == results['ann_info']['num_scales']\n\n\ndef test_BottomUpGeneratePAFTarget():\n\n ann_info = {}\n ann_info['skeleton'] = [[1, 2], [3, 4]]\n ann_info['heatmap_size'] = np.array([5])\n ann_info['num_joints'] = 4\n ann_info['num_scales'] = 1\n\n mask = np.ones((5, 5), dtype=bool)\n joints = np.array([[[1, 1, 2], [3, 3, 2], [0, 0, 0], [0, 0, 0]],\n [[1, 3, 2], [3, 1, 2], [0, 0, 0], [0, 0, 0]]])\n\n mask_list = [mask.copy() for _ in range(ann_info['num_scales'])]\n joints_list = [joints.copy() for _ in range(ann_info['num_scales'])]\n\n results = {}\n results['dataset'] = 'coco'\n results['mask'] = mask_list\n results['joints'] = joints_list\n results['ann_info'] = ann_info\n\n generate_paf_target = BottomUpGeneratePAFTarget(1)\n results_generate_paf_target = generate_paf_target(results)\n sqrt = np.sqrt(2) / 2\n assert (results_generate_paf_target['target'] == np.array(\n [[[sqrt, sqrt, 0, sqrt, sqrt], [sqrt, sqrt, sqrt, sqrt, sqrt],\n [0, sqrt, sqrt, sqrt, 0], [sqrt, sqrt, sqrt, sqrt, sqrt],\n [sqrt, sqrt, 0, sqrt, sqrt]],\n [[sqrt, sqrt, 0, -sqrt, -sqrt], [sqrt, sqrt, 0, -sqrt, -sqrt],\n [0, 0, 0, 0, 0], [-sqrt, -sqrt, 0, sqrt, sqrt],\n [-sqrt, -sqrt, 0, sqrt, sqrt]],\n [[0, 0, 0, 0, 0], [0, 
0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]],\n [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]]],\n dtype=np.float32)).all()\n"
]
| [
[
"numpy.array",
"numpy.ceil",
"numpy.random.rand",
"numpy.zeros",
"numpy.ones",
"numpy.sqrt"
]
]
|
LaudateCorpus1/lingvo | [
"5c69d8afbffca222cf9d357d54e0eb4d09e92764"
]
| [
"lingvo/core/tpu_embedding_layers.py"
]
| [
"# Lint as: python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TPU embedding layers.\"\"\"\n\nimport math\n\nimport lingvo.compat as tf\nfrom lingvo.core import base_layer\nfrom lingvo.core import hyperparams\nfrom lingvo.core import py_utils\nfrom lingvo.core import schedule\n\n# pylint:disable=g-direct-tensorflow-import\nfrom tensorflow.python.tpu import tpu_embedding as tpu_embedding_lib\n# pylint:enable=g-direct-tensorflow-import\n\n\ndef _IsTpuTraining(p):\n \"\"\"Whether we should create embedding tables and run lookup on tpu.\"\"\"\n return not p.is_inference and py_utils.use_tpu()\n\n\ndef _RemovePrivateVar(layer, var_name):\n \"\"\"Remove a variable by name from `layer`.\n\n This is usually used to avoid copying the variable to TPU, for example, by the\n tf.cast when accessing layer.theta.\n\n Args:\n layer: The layer to remove the variable from.\n var_name: The name of the variable to remove.\n \"\"\"\n # pylint: disable=protected-access\n del layer._private_vars[var_name]\n del layer._private_theta[var_name]\n # pylint: enable=protected-access\n\n\nclass TpuEmbeddingCollection:\n \"\"\"Manage various TPU embedding related ops and tensors.\"\"\"\n\n @classmethod\n def Get(cls):\n \"\"\"Returns the TpuEmbeddingCollection associated with the current graph.\"\"\"\n emb_collection = py_utils.GetTpuEmbeddingGraphCollection()\n assert len(emb_collection) <= 1\n if len(emb_collection) == 1:\n tf.logging.info(\n 'TpuEmbeddingCollection singleton already exists, reusing')\n return emb_collection[0]\n else:\n singleton = cls()\n emb_collection.append(singleton)\n return singleton\n\n def __init__(self):\n # Maps table name to a tuple (var_list, is_inference_with_bfloat16), where\n # var_list is the list of variables for the corresponding table, and\n # is_inference_with_bfloat16 is a boolean telling whether this table is\n # using bfloat16 for inference.\n self._table_vars = py_utils.NestedMap()\n\n # The TPUEmbedding configuration.\n self._tpu_embedding = None\n\n # Maps table name to the list of ops that loads/retrieves embedding tables\n # to/from TPU.\n self._load_ops_map = py_utils.NestedMap()\n self._retrieve_ops_map = py_utils.NestedMap()\n\n # Maps task name to the (feature_name -> activation_tensor) dict for the\n # corresponding task.\n self._activations_by_task = {}\n\n # List of (name, value, weight) tuples for summary.\n self._summary_tensors = []\n\n # Set of embedding feature names.\n self._feature_names = None\n\n # Schedule for the value that is used as TPU embedding gradient multiplier.\n self._gradient_multiplier_schedule = None\n\n # Maps task name to the mode used by that task.\n self._mode_by_task = {}\n\n # Maps task name to the send gradient op for that task. 
Mainly used to\n # ensure that send gradient op is created only once for each task.\n self._send_gradient_op_by_task = {}\n\n def AddTableVariables(self, table_name, var_list, is_inference_with_bfloat16):\n \"\"\"Add TPU embedding table variable list to the collection.\"\"\"\n if table_name in self._table_vars:\n raise ValueError(f'Variables for table {table_name} already exist.')\n self._table_vars[table_name] = (var_list, is_inference_with_bfloat16)\n\n @property\n def table_variables(self):\n \"\"\"Returns a list of table variables.\"\"\"\n return self._table_vars.Transform(lambda val: val[0])\n\n @property\n def inference_with_bfloat16_var_names(self):\n \"\"\"Returns a list of names of table variables that do bfloat16 inference.\"\"\"\n result = []\n for var_list, is_inference_with_bfloat16 in self._table_vars.values():\n if is_inference_with_bfloat16:\n result += [v.op.name for v in var_list]\n return result\n\n @property\n def tpu_embedding(self):\n return self._tpu_embedding\n\n @tpu_embedding.setter\n def tpu_embedding(self, tpu_embedding):\n if self._tpu_embedding is not None:\n raise ValueError('TPUEmbedding already set before.')\n self._tpu_embedding = tpu_embedding\n\n def AddLoadRetrieveOps(self, table_name, load_ops, retrieve_ops):\n if table_name in self._load_ops_map:\n raise ValueError(f'Load ops for table {table_name} already exist.')\n assert table_name not in self._retrieve_ops_map\n self._load_ops_map[table_name] = load_ops\n self._retrieve_ops_map[table_name] = retrieve_ops\n\n @property\n def load_ops(self):\n return self._load_ops_map\n\n @property\n def retrieve_ops(self):\n return self._retrieve_ops_map\n\n def _ValidateTaskScope(self, task_call_scope):\n if not task_call_scope:\n raise ValueError(\n 'It expects a non-empty task call scope name, but get '\n f'{task_call_scope}. This usually means the current code is not run '\n 'under a py_utils.TaskCallScope() context.')\n\n def AddActivations(self, task_call_scope):\n self._ValidateTaskScope(task_call_scope)\n tf.logging.info(\n f'Adding TPU embedding activations for task {task_call_scope}.')\n if task_call_scope not in self._activations_by_task:\n activations = self._tpu_embedding.get_activations()\n self._activations_by_task[task_call_scope] = activations\n return self._activations_by_task[task_call_scope]\n\n def GetActivations(self, task_call_scope):\n tf.logging.info(\n f'Getting TPU embedding activations for task {task_call_scope}.')\n if task_call_scope in self._activations_by_task:\n self._ValidateTaskScope(task_call_scope)\n return self._activations_by_task[task_call_scope]\n return None\n\n def AddSummaryTensor(self, name, value, weight=1.0):\n self._summary_tensors.append((name, value, tf.convert_to_tensor(weight)))\n\n @property\n def summary_tensors(self):\n return self._summary_tensors\n\n @property\n def feature_names(self):\n return self._feature_names\n\n @feature_names.setter\n def feature_names(self, feature_names):\n if self._feature_names and self._feature_names != feature_names:\n raise ValueError('feature_names already exists. 
'\n f'Existing feature names: {self._feature_names}, '\n f'feature names being added: {feature_names}')\n self._feature_names = feature_names\n\n def SetGradientMultiplierSchedule(self, multiplier_schedule):\n if self._gradient_multiplier_schedule is not None:\n raise ValueError('gradient_multiplier_schedule was set before.')\n self._gradient_multiplier_schedule = multiplier_schedule\n\n def SetTaskMode(self, task_call_scope, mode):\n self._ValidateTaskScope(task_call_scope)\n tf.logging.info(\n f'Setting TPU embedding mode for task {task_call_scope} as {mode}.')\n self._mode_by_task[task_call_scope] = mode\n\n def ShouldStopGradient(self, task_call_scope):\n self._ValidateTaskScope(task_call_scope)\n if task_call_scope not in self._mode_by_task:\n raise ValueError(\n f'TPU embedding mode for task {task_call_scope} not found.')\n should_stop_gradient = (self._mode_by_task[task_call_scope] != 'train')\n tf.logging.info(('Disabled' if should_stop_gradient else 'Enabled') +\n f' TPU embedding gradient for task {task_call_scope}.')\n return should_stop_gradient\n\n def ApplyGradients(self, task_call_scope, feature_to_gradient_dict):\n \"\"\"Apply tpu embedding gradient updates.\n\n Args:\n task_call_scope: The current task call scope name.\n feature_to_gradient_dict: A `py_utils.NestedMap` of: tpu embedding feature\n name -> gradient tensor for the embedding feature.\n\n Returns:\n The gradient update op and a dict of eval metrics.\n\n Raises:\n ValueError: if gradients have been applied before for the current task.\n \"\"\"\n self._ValidateTaskScope(task_call_scope)\n if task_call_scope in self._send_gradient_op_by_task:\n raise ValueError(\n f'Send gradient op for task {task_call_scope} already exist.')\n tf.logging.info(\n f'Applying TPU embedding gradients for task {task_call_scope}.')\n\n # Apply gradient multiplier schedule.\n grad_multiplier = self._gradient_multiplier_schedule.Value()\n feature_to_gradient_dict = feature_to_gradient_dict.Transform(\n lambda g: g * grad_multiplier)\n\n send_gradient_op = (\n self._tpu_embedding.generate_send_gradients_op(\n feature_to_gradient_dict, step=py_utils.GetGlobalStep()))\n self._send_gradient_op_by_task[task_call_scope] = send_gradient_op\n\n activations = self.GetActivations(task_call_scope).values()\n eval_metrics = {\n 'tpu_embedding_activation_norm':\n (tf.sqrt(py_utils.SumSquared(activations)), tf.constant(1.0)),\n 'tpu_embedding_grad_norm':\n (tf.sqrt(py_utils.SumSquared(feature_to_gradient_dict.Flatten())),\n tf.constant(1.0)),\n 'tpu_embedding_gradient_multiplier':\n (grad_multiplier, tf.constant(1.0)),\n }\n return send_gradient_op, eval_metrics\n\n\nclass _TPUEmbeddingOptimizer(base_layer.BaseLayer):\n \"\"\"Base class for TPUEmbeddingLayer, TPUEmbeddingTable optimizers.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('clip_weight_min', None,\n 'The minimum value to clip the weight by; None means -infinity.')\n p.Define('clip_weight_max', None,\n 'The maximum value to clip the weight by; None means +infinity.')\n p.Define(\n 'clip_gradient_min', None,\n 'The minimum value to clip the gradient by; None means -infinity.')\n p.Define(\n 'clip_gradient_max', None,\n 'The maximum value to clip the gradient by; None means +infinity.')\n p.Define(\n 'weight_decay_factor', None,\n 'Amount of weight decay to apply; None means that the weights are not '\n 'decayed.')\n p.Define(\n 'multiply_weight_decay_factor_by_learning_rate', None,\n 'If true, weight_decay_factor is multiplied by the current learning '\n 
'rate.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n\n def CreateOptimizerParameters(self, learning_rate):\n \"\"\"Create TPUEmbedding API optimzier parameters.\"\"\"\n return NotImplementedError()\n\n def CreateSlotVariablesAndOps(self, table_vars, tpu_embedding_table):\n \"\"\"Create slot variables and load/retrieve ops.\n\n Args:\n table_vars: A list of all embedding table shard variables.\n tpu_embedding_table: Parent TPUEmbeddingTable layer.\n\n Returns:\n List of load ops\n List of retrieve ops\n \"\"\"\n raise NotImplementedError()\n\n\nclass TPUEmbeddingSGDOptimizer(_TPUEmbeddingOptimizer):\n \"\"\"SGD optimizer for TPUEmbeddingLayer, TPUEmbeddingTable.\"\"\"\n\n def CreateOptimizerParameters(self, learning_rate):\n p = self.params\n return tpu_embedding_lib.StochasticGradientDescentParameters(\n learning_rate=learning_rate,\n clip_weight_min=p.clip_weight_min,\n clip_weight_max=p.clip_weight_max,\n weight_decay_factor=p.weight_decay_factor,\n multiply_weight_decay_factor_by_learning_rate=p\n .multiply_weight_decay_factor_by_learning_rate,\n clip_gradient_min=p.clip_gradient_min,\n clip_gradient_max=p.clip_gradient_max)\n\n def CreateSlotVariablesAndOps(self, table_vars, tpu_embedding_table):\n load_op_list = []\n retrieve_op_list = []\n\n num_tpu_hosts = tpu_embedding_table.params.num_tpu_hosts\n table_name = tpu_embedding_table.table_name\n\n for host_id, table_var in zip(range(num_tpu_hosts), table_vars):\n # The slot vars should be on the same device as the table var.\n device_name = tpu_embedding_table.GetDeviceName(host_id)\n with tf.device(device_name), py_utils.outside_all_rewrites():\n # Only the Trainer needs these ops.\n if py_utils.use_tpu():\n # TPU Embedding load/retrieve ops need to be in the outer graph scope.\n with tf.init_scope():\n tf.logging.info('creating load and retrieve ops.')\n load_parameters_op = (\n tpu_embedding_lib.tpu_ops\n .load_tpu_embedding_stochastic_gradient_descent_parameters(\n parameters=table_var,\n table_name=table_name,\n num_shards=num_tpu_hosts,\n shard_id=host_id))\n load_op_list.append(load_parameters_op)\n\n retrieved_table = (\n tpu_embedding_lib.tpu_ops\n .retrieve_tpu_embedding_stochastic_gradient_descent_parameters(\n table_name=table_name,\n num_shards=num_tpu_hosts,\n shard_id=host_id))\n retrieve_parameters_op = tpu_embedding_lib.control_flow_ops.group(\n tf.assign(table_var, retrieved_table))\n retrieve_op_list.append(retrieve_parameters_op)\n\n return load_op_list, retrieve_op_list\n\n\nclass TPUEmbeddingAdagradOptimizer(_TPUEmbeddingOptimizer):\n \"\"\"Adagrad optimizer for TPUEmbeddingLayer, TPUEmbeddingTable.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('initial_accumulator', 0.1,\n 'Initial value of Adagrad accumulator.')\n p.Define(\n 'use_gradient_accumulation', True,\n 'Setting this to False makes embedding gradients calculation less '\n 'accurate but faster. 
See tpu_embedding_lib for more details.')\n return p\n\n def CreateOptimizerParameters(self, learning_rate):\n p = self.params\n return tpu_embedding_lib.AdagradParameters(\n learning_rate=learning_rate,\n initial_accumulator=p.initial_accumulator,\n clip_weight_min=p.clip_weight_min,\n clip_weight_max=p.clip_weight_max,\n weight_decay_factor=p.weight_decay_factor,\n multiply_weight_decay_factor_by_learning_rate=p\n .multiply_weight_decay_factor_by_learning_rate,\n clip_gradient_min=p.clip_gradient_min,\n clip_gradient_max=p.clip_gradient_max)\n\n def CreateSlotVariablesAndOps(self, table_vars, tpu_embedding_table):\n p = self.params\n\n load_op_list = []\n retrieve_op_list = []\n\n num_tpu_hosts = tpu_embedding_table.params.num_tpu_hosts\n table_name = tpu_embedding_table.table_name\n slot_var_collections = [tpu_embedding_table.__class__.__name__ + '_vars']\n\n for host_id, table_var in zip(range(num_tpu_hosts), table_vars):\n # The slot vars should be on the same device as the table var.\n device_name = tpu_embedding_table.GetDeviceName(host_id)\n with tf.device(device_name), py_utils.outside_all_rewrites():\n w_ada = py_utils.WeightParams(\n shape=table_var.shape.as_list(),\n init=py_utils.WeightInit.Constant(p.initial_accumulator),\n dtype=p.dtype,\n collections=slot_var_collections)\n var_name = tpu_embedding_table.GetVariableName(host_id) + '/Adagrad'\n tpu_embedding_table.CreateVariable(var_name, w_ada, trainable=False)\n accumulator_var = tpu_embedding_table.vars[var_name]\n\n # Only the Trainer needs these ops.\n if py_utils.use_tpu():\n # Remove the slot vars from the variable list to avoid them being\n # copied to TPU.\n _RemovePrivateVar(tpu_embedding_table, var_name)\n\n # TPU Embedding load/retrieve ops need to be in the outer graph scope.\n with tf.init_scope():\n tf.logging.info('creating load and retrieve ops.')\n load_parameters_op = (\n tpu_embedding_lib.tpu_ops.load_tpu_embedding_adagrad_parameters(\n parameters=table_var,\n accumulators=accumulator_var,\n table_name=table_name,\n num_shards=num_tpu_hosts,\n shard_id=host_id))\n load_op_list.append(load_parameters_op)\n\n retrieved_table, retrieved_accumulator = (\n tpu_embedding_lib.tpu_ops\n .retrieve_tpu_embedding_adagrad_parameters(\n table_name=table_name,\n num_shards=num_tpu_hosts,\n shard_id=host_id))\n retrieve_parameters_op = tpu_embedding_lib.control_flow_ops.group(\n tf.assign(table_var, retrieved_table),\n tf.assign(accumulator_var, retrieved_accumulator))\n retrieve_op_list.append(retrieve_parameters_op)\n\n return load_op_list, retrieve_op_list\n\n\nclass TPUEmbeddingAdamOptimizer(_TPUEmbeddingOptimizer):\n \"\"\"Adam optimizer for TPUEmbeddingLayer, TPUEmbeddingTable.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'sum_inside_sqrt', True, 'When this is true, the Adam update'\n 'formula is changed from m / (sqrt(v) + epsilon) to m / '\n 'sqrt(v + epsilon**2). This option improves the performance of'\n 'TPU training and is not expected to harm model quality.')\n p.Define('lazy_adam', True, 'Use lazy Adam instead of Adam. 
Lazy Adam'\n 'trains faster.')\n p.Define('beta1', 0.9, 'The exponential decay rate for the 1st moment'\n 'estimates')\n p.Define('beta2', 0.999, 'The exponential decay rate for the 2nd moment'\n 'estimates')\n p.Define('epsilon', 1e-08, 'A small constant for numerical stability')\n p.Define(\n 'use_gradient_accumulation', True, 'Setting this to False makes'\n 'embedding gradients calculation less accurate but faster')\n\n return p\n\n def CreateOptimizerParameters(self, learning_rate):\n p = self.params\n return tpu_embedding_lib.AdamParameters(\n learning_rate=learning_rate,\n beta1=p.beta1,\n beta2=p.beta2,\n epsilon=p.epsilon,\n lazy_adam=p.lazy_adam,\n sum_inside_sqrt=p.sum_inside_sqrt,\n use_gradient_accumulation=p.use_gradient_accumulation,\n clip_weight_min=p.clip_weight_min,\n clip_weight_max=p.clip_weight_max,\n weight_decay_factor=p.weight_decay_factor,\n multiply_weight_decay_factor_by_learning_rate=p\n .multiply_weight_decay_factor_by_learning_rate,\n clip_gradient_min=p.clip_gradient_min,\n clip_gradient_max=p.clip_gradient_max)\n\n def CreateSlotVariablesAndOps(self, table_vars, tpu_embedding_table):\n p = self.params\n\n load_op_list = []\n retrieve_op_list = []\n\n num_tpu_hosts = tpu_embedding_table.params.num_tpu_hosts\n table_name = tpu_embedding_table.table_name\n slot_var_collections = [tpu_embedding_table.__class__.__name__ + '_vars']\n\n for host_id, table_var in zip(range(num_tpu_hosts), table_vars):\n # The slot vars should be on the same device as the table var.\n device_name = tpu_embedding_table.GetDeviceName(host_id)\n with tf.device(device_name), py_utils.outside_all_rewrites():\n m_adam = py_utils.WeightParams(\n shape=table_var.shape.as_list(),\n init=py_utils.WeightInit.Constant(0.0),\n dtype=p.dtype,\n collections=slot_var_collections)\n var_name_m = tpu_embedding_table.GetVariableName(host_id) + '/Adam/m'\n tpu_embedding_table.CreateVariable(var_name_m, m_adam, trainable=False)\n m_var = tpu_embedding_table.vars[var_name_m]\n\n v_adam = py_utils.WeightParams(\n shape=table_var.shape.as_list(),\n init=py_utils.WeightInit.Constant(0.0),\n dtype=p.dtype,\n collections=slot_var_collections)\n var_name_v = tpu_embedding_table.GetVariableName(host_id) + '/Adam/v'\n tpu_embedding_table.CreateVariable(var_name_v, v_adam, trainable=False)\n v_var = tpu_embedding_table.vars[var_name_v]\n\n # Only the Trainer needs these ops.\n if py_utils.use_tpu():\n # Remove the slot vars from the variable list to avoid them being\n # copied to TPU.\n _RemovePrivateVar(tpu_embedding_table, var_name_m)\n _RemovePrivateVar(tpu_embedding_table, var_name_v)\n\n # TPU Embedding load/retrieve ops need to be in the outer graph scope.\n with tf.init_scope():\n tf.logging.info('creating load and retrieve ops.')\n load_parameters_op = (\n tpu_embedding_lib.tpu_ops.load_tpu_embedding_adam_parameters(\n parameters=table_var,\n momenta=m_var,\n velocities=v_var,\n table_name=table_name,\n num_shards=num_tpu_hosts,\n shard_id=host_id))\n load_op_list.append(load_parameters_op)\n\n retrieved_table, retrieved_m, retrieved_v = (\n tpu_embedding_lib.tpu_ops\n .retrieve_tpu_embedding_adam_parameters(\n table_name=table_name,\n num_shards=num_tpu_hosts,\n shard_id=host_id))\n retrieve_parameters_op = tpu_embedding_lib.control_flow_ops.group(\n tf.assign(table_var, retrieved_table),\n tf.assign(m_var, retrieved_m), tf.assign(v_var, retrieved_v))\n retrieve_op_list.append(retrieve_parameters_op)\n\n return load_op_list, retrieve_op_list\n\n\nclass TPUEmbeddingFTRLOptimizer(_TPUEmbeddingOptimizer):\n 
\"\"\"FTRL optimizer for TPUEmbeddingLayer, TPUEmbeddingTable.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'learning_rate_power', -0.5,\n 'A float value, must be less or equal to zero. Controls how the'\n 'learning rate decreases during training. Use zero for a fixed learning'\n 'rate.')\n p.Define(\n 'initial_accumulator_value', 0.1, 'The starting value for'\n 'accumulators. Only zero or positive values are allowed.')\n p.Define(\n 'l1_regularization_strength', 0.0, 'A float value, must be greater'\n 'than or equal to zero. Defaults to 0.0.')\n p.Define(\n 'l2_regularization_strength', 0.0, 'A float value, must be greater'\n 'than or equal to zero. Defaults to 0.0.')\n p.Define('multiply_linear_by_learning_rate', False, 'Whether multiply'\n 'linear by learning rate.')\n p.Define(\n 'beta', 0.0, 'A float value, representing the beta value from the'\n 'FTLR paper. Defaults to 0.0.')\n p.Define('allow_zero_accumulator', False, 'Whether allowing zero'\n 'accumulator.')\n p.Define('use_gradient_accumulation', True, 'Use gradient accumulation.')\n p.Define('initial_linear_value', 0.0, 'Initial linear value.')\n\n return p\n\n def CreateOptimizerParameters(self, learning_rate):\n p = self.params\n return tpu_embedding_lib.FtrlParameters(\n learning_rate=learning_rate,\n learning_rate_power=p.learning_rate_power,\n initial_accumulator_value=p.initial_accumulator_value,\n l1_regularization_strength=p.l1_regularization_strength,\n l2_regularization_strength=p.l2_regularization_strength,\n use_gradient_accumulation=p.use_gradient_accumulation,\n clip_weight_min=p.clip_weight_min,\n clip_weight_max=p.clip_weight_max,\n weight_decay_factor=p.weight_decay_factor,\n multiply_weight_decay_factor_by_learning_rate=p\n .multiply_weight_decay_factor_by_learning_rate,\n multiply_linear_by_learning_rate=p.multiply_linear_by_learning_rate,\n beta=p.beta,\n allow_zero_accumulator=p.allow_zero_accumulator,\n clip_gradient_min=p.clip_gradient_min,\n clip_gradient_max=p.clip_gradient_max)\n\n def CreateSlotVariablesAndOps(self, table_vars, tpu_embedding_table):\n p = self.params\n\n load_op_list = []\n retrieve_op_list = []\n\n num_tpu_hosts = tpu_embedding_table.params.num_tpu_hosts\n table_name = tpu_embedding_table.table_name\n slot_var_collections = [tpu_embedding_table.__class__.__name__ + '_vars']\n\n for host_id, table_var in zip(range(num_tpu_hosts), table_vars):\n # The slot vars should be on the same device as the table var.\n device_name = tpu_embedding_table.GetDeviceName(host_id)\n with tf.device(device_name), py_utils.outside_all_rewrites():\n accumulator = py_utils.WeightParams(\n shape=table_var.shape.as_list(),\n init=py_utils.WeightInit.Constant(p.initial_accumulator_value),\n dtype=p.dtype,\n collections=slot_var_collections)\n accumulator_name = (\n tpu_embedding_table.GetVariableName(host_id) + '/Ftrl')\n tpu_embedding_table.CreateVariable(\n accumulator_name, accumulator, trainable=False)\n accumulator_var = tpu_embedding_table.vars[accumulator_name]\n\n linear = py_utils.WeightParams(\n shape=table_var.shape.as_list(),\n init=py_utils.WeightInit.Constant(p.initial_linear_value),\n dtype=p.dtype,\n collections=slot_var_collections)\n linear_name = tpu_embedding_table.GetVariableName(host_id) + '/Ftrl_1'\n tpu_embedding_table.CreateVariable(linear_name, linear, trainable=False)\n linear_var = tpu_embedding_table.vars[linear_name]\n\n # Only the Trainer needs these ops.\n if py_utils.use_tpu():\n # Remove the slot vars from the variable list to avoid them 
being\n # copied to TPU.\n _RemovePrivateVar(tpu_embedding_table, accumulator_name)\n _RemovePrivateVar(tpu_embedding_table, linear_name)\n\n # TPU Embedding load/retrieve ops need to be in the outer graph scope.\n with tf.init_scope():\n tf.logging.info('creating load and retrieve ops.')\n load_parameters_op = (\n tpu_embedding_lib.tpu_ops.load_tpu_embedding_ftrl_parameters(\n parameters=table_var,\n accumulators=accumulator_var,\n linears=linear_var,\n table_name=table_name,\n num_shards=num_tpu_hosts,\n shard_id=host_id))\n load_op_list.append(load_parameters_op)\n\n retrieved_table, retrieved_accumulator, retrieved_linear = (\n tpu_embedding_lib.tpu_ops\n .retrieve_tpu_embedding_ftrl_parameters(\n table_name=table_name,\n num_shards=num_tpu_hosts,\n shard_id=host_id))\n retrieve_parameters_op = tpu_embedding_lib.control_flow_ops.group(\n tf.assign(table_var, retrieved_table),\n tf.assign(accumulator_var, retrieved_accumulator),\n tf.assign(linear_var, retrieved_linear))\n retrieve_op_list.append(retrieve_parameters_op)\n\n return load_op_list, retrieve_op_list\n\n\nclass TPUEmbeddingTable(base_layer.BaseLayer):\n \"\"\"An embedding table controlled by TPUEmbeddingLayer.\n\n Note that all input_keys needs to be declared upfront.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('vocab_size', 0, 'Depth of the input.')\n p.Define('embedding_dim', 0, 'Depth of the output.')\n p.Define('input_keys', None, 'Name of inputs in InputBatch.')\n p.Define(\n 'combiner', 'mean',\n 'Must be \"sum\", \"sqrtn\", \"mean\" or None in the case of a '\n '\"sequence embedding \"')\n p.Define(\n 'max_sequence_length', None,\n 'If not None or 0, embedding lookup will return a '\n '\"sequence embedding\" of shape '\n '`[batch, max_sequence_length, embedding_dim]` without applying a '\n 'sequence reducing combiner')\n p.Define('num_tpu_hosts', 0, 'Total number of TPU hosts.')\n p.Define(\n 'optimizer', None,\n 'Table optimizer parameters. Will override the optimizer parameters '\n 'defined in this table\\'s TPUEmbeddingLayer.')\n p.Define('learning_rate', None,\n 'Overrides TPUEmbeddingLayer\\'s learning_rate.')\n p.Define('lr_schedule', None, 'Overrides TPUEmbeddingLayer\\'s lr_schedule.')\n p.Define(\n 'inference_use_merged_variable', False,\n 'Whether to use merged embedding table variable during inference. '\n 'If set to True, only one table variable will be created, and '\n 'the user will need to manually merge the sharded table variables '\n 'in the trained checkpoint before generating the inference graph.')\n p.Define(\n 'inference_use_bfloat16', False,\n 'Whether to use bfloat16 as variable dtype for embedding table during '\n 'inference. 
If set to True, the variables in the inference checkpoint '\n 'must be in bfloat16 format, and the conversion (float->bfloat16) '\n 'need to be done offline.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.vocab_size > 0\n assert p.embedding_dim > 0\n assert p.input_keys\n assert p.name\n assert p.num_tpu_hosts > 0\n if p.combiner is None:\n assert p.max_sequence_length\n if p.max_sequence_length is not None and p.max_sequence_length > 0:\n assert p.combiner is None\n assert p.optimizer\n assert p.learning_rate\n assert p.lr_schedule\n\n self._ids_per_shard = int(math.ceil(float(p.vocab_size) / p.num_tpu_hosts))\n self._padded_vocab_size = self._ids_per_shard * p.num_tpu_hosts\n self._input_keys = p.input_keys\n\n self._max_sequence_length = 0\n if p.max_sequence_length:\n self._max_sequence_length = p.max_sequence_length\n\n self.CreateChild('optimizer', p.optimizer)\n self.CreateChild('schedule', p.lr_schedule)\n self._tpu_embedding_collection = TpuEmbeddingCollection.Get()\n\n def LearningRateFn(step):\n with py_utils.GlobalStepContext(step):\n lr = self.schedule.Value() * p.learning_rate\n self._tpu_embedding_collection.AddSummaryTensor(\n 'tpu_embedding_lr/{}'.format(p.name), lr)\n return lr\n\n self._table_name = '{}_table'.format(p.name)\n self._table_config = tpu_embedding_lib.TableConfig(\n self._padded_vocab_size,\n p.embedding_dim,\n combiner=p.combiner,\n learning_rate=None,\n learning_rate_fn=LearningRateFn,\n # All TableConfigs passed to API will have a learning rate function,\n # so the learning_rate in the optimization_parameters is not used.\n optimization_parameters=self.optimizer.CreateOptimizerParameters(\n p.learning_rate))\n\n def _CreateLayerVariables(self):\n p = self.params\n\n # Reuse the singleton table variables if they were created before.\n all_table_vars = self._tpu_embedding_collection.table_variables\n if self.table_name in all_table_vars:\n embedding_table_vars = all_table_vars[self.table_name]\n else:\n inference_with_merged_var = (\n p.is_inference and p.inference_use_merged_variable)\n is_inference_with_bfloat16 = (p.is_inference and p.inference_use_bfloat16)\n dtype = tf.bfloat16 if is_inference_with_bfloat16 else p.dtype\n w_pc = py_utils.WeightParams(\n shape=[\n p.vocab_size if inference_with_merged_var else\n self._ids_per_shard, p.embedding_dim\n ],\n init=p.params_init,\n dtype=dtype,\n collections=[self.__class__.__name__ + '_vars'])\n\n embedding_table_vars = []\n if inference_with_merged_var:\n with py_utils.outside_all_rewrites():\n var_name = 'merged_var'\n self.CreateVariable(var_name, w_pc)\n embedding_var = self.vars[var_name]\n embedding_table_vars.append(embedding_var)\n # Remove from _private_vars / _private_thetas to be added later as wm.\n _RemovePrivateVar(self, var_name)\n else:\n for i in range(p.num_tpu_hosts):\n device_name = self.GetDeviceName(i)\n with tf.device(device_name), py_utils.outside_all_rewrites():\n var_name = self.GetVariableName(i)\n self.CreateVariable(var_name, w_pc)\n embedding_var = self.vars[var_name]\n embedding_table_vars.append(embedding_var)\n # Remove from _private_vars / _private_thetas to be added later as\n # wm.\n _RemovePrivateVar(self, var_name)\n\n # Track the table variables so they can be excluded from EMA.\n self._tpu_embedding_collection.AddTableVariables(\n self.table_name, embedding_table_vars, is_inference_with_bfloat16)\n\n if not _IsTpuTraining(p):\n # We don't need this for TrainerTpu, as the vars are not directly\n # accessed besides 
in the TPU embeddding load/retrieve ops.\n # However, this is needed for CPU (eval/decode/controller).\n self._private_vars['wm'] = embedding_table_vars\n self._private_theta['wm'] = embedding_table_vars\n\n # If slot variables and load/retrieve ops were created before, maybe by a\n # different program or task, don't create it again.\n # Note that there should be only one copy of slot variables and\n # load/retrieve ops in the graph and they're shared by different\n # tasks/programs.\n all_load_ops = self._tpu_embedding_collection.load_ops\n if self.table_name not in all_load_ops:\n assert self.table_name not in self._tpu_embedding_collection.retrieve_ops\n # Only trainer and controller (for checkpointing) need slot variables.\n # Only trainer needs load/retrieve ops.\n if not self.do_eval and not p.is_inference:\n load_ops, retrieve_ops = self.optimizer.CreateSlotVariablesAndOps(\n embedding_table_vars, self)\n self._tpu_embedding_collection.AddLoadRetrieveOps(\n self.table_name, load_ops, retrieve_ops)\n\n # Return device to place sharded variables on.\n def GetDeviceName(self, host_id):\n if self.params.is_inference:\n # This is to place variables on the same device as other variables.\n return None\n if self.do_eval:\n return '/cpu:0'\n else:\n return '{}/replica:0/task:{}/device:CPU:0'.format(\n self.cluster.params.worker.name, host_id)\n\n # Return variable name for embedding table shards.\n def GetVariableName(self, host_id):\n return 'var_%d' % host_id\n\n @property\n def table_config(self):\n return self._table_config\n\n @property\n def table_name(self):\n return self._table_name\n\n @property\n def input_keys(self):\n return self._input_keys\n\n @property\n def max_sequence_length(self):\n return self._max_sequence_length\n\n def _SequenceEmbLookup(self, dense_ids: tf.Tensor,\n partition_strategy: str) -> tf.Tensor:\n \"\"\"Sequence embedding lookup.\n\n Note that we do not support padding ids in sequence embeddings.\n\n Args:\n dense_ids: An int Tensor of shape [batch, sequence].\n partition_strategy: See TPUEmbeddingLayer partition_strategy param.\n\n Returns:\n A float32 activations Tensor of shape\n [batch, max_sequence_length, embedding_dim].\n \"\"\"\n p = self.params\n embs = tf.nn.embedding_lookup(\n self.theta.wm,\n tf.reshape(dense_ids, [-1]),\n partition_strategy=partition_strategy)\n out_shape = tf.concat([tf.shape(dense_ids), [p.embedding_dim]], 0)\n return tf.reshape(embs, out_shape)\n\n def _CombinerEmbLookup(self, sparse_ids: tf.SparseTensor,\n partition_strategy: str) -> tf.Tensor:\n \"\"\"Combiner embedding lookup.\n\n Args:\n sparse_ids: An int SparseTensor of shape [batch, ...].\n partition_strategy: See TPUEmbeddingLayer partition_strategy param.\n\n Returns:\n A float32 activations Tensor of shape [batch, 1, embedding_dim].\n \"\"\"\n p = self.params\n embs = tf.nn.embedding_lookup_sparse(\n self.theta.wm,\n sparse_ids,\n None, # sp_weights\n combiner=p.combiner,\n partition_strategy=partition_strategy)\n batch_size = sparse_ids.dense_shape[0]\n # For tf.nn.embedding_lookup_sparse, output.dim0 might be different from\n # sparse_ids.dense_shape.dim0.\n # Explicitly pad results to maintain dim0=batch.\n dim0_padlen = tf.cast(batch_size, tf.int32) - tf.shape(embs)[0]\n embs = tf.pad(embs, [[0, dim0_padlen], [0, 0]])\n # [batch, 1, embedding_dim]\n embs = py_utils.HasShape(embs, [batch_size], ndims=1)\n return tf.expand_dims(embs, 1)\n\n def CpuEmbLookup(self, ids_map: py_utils.NestedMap,\n partition_strategy: str) -> py_utils.NestedMap:\n \"\"\"CPU evaluation 
embedding lookup for dense tensors.\n\n Args:\n ids_map: A NestedMap of nested `input_key` string -> [batch, sequence]\n int Tensor. For sequence embeddings, -1 is used as a padding id.\n Non-sequence embeddings do not support padded ids.\n partition_strategy: See TPUEmbeddingLayer partition_strategy param.\n\n Returns:\n An activations NestedMap of nested string -> float32 Tensor.\n For non-sequence embeddings: [batch, 1, embedding_dim]\n For sequence embeddings: [batch, max_sequence_length, embedding_dim]\n \"\"\"\n if self.max_sequence_length > 0:\n # \"Sequence embedding\", no combiner case\n return ids_map.Transform(\n lambda ids: self._SequenceEmbLookup(ids, partition_strategy))\n else:\n # Non-\"Sequence embedding\", combiner case\n def _Lookup(ids):\n # Dense to sparse.\n dense_shape = tf.shape(ids, out_type=tf.int64)\n sample_indices = tf.cast(tf.where(tf.not_equal(ids, -1)), tf.int64)\n embedding_indices = tf.cast(tf.gather_nd(ids, sample_indices), tf.int64)\n # [?, embedding_dim]\n sparse_ids = tf.SparseTensor(\n indices=sample_indices,\n values=embedding_indices,\n dense_shape=dense_shape)\n return self._CombinerEmbLookup(sparse_ids, partition_strategy)\n\n return ids_map.Transform(_Lookup)\n\n def CpuEmbLookupSparse(self, ids_map: py_utils.NestedMap,\n partition_strategy: str) -> py_utils.NestedMap:\n \"\"\"CPU evaluation embedding lookup for SparseTensors.\n\n Args:\n ids_map: A NestedMap of nested `input_key` string -> [batch, ...] int\n SparseTensor.\n partition_strategy: See TPUEmbeddingLayer partition_strategy param.\n\n Returns:\n An activations NestedMap of nested string -> float32 Tensor.\n For non-sequence embeddings: [batch, 1, embedding_dim]\n For sequence embeddings: [batch, max_sequence_length, embedding_dim]\n \"\"\"\n if self.max_sequence_length > 0:\n # \"Sequence embedding\", no combiner case\n def _Lookup(ids):\n # Sparse to dense.\n dense_ids = tf.sparse.to_dense(ids, default_value=-1)\n return self._SequenceEmbLookup(dense_ids, partition_strategy)\n\n return ids_map.Transform(_Lookup)\n else:\n # Non-\"Sequence embedding\", combiner case\n return ids_map.Transform(\n lambda ids: self._CombinerEmbLookup(ids, partition_strategy))\n\n\nclass TPUEmbeddingLayer(base_layer.BaseLayer):\n \"\"\"Monolithic interface to TPU embedding.\n\n This layer has some important caveats, due to the interface of the\n TPU embedding hardware. Its behavior most closely mimics that of\n tf.nn.embedding_lookup_sparse.\n\n Supports multiple tables and multiple input_keys per table.\n Requires its own optimizer parameters.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_tpu_hosts', 0, 'Total number of TPU hosts.')\n p.Define('tables', None, 'TPUEmbeddingTables')\n p.Define('pipeline_execution_with_tensor_core', False,\n 'Set to True to be faster. See tpu_embedding.py for details.')\n p.Define('batch_size', 0, 'Per-core batch size.')\n p.Define(\n 'optimizer', TPUEmbeddingAdagradOptimizer.Params(),\n 'Layer optimizer parameters. Will be used for any TPUEmbeddingTables '\n 'with None optimizer parameters.')\n p.Define('learning_rate', 0.0, 'Learning rate.')\n p.Define(\n 'lr_schedule', schedule.ContinuousSchedule.Params(),\n 'Lingvo learning rate schedule. Will be multiplied to learning rate.')\n p.Define(\n 'partition_strategy', 'div', 'A string, either \"mod\" or \"div\", '\n 'specifying how to map the lookup id to the embedding tensor. 
For '\n 'more information see `tf.nn.embedding_lookup_sparse`.')\n p.Define(\n 'gradient_multiplier_schedule', schedule.ConstantOne.Params(),\n 'Values from this schedule will be multiplied to the embedding '\n 'gradients. Gradients from Tensorcore will not be affected.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n\n assert p.tables\n assert p.batch_size > 0\n assert p.name\n assert p.gradient_multiplier_schedule\n assert p.partition_strategy in ['mod', 'div']\n\n if p.num_tpu_hosts > 0:\n for table_params in p.tables:\n num_tpu_hosts = table_params.num_tpu_hosts\n if num_tpu_hosts > 0 and num_tpu_hosts != p.num_tpu_hosts:\n raise ValueError(\n f'num_tpu_hosts mismatch: {num_tpu_hosts} vs {p.num_tpu_hosts}')\n table_params.num_tpu_hosts = p.num_tpu_hosts\n else:\n num_tpu_hosts = p.tables[0].num_tpu_hosts\n assert all([t.num_tpu_hosts == num_tpu_hosts for t in p.tables])\n\n # Stop if a table has no optimizer related parameters and the layer also\n # has no optimizer parameters\n for param_name in ['optimizer', 'learning_rate', 'lr_schedule']:\n table_param_missing = any(\n table_params.Get(param_name) is None for table_params in p.tables)\n if not p.Get(param_name) and table_param_missing:\n raise ValueError(\n f'A table is missing {param_name} parameters, and no layer-level '\n f'{param_name} parameters were given.')\n elif table_param_missing:\n for table_params in p.tables:\n if table_params.Get(param_name) is None:\n value = p.Get(param_name)\n if isinstance(value, hyperparams.Params):\n value = value.Copy() # Avoid mutating the original copy.\n table_params.Set(**{param_name: value})\n\n self.CreateChildren('tables', p.tables)\n self.CreateChild('gradient_multiplier_schedule',\n p.gradient_multiplier_schedule)\n self._tpu_embedding_collection = TpuEmbeddingCollection.Get()\n\n # Save embedding feature names in the collection.\n feature_names = set()\n for table in self.tables:\n for feature in table.input_keys:\n if feature in feature_names:\n raise ValueError(f'Input key {feature} was used by multiple tables.')\n feature_names.add(feature)\n self._tpu_embedding_collection.feature_names = feature_names\n\n def _CreateChildrenVariables(self):\n # Backwards compatibility: manually call child.InstantiateVariables()\n # outside of tf.variable_scope(p.name).\n for table in self.tables:\n table.InstantiateVariables()\n super()._CreateChildrenVariables()\n\n def _CheckTPUEmbeddingConfig(self, tpu_embedding, table_to_config_dict,\n feature_to_config_dict, global_batch_size):\n \"\"\"Check that the existing tpu_embedding config matches the given ones.\"\"\"\n\n def _Match(d1, d2, namedtuple_attrs_to_check):\n if len(d1) != len(d2):\n return False\n for k, v1 in d1.items():\n if k not in d2:\n return False\n v2 = d2[k]\n for attr in namedtuple_attrs_to_check:\n if getattr(v1, attr) != getattr(v2, attr):\n return False\n return True\n\n # We just check numeric/string settings for simplicity, this excludes things\n # like learning_rate_fn, optimization_parameters, etc since it's hard to\n # compare them.\n if not _Match(tpu_embedding.table_to_config_dict, table_to_config_dict,\n ['vocabulary_size', 'dimension', 'combiner']):\n raise ValueError('table_to_config_dict mismatch. '\n f'Expecting {tpu_embedding.table_to_config_dict}, '\n f'got {table_to_config_dict}')\n if not _Match(tpu_embedding.feature_to_config_dict, feature_to_config_dict,\n ['table_id', 'max_sequence_length']):\n raise ValueError('feature_to_config_dict mismatch. 
'\n f'Expecting {tpu_embedding.feature_to_config_dict}, '\n f'got {feature_to_config_dict}')\n if (tpu_embedding.batch_size_per_core * tpu_embedding.num_cores !=\n global_batch_size):\n raise ValueError(\n 'global_batch_size mismatch. '\n f'batch_size_per_core: {tpu_embedding.batch_size_per_core}, '\n f'num_cores: {tpu_embedding.num_cores}, '\n f'global_batch_size: {global_batch_size}')\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n p = self.params\n\n # At the feature level, track which are associated\n # with \"sequence embeddings\".\n self._sequence_features = {}\n\n if _IsTpuTraining(p):\n num_cores = self.cluster.params.worker.tpus_per_replica\n global_batch_size = (\n self.params.batch_size * self.cluster.num_splits_per_client)\n table_to_config_dict = {}\n feature_to_config_dict = {}\n for table in self.tables:\n table_to_config_dict[table.table_name] = table.table_config\n for feature in table.input_keys:\n if table.max_sequence_length > 0:\n self._sequence_features[feature] = True\n feature_to_config_dict[feature] = tpu_embedding_lib.FeatureConfig(\n table.table_name, max_sequence_length=table.max_sequence_length)\n\n tpu_embedding = self._tpu_embedding_collection.tpu_embedding\n if tpu_embedding:\n self._CheckTPUEmbeddingConfig(tpu_embedding, table_to_config_dict,\n feature_to_config_dict, global_batch_size)\n tf.logging.info('TPUEmbedding API singleton already exists, reusing')\n self._tpu_embedding = tpu_embedding\n else:\n mode = tpu_embedding_lib.TRAINING\n device_config = tpu_embedding_lib.DeviceConfig(\n num_cores=num_cores,\n num_hosts=self.params.tables[0].num_tpu_hosts,\n job_name=self.cluster.params.worker.name)\n self._tpu_embedding = tpu_embedding_lib.TPUEmbedding(\n table_to_config_dict,\n feature_to_config_dict,\n global_batch_size,\n mode,\n master=None,\n pipeline_execution_with_tensor_core=(\n self.params.pipeline_execution_with_tensor_core),\n partition_strategy=p.partition_strategy,\n device_config=device_config)\n self._tpu_embedding_collection.tpu_embedding = self._tpu_embedding\n self._tpu_embedding_collection.SetGradientMultiplierSchedule(\n self.gradient_multiplier_schedule)\n\n def _TpuEmbLookup(self, ids_map: py_utils.NestedMap) -> py_utils.NestedMap:\n \"\"\"TPU Embedding lookup.\"\"\"\n task_call_scope = py_utils.GetTaskCallScope()\n activations = self._tpu_embedding_collection.AddActivations(task_call_scope)\n\n ret = py_utils.NestedMap()\n for k, v in activations.items():\n if ids_map.Get(k) is not None:\n if k in self._sequence_features:\n ret.Set(k, v)\n else:\n # Non-sequence embeddings, we fill the \"time\" dimension with 1.\n with tf.name_scope(k):\n ret.Set(k, tf.expand_dims(v, axis=[1]))\n return ret\n\n def EmbLookup(self, ids_map: py_utils.NestedMap) -> py_utils.NestedMap:\n \"\"\"Looks up embedding vectors for each entry in dense Tensor ids_map.\n\n Since the TPUEmbedding is monolothic, and consulted once per\n FProp/BProp, we must centralize the lookup. Thus, for multiple\n features, we contain them into a single-lookup rather than allowing\n the caller to call Lookup multiple times.\n\n Args:\n ids_map: A NestedMap of nested `input_key` string -> [batch, sequence] int\n Tensor.\n For sequence embeddings, -1 is used as a padding id. 
Non-sequence\n embeddings do not support padded ids.\n\n Returns:\n Activations NestedMap of nested string ->\n For non-sequence embeddings: [batch, 1, embedding_dim],\n For sequence embeddings: [batch, max_sequence_length, embedding_dim]\n float32 Tensor.\n \"\"\"\n assert isinstance(ids_map, py_utils.NestedMap)\n p = self.params\n\n def CpuEmbLookup(ids_map):\n \"\"\"CPU evaluation embedding lookup.\"\"\"\n rets = py_utils.NestedMap()\n for table in self.tables:\n table_id_map = py_utils.NestedMap()\n for key in table.input_keys:\n if ids_map.Get(key) is not None:\n table_id_map.Set(key, ids_map.GetItem(key))\n table_rets = table.CpuEmbLookup(table_id_map, p.partition_strategy)\n # Merge table_rets with rets\n for key in table.input_keys:\n if ids_map.Get(key) is not None:\n rets.Set(key, table_rets.GetItem(key))\n return rets\n\n if _IsTpuTraining(p):\n return self._TpuEmbLookup(ids_map)\n else:\n return CpuEmbLookup(ids_map)\n\n def EmbLookupSparse(self, ids_map: py_utils.NestedMap) -> py_utils.NestedMap:\n \"\"\"Looks up embedding vectors for each entry in SparseTensor ids_map.\n\n Since the TPUEmbedding is monolothic, and consulted once per\n FProp/BProp, we must centralize the lookup. Thus, for multiple\n features, we contain them into a single-lookup rather than allowing\n the caller to call Lookup multiple times.\n\n Args:\n ids_map: A NestedMap of nested `input_key` string -> [batch, ...] int\n SparseTensor.\n\n Returns:\n Activations NestedMap of nested string ->\n For non-sequence embeddings: [batch, 1, embedding_dim],\n For sequence embeddings: [batch, max_sequence_length, embedding_dim]\n float32 Tensor.\n \"\"\"\n assert isinstance(ids_map, py_utils.NestedMap)\n p = self.params\n\n def CpuEmbLookupSparse(ids_map):\n \"\"\"CPU evaluation embedding lookup.\"\"\"\n rets = py_utils.NestedMap()\n for table in self.tables:\n table_id_map = py_utils.NestedMap()\n for key in table.input_keys:\n if ids_map.Get(key) is not None:\n table_id_map.Set(key, ids_map.GetItem(key))\n table_rets = table.CpuEmbLookupSparse(table_id_map,\n p.partition_strategy)\n # Merge table_rets with rets\n for key in table.input_keys:\n if ids_map.Get(key) is not None:\n rets.Set(key, table_rets.GetItem(key))\n return rets\n\n if _IsTpuTraining(p):\n return self._TpuEmbLookup(ids_map)\n else:\n return CpuEmbLookupSparse(ids_map)\n"
]
| [
[
"tensorflow.python.tpu.tpu_embedding.tpu_ops.load_tpu_embedding_ftrl_parameters",
"tensorflow.python.tpu.tpu_embedding.DeviceConfig",
"tensorflow.python.tpu.tpu_embedding.TPUEmbedding",
"tensorflow.python.tpu.tpu_embedding.FeatureConfig",
"tensorflow.python.tpu.tpu_embedding.tpu_ops.load_tpu_embedding_adam_parameters",
"tensorflow.python.tpu.tpu_embedding.AdagradParameters",
"tensorflow.python.tpu.tpu_embedding.tpu_ops.retrieve_tpu_embedding_adagrad_parameters",
"tensorflow.python.tpu.tpu_embedding.tpu_ops.retrieve_tpu_embedding_ftrl_parameters",
"tensorflow.python.tpu.tpu_embedding.tpu_ops.load_tpu_embedding_adagrad_parameters",
"tensorflow.python.tpu.tpu_embedding.AdamParameters",
"tensorflow.python.tpu.tpu_embedding.tpu_ops.load_tpu_embedding_stochastic_gradient_descent_parameters",
"tensorflow.python.tpu.tpu_embedding.tpu_ops.retrieve_tpu_embedding_adam_parameters",
"tensorflow.python.tpu.tpu_embedding.tpu_ops.retrieve_tpu_embedding_stochastic_gradient_descent_parameters",
"tensorflow.python.tpu.tpu_embedding.FtrlParameters",
"tensorflow.python.tpu.tpu_embedding.StochasticGradientDescentParameters"
]
]
|
lbumbolo/ShapeVariationAnalyzer | [
"976e22cbacc87fb593d92e24cbdbba6c99a64060"
]
| [
"ShapeVariationAnalyzer/Resources/Classifier/trainNeuralNetwork.py"
]
| [
"from __future__ import print_function\nimport os\nimport sys\nfrom six.moves import cPickle as pickle\nimport numpy as np\nimport tensorflow as tf\nimport neuralNetwork as nn\n# import inputData\nimport argparse\nimport json\nimport shutil\nimport zipfile\nimport math\n\n\n# ----------------------------------------------------------------------------- #\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\n# \t\t\t\t\t\t\t\tUseful functions \t\t\t\t\t\t\t\t#\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\n# ----------------------------------------------------------------------------- #\n\ndef reformat(dataset, labels, classifier):\n \"\"\" Reformat into a shape that's more adapted to the models we're going to train:\n - data as a flat matrix\n - labels as float 1-hot encodings\n \"\"\"\n dataset = dataset.reshape((-1, classifier.NUM_POINTS * classifier.NUM_FEATURES)).astype(np.float32)\n labels = (np.arange(classifier.NUM_CLASSES) == labels[:, None]).astype(np.float32)\n return dataset, labels\n \ndef get_inputs(pickle_file, classifier):\n \"\"\" Reoad the data generated in picklefiel\n \"\"\"\n with open(pickle_file, 'rb') as f:\n if sys.version_info[0] == 2: \n save = pickle.load(f)\n else:\n save = pickle.load(f, encoding='latin1')\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n test_dataset = save['test_dataset']\n test_labels = save['test_labels']\n del save # hint to help gc free up memory\n print('Training set', train_dataset.shape, train_labels.shape)\n print('Validation set', valid_dataset.shape, valid_labels.shape)\n print('Test set', test_dataset.shape, test_labels.shape)\n\n train_dataset, train_labels = reformat(train_dataset, train_labels, classifier)\n valid_dataset, valid_labels = reformat(valid_dataset, valid_labels, classifier)\n test_dataset, test_labels = reformat(test_dataset, test_labels, classifier)\n print(\"\\nTraining set\", train_dataset.shape, train_labels.shape)\n print(\"Validation set\", valid_dataset.shape, valid_labels.shape)\n print(\"Test set\", test_dataset.shape, test_labels.shape)\n\n return train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels\n\n\ndef run_training(train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels, saveModelPath, classifier):\n # TODO: Generic number of layers and their number of nodes\n # if classifier.NUM_HIDDEN_LAYERS == 1:\n # # nb_hidden_nodes_1 = 2048\n # # nb_hidden_nodes_1 = ( classifier.NUM_POINTS * classifier.NUM_FEATURES + classifier.NUM_CLASSES ) // 2\n # nb_hidden_nodes_1 = int ( math.sqrt ( classifier.NUM_POINTS * classifier.NUM_FEATURES * classifier.NUM_CLASSES ))\n # nb_hidden_nodes_2 = 0\n # elif classifier.NUM_HIDDEN_LAYERS == 2:\n # # nb_hidden_nodes_1, nb_hidden_nodes_2 = 2048, 2048\n # r = math.pow( classifier.NUM_POINTS * classifier.NUM_FEATURES / classifier.NUM_CLASSES, 1/3)\n # nb_hidden_nodes_1 = int ( classifier.NUM_CLASSES * math.pow ( r, 2 ))\n # nb_hidden_nodes_2 =int ( classifier.NUM_POINTS * classifier.NUM_FEATURES * r )\n\n # print(\"nb_hidden_nodes_1 : \" + str(classifier.nb_hidden_nodes_1))\n # print(\"nb_hidden_nodes_2 : \" + str(classifier.nb_hidden_nodes_2))\n\n # Construct the graph\n graph = tf.Graph()\n with graph.as_default():\n # Input data.\n with tf.name_scope('Inputs_management'):\n # tf_train_dataset, tf_train_labels = placeholder_inputs(classifier.batch_size, name='data')\n tf_train_dataset = tf.placeholder(tf.float32, 
shape=(classifier.batch_size, classifier.NUM_POINTS * classifier.NUM_FEATURES), name='tf_train_dataset')\n tf_train_labels = tf.placeholder(tf.int32, shape=(classifier.batch_size, classifier.NUM_CLASSES), name='tf_train_labels')\n\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n\n tf_valid_dataset = tf.constant(valid_dataset, name=\"tf_valid_dataset\")\n tf_test_dataset = tf.constant(test_dataset)\n\n tf_data = tf.placeholder(tf.float32, shape=(1,classifier.NUM_POINTS * classifier.NUM_FEATURES), name=\"input\")\n\n with tf.name_scope('Bias_and_weights_management'):\n weightsDict = classifier.bias_weights_creation(nb_hidden_nodes_1 = classifier.nb_hidden_nodes_1, nb_hidden_nodes_2 = classifier.nb_hidden_nodes_2) \n \n # Training computation.\n with tf.name_scope('Training_computations'):\n logits, weightsDict = classifier.model(tf_train_dataset, weightsDict)\n \n with tf.name_scope('Loss_computation'):\n loss = classifier.loss(logits, tf_train_labels, classifier.lambda_reg, weightsDict)\n \n print(\"Loss computation\")\n with tf.name_scope('Optimization'):\n # Optimizer.\n optimizer = tf.train.GradientDescentOptimizer(classifier.learning_rate).minimize(loss)\n # optimizer = tf.train.AdagradOptimizer(classifier.learning_rate).minimize(loss)\n \n tf.summary.scalar(\"Loss\", loss)\n summary_op = tf.summary.merge_all()\n saver = tf.train.Saver(weightsDict)\n\n \n with tf.name_scope('Predictions'):\n # Predictions for the training, validation, and test data.\n train_prediction = tf.nn.softmax(logits)\n valid_prediction = tf.nn.softmax(classifier.model(tf_valid_dataset, weightsDict)[0], name=\"valid_prediction\")\n\n data_pred = tf.nn.softmax(classifier.model(tf_data, weightsDict)[0], name=\"output\")\n test_prediction = tf.nn.softmax(classifier.model(tf_test_dataset, weightsDict)[0])\n\n\n # -------------------------- #\n # Let's run it #\n # -------------------------- #\n # \n with tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n print(\"Initialized\")\n\n # create log writer object\n writer = tf.summary.FileWriter('./train', graph=graph)\n\n for epoch in range(0, classifier.num_epochs):\n for step in range(classifier.num_steps):\n # Pick an offset within the training data, which has been randomized.\n # Note: we could use better randomization across epochs.\n offset = (step * classifier.batch_size) % (train_labels.shape[0] - classifier.batch_size)\n # Generate a minibatch.\n batch_data = train_dataset[offset:(offset + classifier.batch_size), :]\n batch_labels = train_labels[offset:(offset + classifier.batch_size), :]\n # Prepare a dictionary telling the session where to feed the minibatch.\n # The key of the dictionary is the placeholder node of the graph to be fed,\n # and the value is the numpy array to feed to it.\n feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, keep_prob:0.7}\n _, l, predictions, summary = session.run([optimizer, loss, train_prediction, summary_op], feed_dict=feed_dict)\n # _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)\n\n\n # write log\n batch_count = 20\n writer.add_summary(summary, epoch * batch_count + step)\n\n\n if (step % 500 == 0):\n print(\"Minibatch loss at step %d: %f\" % (step, l))\n print(\"Minibatch accuracy: %.1f%%\" % classifier.accuracy(predictions, batch_labels)[0])\n print(\"Validation accuracy: %.1f%%\" % classifier.accuracy(valid_prediction.eval(feed_dict = {keep_prob:1.0}), valid_labels)[0])\n\n # finalaccuracy, mat_confusion, PPV, TPR = 
classifier.accuracy(test_prediction.eval(feed_dict={keep_prob:1.0}), test_labels)\n finalaccuracy, mat_confusion = classifier.accuracy(test_prediction.eval(feed_dict={keep_prob:1.0}), test_labels)\n # finalaccuracy, mat_confusion = classifier.accuracy(valid_prediction.eval(feed_dict={keep_prob:1.0}), valid_labels)\n print(\"Test accuracy: %.1f%%\" % finalaccuracy)\n print(\"\\n\\nConfusion matrix :\\n\" + str(mat_confusion))\n # print \"\\n PPV : \" + str(PPV)\n # print \"\\n TPR : \" + str(TPR)\n\n save_path = saver.save(session, saveModelPath, write_meta_graph=True)\n print(\"Model saved in file: %s\" % save_path)\n \n return finalaccuracy\n\ndef exportModelNetwork(zipPath, outputPath):\n\n\t# Zipper tout ca :: base_name = la ou on veut zipper+zipname\n\n\tshutil.make_archive(base_name = outputPath, format = 'zip', root_dir = os.path.dirname(zipPath), base_dir = os.path.basename(zipPath))\n\n\treturn\n\ndef main(_):\n print(\"\\nTensorFlow current version : \" + str(tf.__version__) + \"\\n\")\n \n # Get the arguments from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument('--inputZip', action='store', dest='inputZip', help='Input zip file which contains the datasets & the parameters for the classifier', \n default = \"\")\n parser.add_argument('--outputZip', action='store', dest='outputZip', help='Output zip file which will contain the neural netowrk trained', \n default = \"\")\n\n args = parser.parse_args()\n\n inputZip = args.inputZip\n outputZip = args.outputZip\n basedir = os.path.dirname(inputZip)\n nameDir = os.path.splitext(os.path.basename(inputZip))[0]\n outputdir = os.path.dirname(outputZip)\n nameOuput = os.path.splitext(os.path.basename(outputZip))[0]\n\n outputPath = os.path.join(outputdir, nameOuput)\n networkDir = os.path.join(basedir, nameDir)\n print(\"networkDir : \" + networkDir)\n\n if os.path.isdir(networkDir):\n shutil.rmtree(networkDir)\n os.mkdir(networkDir) \n\n # Unpack archive\n with zipfile.ZipFile(inputZip) as zf:\n # zf.extractall(networkDir)\n zf.extractall(basedir)\n\n jsonFile = os.path.join(networkDir, 'classifierInfo.json')\n saveModelPath = os.path.join(networkDir, 'CondylesClassifier')\n pickle_file = os.path.join(networkDir, 'datasets.pickle')\n\n #\n # Create a network for the classification\n #\n if sys.version_info[0] == 3: \n with open(jsonFile, encoding='utf-8') as f: \n jsonDict = json.load(f)\n else:\n with open(jsonFile) as f: \n jsonDict = json.load(f)\n\n # In case our JSON file doesnt contain a valid Classifier\n if not 'CondylesClassifier' in jsonDict:\n print(\"Error: Couldn't parameterize the network.\")\n print(\"There is no 'CondylesClassifier' model.\")\n return 0\n\n # If we have the Classifier, set all parameters for the network\n classifier = nn.neuralNetwork()\n\n # Essential parameters\n if 'NUM_CLASSES' in jsonDict['CondylesClassifier']:\n classifier.NUM_CLASSES = jsonDict['CondylesClassifier']['NUM_CLASSES'] \n else:\n print(\"Missing NUM_CLASSES\")\n accuracy = -1\n\n if 'NUM_POINTS' in jsonDict['CondylesClassifier']:\n classifier.NUM_POINTS = jsonDict['CondylesClassifier']['NUM_POINTS']\n else:\n print(\"Missing NUM_POINTS\")\n accuracy = -1\n\n if 'NUM_FEATURES' in jsonDict['CondylesClassifier']:\n classifier.NUM_FEATURES = jsonDict['CondylesClassifier']['NUM_FEATURES']\n else:\n print(\"Missing NUM_FEATURES\")\n accuracy = -1\n\n # TODO: Manage case with incomplete parameterization of the classifier network\n\n\n # Specific parameters\n if 'learning_rate' in jsonDict['CondylesClassifier']:\n 
classifier.learning_rate = jsonDict['CondylesClassifier']['learning_rate']\n else: \n classifier.learning_rate = 0.0005\n\n if 'lambda_reg' in jsonDict['CondylesClassifier']:\n classifier.lambda_reg = jsonDict['CondylesClassifier']['lambda_reg']\n else:\n classifier.lambda_reg = 0.01\n\n if 'num_epochs' in jsonDict['CondylesClassifier']:\n classifier.num_epochs = jsonDict['CondylesClassifier']['num_epochs']\n else:\n classifier.num_epochs = 2\n\n if 'num_steps'\tin jsonDict['CondylesClassifier']:\n classifier.num_steps = jsonDict['CondylesClassifier']['num_steps']\n else:\n classifier.num_steps = 11\n\n if 'batch_size' in jsonDict['CondylesClassifier']:\n classifier.batch_size = jsonDict['CondylesClassifier']['batch_size']\n else:\n classifier.batch_size = 10\n\n if 'NUM_HIDDEN_LAYERS' in jsonDict['CondylesClassifier']:\n classifier.NUM_HIDDEN_LAYERS = jsonDict['CondylesClassifier']['NUM_HIDDEN_LAYERS']\n if classifier.NUM_HIDDEN_LAYERS:\n classifier.nb_hidden_nodes_1 = jsonDict['CondylesClassifier']['nb_hidden_nodes_1']\n if classifier.NUM_HIDDEN_LAYERS > 1:\n classifier.nb_hidden_nodes_1 = jsonDict['CondylesClassifier']['nb_hidden_nodes_2']\n # if classifier.NUM_HIDDEN_LAYERS > 2:\n # classifier.nb_hidden_nodes_1 = jsonDict['CondylesClassifier']['nb_hidden_nodes_3']\n else:\n classifier.NUM_HIDDEN_LAYERS = 1\n classifier.nb_hidden_nodes_1 = ( classifier.NUM_POINTS * classifier.NUM_FEATURES + classifier.NUM_CLASSES ) // 2\n \n\n train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = get_inputs(pickle_file, classifier)\n\n accuracy = run_training(train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels, saveModelPath, classifier)\n jsonDict['CondylesClassifier']['accuracy'] = accuracy\n with open(os.path.join(networkDir,'classifierInfo.json'), 'w') as f:\n json.dump(jsonDict, f, ensure_ascii=False, indent = 4)\n\n # Zip all those files together\n zipPath = networkDir\n exportModelNetwork(zipPath, outputPath)\n\n return \n\n\nif __name__ == '__main__':\n\ttf.app.run()\n\n\n\n\n\n\n\n"
]
| [
[
"tensorflow.summary.scalar",
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.constant",
"tensorflow.placeholder",
"tensorflow.name_scope",
"tensorflow.nn.softmax",
"numpy.arange",
"tensorflow.summary.merge_all",
"tensorflow.summary.FileWriter",
"tensorflow.app.run",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer"
]
]
|
ZFTurbo/keras-retinanet | [
"f0999e1f90b47ebc95fda10b3461fcc0388e5fcc"
]
| [
"keras_retinanet/bin/train.py"
]
| [
"#!/usr/bin/env python\n\n\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport warnings\n\nimport keras\nimport keras.preprocessing.image\nimport tensorflow as tf\n\n# Allow relative imports when being executed as script.\nif __name__ == \"__main__\" and __package__ is None:\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))\n import keras_retinanet.bin # noqa: F401\n __package__ = \"keras_retinanet.bin\"\n\n# Change these to absolute imports if you copy this script outside the keras_retinanet package.\nfrom .. import layers # noqa: F401\nfrom .. import losses\nfrom .. import models\nfrom ..callbacks import RedirectModel\nfrom ..callbacks.eval import Evaluate\nfrom ..models.retinanet import retinanet_bbox\nfrom ..preprocessing.csv_generator import CSVGenerator\nfrom ..preprocessing.kitti import KittiGenerator\nfrom ..preprocessing.open_images import OpenImagesGenerator\nfrom ..preprocessing.pascal_voc import PascalVocGenerator\nfrom ..utils.anchors import make_shapes_callback\nfrom ..utils.config import read_config_file, parse_anchor_parameters\nfrom ..utils.gpu import setup_gpu\nfrom ..utils.image import random_visual_effect_generator\nfrom ..utils.keras_version import check_keras_version\nfrom ..utils.model import freeze as freeze_model\nfrom ..utils.tf_version import check_tf_version\nfrom ..utils.transform import random_transform_generator\n\n\ndef makedirs(path):\n # Intended behavior: try to create the directory,\n # pass if the directory exists already, fails otherwise.\n # Meant for Python 2.7/3.n compatibility.\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\n\ndef model_with_weights(model, weights, skip_mismatch):\n \"\"\" Load weights for model.\n\n Args\n model : The model to load weights for.\n weights : The weights to load.\n skip_mismatch : If True, skips layers whose shape of weights doesn't match with the model.\n \"\"\"\n if weights is not None:\n model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)\n return model\n\n\ndef create_models(backbone_retinanet, num_classes, weights, multi_gpu=0,\n freeze_backbone=False, lr=1e-5, config=None):\n \"\"\" Creates three models (model, training_model, prediction_model).\n\n Args\n backbone_retinanet : A function to call to create a retinanet model with a given backbone.\n num_classes : The number of classes to train.\n weights : The weights to load into the model.\n multi_gpu : The number of GPUs to use for training.\n freeze_backbone : If True, disables learning for the backbone.\n config : Config parameters, None indicates the default configuration.\n\n Returns\n model : The base model. This is also the model that is saved in snapshots.\n training_model : The training model. 
If multi_gpu=0, this is identical to model.\n prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).\n \"\"\"\n\n modifier = freeze_model if freeze_backbone else None\n\n # load anchor parameters, or pass None (so that defaults will be used)\n anchor_params = None\n num_anchors = None\n if config and 'anchor_parameters' in config:\n anchor_params = parse_anchor_parameters(config)\n num_anchors = anchor_params.num_anchors()\n\n # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.\n # optionally wrap in a parallel model\n if multi_gpu > 1:\n from keras.utils import multi_gpu_model\n with tf.device('/cpu:0'):\n model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)\n training_model = multi_gpu_model(model, gpus=multi_gpu)\n else:\n model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)\n training_model = model\n\n # make prediction model\n prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)\n\n # compile model\n training_model.compile(\n loss={\n 'regression' : losses.smooth_l1(),\n 'classification': losses.focal()\n },\n optimizer=keras.optimizers.adam(lr=lr, clipnorm=0.001)\n )\n\n return model, training_model, prediction_model\n\n\ndef create_callbacks(model, training_model, prediction_model, validation_generator, args):\n \"\"\" Creates the callbacks to use during training.\n\n Args\n model: The base model.\n training_model: The model that is used for training.\n prediction_model: The model that should be used for validation.\n validation_generator: The generator for creating validation data.\n args: parseargs args object.\n\n Returns:\n A list of callbacks used for training.\n \"\"\"\n callbacks = []\n\n tensorboard_callback = None\n\n if args.tensorboard_dir:\n tensorboard_callback = keras.callbacks.TensorBoard(\n log_dir = args.tensorboard_dir,\n histogram_freq = 0,\n batch_size = args.batch_size,\n write_graph = True,\n write_grads = False,\n write_images = False,\n embeddings_freq = 0,\n embeddings_layer_names = None,\n embeddings_metadata = None\n )\n\n if args.evaluation and validation_generator:\n if args.dataset_type == 'coco':\n from ..callbacks.coco import CocoEval\n\n # use prediction model for evaluation\n evaluation = CocoEval(validation_generator, tensorboard=tensorboard_callback)\n else:\n evaluation = Evaluate(validation_generator, tensorboard=tensorboard_callback, weighted_average=args.weighted_average)\n evaluation = RedirectModel(evaluation, prediction_model)\n callbacks.append(evaluation)\n\n # save the model\n if args.snapshots:\n # ensure directory created first; otherwise h5py will error after epoch.\n makedirs(args.snapshot_path)\n checkpoint = keras.callbacks.ModelCheckpoint(\n os.path.join(\n args.snapshot_path,\n '{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(backbone=args.backbone, dataset_type=args.dataset_type)\n ),\n verbose=1,\n # save_best_only=True,\n # monitor=\"mAP\",\n # mode='max'\n )\n checkpoint = RedirectModel(checkpoint, model)\n callbacks.append(checkpoint)\n\n callbacks.append(keras.callbacks.ReduceLROnPlateau(\n monitor = 'loss',\n factor = 0.1,\n patience = 2,\n verbose = 1,\n mode = 'auto',\n min_delta = 0.0001,\n cooldown = 0,\n min_lr = 0\n ))\n\n if args.tensorboard_dir:\n 
callbacks.append(tensorboard_callback)\n\n return callbacks\n\n\ndef create_generators(args, preprocess_image):\n \"\"\" Create generators for training and validation.\n\n Args\n args : parseargs object containing configuration for generators.\n preprocess_image : Function that preprocesses an image for the network.\n \"\"\"\n common_args = {\n 'batch_size' : args.batch_size,\n 'config' : args.config,\n 'image_min_side' : args.image_min_side,\n 'image_max_side' : args.image_max_side,\n 'no_resize' : args.no_resize,\n 'preprocess_image' : preprocess_image,\n }\n\n # create random transform generator for augmenting training data\n if args.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.5,\n )\n visual_effect_generator = random_visual_effect_generator(\n contrast_range=(0.9, 1.1),\n brightness_range=(-.1, .1),\n hue_range=(-0.05, 0.05),\n saturation_range=(0.95, 1.05)\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n visual_effect_generator = None\n\n if args.dataset_type == 'coco':\n # import here to prevent unnecessary dependency on cocoapi\n from ..preprocessing.coco import CocoGenerator\n\n train_generator = CocoGenerator(\n args.coco_path,\n 'train2017',\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = CocoGenerator(\n args.coco_path,\n 'val2017',\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'pascal':\n train_generator = PascalVocGenerator(\n args.pascal_path,\n 'trainval',\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = PascalVocGenerator(\n args.pascal_path,\n 'test',\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'csv':\n train_generator = CSVGenerator(\n args.annotations,\n args.classes,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n if args.val_annotations:\n validation_generator = CSVGenerator(\n args.val_annotations,\n args.classes,\n shuffle_groups=False,\n **common_args\n )\n else:\n validation_generator = None\n elif args.dataset_type == 'oid':\n train_generator = OpenImagesGenerator(\n args.main_dir,\n subset='train',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = OpenImagesGenerator(\n args.main_dir,\n subset='validation',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'kitti':\n train_generator = KittiGenerator(\n args.kitti_path,\n subset='train',\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = KittiGenerator(\n args.kitti_path,\n subset='val',\n shuffle_groups=False,\n **common_args\n )\n else:\n raise ValueError('Invalid data type received: {}'.format(args.dataset_type))\n\n return train_generator, 
validation_generator\n\n\ndef check_args(parsed_args):\n \"\"\" Function to check for inherent contradictions within parsed arguments.\n For example, batch_size < num_gpus\n Intended to raise errors prior to backend initialisation.\n\n Args\n parsed_args: parser.parse_args()\n\n Returns\n parsed_args\n \"\"\"\n\n if parsed_args.multi_gpu > 1 and parsed_args.batch_size < parsed_args.multi_gpu:\n raise ValueError(\n \"Batch size ({}) must be equal to or higher than the number of GPUs ({})\".format(parsed_args.batch_size,\n parsed_args.multi_gpu))\n\n if parsed_args.multi_gpu > 1 and parsed_args.snapshot:\n raise ValueError(\n \"Multi GPU training ({}) and resuming from snapshots ({}) is not supported.\".format(parsed_args.multi_gpu,\n parsed_args.snapshot))\n\n if parsed_args.multi_gpu > 1 and not parsed_args.multi_gpu_force:\n raise ValueError(\"Multi-GPU support is experimental, use at own risk! Run with --multi-gpu-force if you wish to continue.\")\n\n if 'resnet' not in parsed_args.backbone:\n warnings.warn('Using experimental backbone {}. Only resnet50 has been properly tested.'.format(parsed_args.backbone))\n\n return parsed_args\n\n\ndef parse_args(args):\n \"\"\" Parse the arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')\n subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')\n subparsers.required = True\n\n coco_parser = subparsers.add_parser('coco')\n coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')\n\n pascal_parser = subparsers.add_parser('pascal')\n pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')\n\n kitti_parser = subparsers.add_parser('kitti')\n kitti_parser.add_argument('kitti_path', help='Path to dataset directory (ie. /tmp/kitti).')\n\n def csv_list(string):\n return string.split(',')\n\n oid_parser = subparsers.add_parser('oid')\n oid_parser.add_argument('main_dir', help='Path to dataset directory.')\n oid_parser.add_argument('--version', help='The current dataset version is v4.', default='v4')\n oid_parser.add_argument('--labels-filter', help='A list of labels to filter.', type=csv_list, default=None)\n oid_parser.add_argument('--annotation-cache-dir', help='Path to store annotation cache.', default='.')\n oid_parser.add_argument('--parent-label', help='Use the hierarchy children of this label.', default=None)\n\n csv_parser = subparsers.add_parser('csv')\n csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for training.')\n csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')\n csv_parser.add_argument('--val-annotations', help='Path to CSV file containing annotations for validation (optional).')\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument('--snapshot', help='Resume training from a snapshot.')\n group.add_argument('--imagenet-weights', help='Initialize the model with pretrained imagenet weights. 
This is the default behaviour.', action='store_const', const=True, default=True)\n group.add_argument('--weights', help='Initialize the model with weights from a file.')\n group.add_argument('--no-weights', help='Don\\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)\n\n parser.add_argument('--backbone', help='Backbone model used by retinanet.', default='resnet50', type=str)\n parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)\n parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')\n parser.add_argument('--multi-gpu', help='Number of GPUs to use for parallel processing.', type=int, default=0)\n parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')\n parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=50)\n parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000)\n parser.add_argument('--lr', help='Learning rate.', type=float, default=1e-5)\n parser.add_argument('--snapshot-path', help='Path to store snapshots of models during training (defaults to \\'./snapshots\\')', default='./snapshots')\n parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output', default='./logs')\n parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')\n parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation', action='store_false')\n parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')\n parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')\n parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)\n parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)\n parser.add_argument('--no-resize', help='Don''t rescale the image.', action='store_true')\n parser.add_argument('--config', help='Path to a configuration parameters .ini file.')\n parser.add_argument('--weighted-average', help='Compute the mAP using the weighted average of precisions among classes.', action='store_true')\n parser.add_argument('--compute-val-loss', help='Compute validation loss during training', dest='compute_val_loss', action='store_true')\n\n # Fit generator arguments\n parser.add_argument('--multiprocessing', help='Use multiprocessing in fit_generator.', action='store_true')\n parser.add_argument('--workers', help='Number of generator workers.', type=int, default=1)\n parser.add_argument('--max-queue-size', help='Queue length for multiprocessing workers in fit_generator.', type=int, default=10)\n\n return check_args(parser.parse_args(args))\n\n\ndef main(args=None):\n # parse arguments\n if args is None:\n args = sys.argv[1:]\n args = parse_args(args)\n\n # create object that stores backbone information\n backbone = models.backbone(args.backbone)\n\n # make sure keras and tensorflow are the minimum required version\n check_keras_version()\n check_tf_version()\n\n # optionally choose specific GPU\n if args.gpu:\n setup_gpu(args.gpu)\n\n # optionally load config parameters\n if args.config:\n args.config = read_config_file(args.config)\n\n # create the generators\n train_generator, validation_generator = 
create_generators(args, backbone.preprocess_image)\n\n # create the model\n if args.snapshot is not None:\n print('Loading model, this may take a second...')\n model = models.load_model(args.snapshot, backbone_name=args.backbone)\n training_model = model\n anchor_params = None\n if args.config and 'anchor_parameters' in args.config:\n anchor_params = parse_anchor_parameters(args.config)\n prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)\n else:\n weights = args.weights\n # default to imagenet if nothing else is specified\n if weights is None and args.imagenet_weights:\n weights = backbone.download_imagenet()\n\n print('Creating model, this may take a second...')\n model, training_model, prediction_model = create_models(\n backbone_retinanet=backbone.retinanet,\n num_classes=train_generator.num_classes(),\n weights=weights,\n multi_gpu=args.multi_gpu,\n freeze_backbone=args.freeze_backbone,\n lr=args.lr,\n config=args.config\n )\n\n # print model summary\n print(model.summary())\n\n # this lets the generator compute backbone layer shapes using the actual backbone model\n if 'vgg' in args.backbone or 'densenet' in args.backbone:\n train_generator.compute_shapes = make_shapes_callback(model)\n if validation_generator:\n validation_generator.compute_shapes = train_generator.compute_shapes\n\n # create the callbacks\n callbacks = create_callbacks(\n model,\n training_model,\n prediction_model,\n validation_generator,\n args,\n )\n\n if not args.compute_val_loss:\n validation_generator = None\n\n # start training\n return training_model.fit_generator(\n generator=train_generator,\n steps_per_epoch=args.steps,\n epochs=args.epochs,\n verbose=1,\n callbacks=callbacks,\n workers=args.workers,\n use_multiprocessing=args.multiprocessing,\n max_queue_size=args.max_queue_size,\n validation_data=validation_generator\n )\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"tensorflow.device"
]
]
|
hayao-k/aws-cost-explorer-api-samples | [
"fe21a520ff2dd1da4d16c2fa18f1feaa84b79457"
]
| [
"daily-cost-by-account/lambda_function.py"
]
| [
"from logging import getLogger, INFO\nimport os\nimport datetime\nimport boto3\nimport pandas\nfrom botocore.exceptions import ClientError\n\nlogger = getLogger()\nlogger.setLevel(INFO)\n\ndef upload_s3(output, key, bucket):\n try:\n s3_resource = boto3.resource('s3')\n s3_bucket = s3_resource.Bucket(bucket)\n s3_bucket.upload_file(output, key, ExtraArgs={'ACL': 'bucket-owner-full-control'})\n except ClientError as err:\n logger.error(err.response['Error']['Message'])\n raise\n\ndef get_ou_ids(org, parent_id):\n ou_ids = []\n\n try:\n paginator = org.get_paginator('list_children')\n iterator = paginator.paginate(\n ParentId=parent_id,\n ChildType='ORGANIZATIONAL_UNIT'\n )\n for page in iterator:\n for ou in page['Children']:\n ou_ids.append(ou['Id'])\n ou_ids.extend(get_ou_ids(org, ou['Id']))\n except ClientError as err:\n logger.error(err.response['Error']['Message'])\n raise\n else:\n return ou_ids\n\ndef list_accounts():\n org = boto3.client('organizations')\n root_id = 'r-xxxx'\n ou_id_list = [root_id]\n ou_id_list.extend(get_ou_ids(org, root_id))\n accounts = []\n\n try:\n for ou_id in ou_id_list:\n paginator = org.get_paginator('list_accounts_for_parent')\n page_iterator = paginator.paginate(ParentId=ou_id)\n for page in page_iterator:\n for account in page['Accounts']:\n item = [\n account['Id'],\n account['Name'],\n ]\n accounts.append(item)\n except ClientError as err:\n logger.error(err.response['Error']['Message'])\n raise\n else:\n return accounts\n\ndef get_cost_json(start, end):\n ce = boto3.client('ce')\n response = ce.get_cost_and_usage(\n TimePeriod={\n 'Start': start,\n 'End' : end,\n },\n Granularity='DAILY',\n Metrics=[\n 'NetUnblendedCost'\n ],\n GroupBy=[\n {\n 'Type': 'DIMENSION',\n 'Key': 'LINKED_ACCOUNT'\n }\n ]\n )\n return response['ResultsByTime']\n\ndef lambda_handler(event, context):\n today = datetime.date.today()\n start = today.replace(day=1).strftime('%Y-%m-%d')\n end = today.strftime('%Y-%m-%d')\n key = 'daily-cost-' + today.strftime('%Y-%m') + '.csv'\n output_file = '/tmp/output.csv'\n bucket = os.environ['BUCKET']\n account_list = pandas.DataFrame(list_accounts(), columns=['Account Id', 'Account Name'])\n daily_cost_list = get_cost_json(start, end)\n\n merged_cost = pandas.DataFrame(\n index=[],\n columns=['Account Id']\n )\n\n for index, item in enumerate(daily_cost_list):\n normalized_json = pandas.json_normalize(item['Groups'])\n split_keys = pandas.DataFrame(\n normalized_json['Keys'].tolist(),\n columns=['Account Id']\n )\n cost = pandas.concat(\n [split_keys, normalized_json['Metrics.NetUnblendedCost.Amount']],\n axis=1\n )\n renamed_cost = cost.rename(\n columns={'Metrics.NetUnblendedCost.Amount': item['TimePeriod']['Start']}\n )\n merged_cost = pandas.merge(merged_cost, renamed_cost, on='Account Id', how='outer')\n\n daily_cost = pandas.merge(account_list, merged_cost, on='Account Id', how='right')\n daily_cost.to_csv(output_file, index=False)\n upload_s3(output_file, key, bucket)\n"
]
| [
[
"pandas.concat",
"pandas.DataFrame",
"pandas.merge",
"pandas.json_normalize"
]
]
|
CarloGraziani/BayesVaccineEfficacy | [
"895a1069de97eabff0fcae4de47e72d3f788ae73"
]
| [
"vb.py"
]
| [
"#!/usr/bin/python3\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom priors import *\nimport sys\nsys.path.insert(0,\".\")\n\n############## Job Control ##################################\n# Default values, overridden by vb_in.py\n#\njobctl_0 = {\n \"trials\" : {\n \"Pfizer (Final)\" : {\"n_p\":262, \"n_v\":8, \"v2p_ratio\":1.0, \"xlo\":0.75},\n \"Pfizer (Severe)\" : {\"n_p\": 9, \"n_v\": 1, \"v2p_ratio\":1.0, \"xlo\":0.0},\n \"Moderna (Interim)\" : {\"n_p\":90, \"n_v\":5, \"v2p_ratio\":1.0, \"xlo\":0.70},\n \"Moderna (Final)\" : {\"n_p\":185, \"n_v\":11, \"v2p_ratio\":1.0, \"xlo\":0.75},\n \"Moderna (Severe)\" : {\"n_p\": 30, \"n_v\":0, \"v2p_ratio\":1.0, \"xlo\":0.70},\n \"Sputnik V (Interim)\" : {\"n_p\":31, \"n_v\":8, \"v2p_ratio\":3.0, \"xlo\":0.45},\n \"CoronaVac (Interim, Turkey)\" : {\"n_p\":26, \"n_v\":3, \"v2p_ratio\":752.0/570.0, \"xlo\":0.3},\n },\n \"cred\": 0.90, # Probability level of credible regions\n \"cred_lb\" : 0.99, # Probability level of lower bound\n \"nsamp\" : 1000, # Number of equal-spaced samples in [0,1] for the posterior\n \"prior\" : uniform_prior, # Prior choice, from 'priors.py'\n}\n\n# If vb_in.py exists and contains a dict called 'jobctl', use it to update jobctl_0\ntry:\n from vb_in import jobctl\n jobctl_0.update(jobctl)\n print(\"Imported job from vb_in.py\")\nexcept ImportError:\n print(\"No job imported, using defaults\")\n pass\n\n# All keys in jobctl_0 now to become variables:\nglobals().update(jobctl_0)\n\n################## Done with job control #####################\n\ncs=\"%4.1f\"%(cred*100.0)\nde = 1.0 / nsamp\neff = de * (np.arange(nsamp, dtype=np.float) + 0.5 )\n\ndef loglik(e,trial):\n ll = trial[\"n_v\"] * np.log(1.0-e) - \\\n (trial[\"n_p\"] + trial[\"n_v\"]) * np.log(1.0 + (1-e)*trial[\"v2p_ratio\"])\n return ll\n\nposterior = np.zeros(nsamp)\neff_ci = np.zeros(2)\nfsize=16\nfsize_l=12\nlw_ci=2\nlw_plot=3\nmsize=18\n\nfor trialname in trials.keys():\n\n trial = trials[trialname]\n\n ll = loglik(eff, trial)\n pr = prior(eff)\n llmax = np.max(ll)\n posterior = np.exp(ll - llmax) * pr\n norm = posterior.sum() * de\n posterior /= norm\n inds = np.argsort(posterior)[-1::-1]\n cum = posterior[inds].cumsum() * de\n lbcum = posterior[-1::-1].cumsum() * de\n lb_ind = nsamp-np.searchsorted(lbcum, cred_lb)\n eff_lb = eff[lb_ind]\n lb_x = list(eff[lb_ind:])\n lb_x.insert(0,eff[lb_ind])\n lb_x.append(eff[-1])\n lb_y = list(posterior[lb_ind:])\n lb_y.insert(0,0.0)\n lb_y.append(0.0)\n\n eff_mp = eff[inds[0]]\n eff_ci[0] = eff[inds[0]]\n eff_ci[1] = eff[inds[0]]\n ci_idx_lo = ci_idx_hi = inds[0]\n for samp in range(nsamp):\n if eff[inds[samp]] > eff_ci[1]:\n eff_ci[1] = eff[inds[samp]]\n ci_idx_hi = inds[samp]\n if eff[inds[samp]] < eff_ci[0]:\n eff_ci[0] = eff[inds[samp]]\n ci_idx_lo = inds[samp]\n if cum[samp] > cred:\n break\n ci_x = list(eff[ci_idx_lo:ci_idx_hi+1])\n ci_x.insert(0, eff[ci_idx_lo])\n ci_x.append(eff[ci_idx_hi])\n ci_y = list(posterior[ci_idx_lo:ci_idx_hi+1])\n ci_y.insert(0,0.0)\n ci_y.append(0.0)\n\n print(trialname +\n \": Max Posterior Effectiveness = %6.3f; %4.1f%% CI = [%6.3f, %6.3f]; %4.1f%% Lower Bound = %6.3f\\n\" %\n (eff_mp, cred*100.0, eff_ci[0], eff_ci[1], cred_lb*100.0, eff_lb) )\n\n fig = plt.figure()\n fig.set_figwidth(8.0)\n fig.set_figheight(8.0)\n ax = fig.add_subplot(1,1,1)\n\n ax.set_xlim([trial[\"xlo\"],1.0])\n ax.set_ylim(bottom=0.0, top=posterior[inds[0]]*1.2)\n ax.set_xlabel(\"Efficacy\", size=fsize)\n ax.set_ylabel(\"Posterior Density\", size=fsize)\n ax.tick_params(labelsize=fsize)\n\n 
ax.plot(eff,posterior,'b-', linewidth=lw_plot)\n ax.axvline(eff_mp, color=\"c\", linewidth=lw_plot,\n linestyle=\"--\",\n label='Max Posterior: Eff. = %5.3f'%(eff_mp) )\n\n ax.fill(ci_x, ci_y, color='r', alpha=0.4,\n label='%4.1f%% Credible Region:'%(cred*100) + ' Eff.$\\in$'+'[%5.3f,%5.3f]'%\n (eff_ci[0],eff_ci[1]))\n # ax.axvline(eff_ci[0], color='r', linewidth=lw_ci, linestyle=\":\")\n # ax.axvline(eff_ci[1], color='r', linewidth=lw_ci, linestyle=\":\")\n\n #ax.axvline(eff_lb, color='g', linewidth=lw_ci, linestyle=\"-.\")\n ax.fill(lb_x, lb_y, hatch=\"/\", fill=False,\n label=\"%4.1f%% Lower Bound: Eff. = %5.3f\" % (cred_lb*100, eff_lb))\n\n ax.legend(handlelength=4.0)\n\n ax.set_title(trialname + \": Placebo Infections = %d, Vaccine Infections = %d\\n Vaccine/Placebo Person-Year Ratio = %4.2f\" % (trial[\"n_p\"], trial[\"n_v\"], trial[\"v2p_ratio\"]) )\n\n plt.savefig(trialname +\".png\", format=\"png\")\n plt.close(fig)\n"
]
| [
[
"numpy.max",
"numpy.zeros",
"numpy.log",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"numpy.exp",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.argsort",
"numpy.searchsorted"
]
]
|
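The vb.py record above grids the efficacy axis and normalizes exp(log-likelihood) times the prior numerically; below is a minimal stand-alone sketch of that posterior step under a flat prior. The trial counts are illustrative, r is the vaccine/placebo person-year ratio, and plain float replaces np.float, which the original uses but which was removed in NumPy 1.24+.

    import numpy as np

    # Posterior over efficacy on an equal-spaced grid with a flat prior;
    # n_p and n_v are placebo- and vaccine-arm infection counts (illustrative).
    def efficacy_posterior(n_p, n_v, r=1.0, nsamp=1000):
        de = 1.0 / nsamp
        eff = de * (np.arange(nsamp, dtype=float) + 0.5)
        loglik = n_v * np.log(1.0 - eff) - (n_p + n_v) * np.log(1.0 + (1.0 - eff) * r)
        post = np.exp(loglik - loglik.max())
        post /= post.sum() * de          # normalize so the grid integrates to 1
        return eff, post

    eff, post = efficacy_posterior(n_p=162, n_v=8)
    print("max-posterior efficacy: %.3f" % eff[np.argmax(post)])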
linesd/Image-Classification | [
"e4dd9d5883bb25e0a730ea05a1d42eeee6d6a6ae"
]
| [
"classifier/training.py"
]
| [
"\nimport torch\nimport logging\nfrom timeit import default_timer\n\nclass Trainer():\n \"\"\"\n Class to handle training of model.\n Parameters\n ----------\n model: disvae.vae.VAE\n optimizer: torch.optim.Optimizer\n loss_f: disvae.models.BaseLoss\n Loss function.\n device: torch.device, optional\n Device on which to run the code.\n logger: logging.Logger, optional\n Logger.\n save_dir : str, optional\n Directory for saving logs.\n gif_visualizer : viz.Visualizer, optional\n Gif Visualizer that should return samples at every epochs.\n is_progress_bar: bool, optional\n Whether to use a progress bar for training.\n \"\"\"\n\n def __init__(self, model, optimizer, criterion,\n device=torch.device(\"cpu\"),\n save_dir=\"results\"):\n\n self.device = device\n self.model = model.to(self.device)\n self.criterion = criterion\n self.optimizer = optimizer\n self.save_dir = save_dir\n\n def __call__(self, data_loader, epochs=10):\n \"\"\"\n Trains the model.\n Parameters\n ----------\n data_loader: torch.utils.data.DataLoader\n epochs: int, optional\n Number of epochs to train the model for.\n checkpoint_every: int, optional\n Save a checkpoint of the trained model every n epoch.\n \"\"\"\n\n self.model.train()\n\n for epoch in range(epochs):\n print(\"EPOCH %d\" % (epoch + 1))\n mean_epoch_loss = self._train_epoch(data_loader, epoch)\n print('Average loss for epoch %d: %.3f' % (epoch + 1, mean_epoch_loss))\n\n self.model.eval()\n\n def _train_epoch(self, data_loader, epoch):\n \"\"\"\n Trains the model for one epoch.\n Parameters\n ----------\n data_loader: torch.utils.data.DataLoader\n storer: dict\n Dictionary in which to store important variables for vizualisation.\n epoch: int\n Epoch number\n Return\n ------\n mean_epoch_loss: float\n Mean loss per image\n \"\"\"\n running_loss = 0.0\n epoch_loss = 0.0\n for i, data in enumerate(data_loader):\n # pull the\n inputs, labels = data\n # zero the parameter gradients\n self.optimizer.zero_grad()\n\n # Forward, backward, optimize\n outputs = self.model(inputs)\n loss = self.criterion(outputs, labels)\n loss.backward()\n self.optimizer.step()\n\n # Print some stats\n running_loss += loss.item()\n epoch_loss += loss.item()\n if i % 200 == 199: # print every 2000 mini-batches\n print('[minibatch: %5d] loss: %.3f' % (i + 1, running_loss / 200))\n running_loss = 0.0\n\n return epoch_loss / len(data_loader)"
]
| [
[
"torch.device"
]
]
|
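The Trainer record above wraps the standard zero_grad/forward/backward/step loop; the sketch below exercises it with a throwaway model and tensor dataset. The import path follows the record's file_path, and the model and data are stand-ins rather than the project's own.

    import torch
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset
    from classifier.training import Trainer   # class shown in the record above

    model = nn.Linear(4, 2)                   # throwaway model just to run the loop
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    x = torch.randn(64, 4)
    y = torch.randint(0, 2, (64,))
    loader = DataLoader(TensorDataset(x, y), batch_size=8)

    trainer = Trainer(model, optimizer, criterion)   # device defaults to CPU
    trainer(loader, epochs=2)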
atharva-naik/HeadlineGen | [
"cbb36cfd6faa8ce36009eae6eddf6a073c10d9d8"
]
| [
"preprocess.py"
]
| [
"import os \nimport re \nimport copy\nimport json\nimport tqdm\nimport emoji\nimport sklearn \nimport pandas as pd\nfrom string import ascii_letters, digits\nfrom nltk.tokenize.punkt import PunktSentenceTokenizer \n\ndef lower(text):\n return text.lower()\n\ndef strip(text):\n return text.strip()\n\ndef filter_charset(text):\n # hindi+englishset+.+digits\n return text\n\ndef remove_newline(text):\n return text.replace(\"\\n\", \" \")\n\ndef insert_newline(text):\n nltk_tokenizer = PunktSentenceTokenizer() \n nltk_tokenizer._params.abbrev_types.add('fig')\n nltk_tokenizer._params.abbrev_types.add('e.g')\n nltk_tokenizer._params.abbrev_types.add('rs')\n nltk_tokenizer._params.abbrev_types.add('dr')\n nltk_tokenizer._params.abbrev_types.add('pvt')\n nltk_tokenizer._params.abbrev_types.add('ltd')\n nltk_tokenizer._params.abbrev_types.add('inc')\n text=\"\\n\".join([i.strip() for i in nltk_tokenizer.tokenize(text)])\n text=text.replace('\\u0964',\"\\n\")\n return text\n\ndef remove_urls(text):\n return re.sub(r'http\\S+', '', text)\n\ndef remove_punctuation(text):\n for punct in '''\"',:`;%@!|\\&()#$+-_?<>*=~{}[]''':\n text = text.replace(punct, \" \")\n text = re.sub(\"\\s\\s+\", \" \", text)\n\n return text\n\ndef identity(text):\n return text\n\nVALID_FILTERS = {\"lower\":lower, \n \"strip\":strip,\n \"remove_urls\":remove_urls,\n \"remove_punctuation\":remove_punctuation,\n \"remove_newline\":remove_newline,\n \"insert_newline\":insert_newline}\n\n\nclass ProcessText:\n @classmethod\n def Sequential(cls, filts):\n obj = cls()\n obj.filts = []\n\n # c.valid_filts = VALID_FILTERS\n for filt in filts:\n obj.filts.append(obj.valid_filts.get(filt, identity))\n\n return obj\n\n def __init__(self):\n self.filts = [] \n self.valid_filts = VALID_FILTERS\n \n def add(self, filt):\n self.filts.append(filt)\n\n def run(self, text):\n for filt in self.filts:\n text = filt(text)\n\n return text \n\ndef process_dataset(path, pipeline, seed, valid_size=1/8, test_size=1/8, target=['Text', 'Headline'], keep=['Text', 'Headline'], rename=['text', 'summary']):\n if path.endswith(\".csv\"):\n dataset = pd.read_csv(path)\n elif path.endswith(\".xlsx\"):\n dataset = pd.read_excel(path)\n else: \n raise(TypeError(\"\\x1b[31mFile format not valid\\x1b[0m\"))\n\n dataset = dataset[keep]\n map_dict = {k:v for k,v in zip(keep, rename)}\n \n text_processor = ProcessText.Sequential(pipeline)\n proc_datatset = []\n \n for line in tqdm.tqdm(dataset.to_dict(\"records\")):\n for key in target:\n line[key] = text_processor.run(line[key])\n proc_datatset.append(line)\n total = len(dataset)\n\n proc_datatset = pd.DataFrame(proc_datatset)\n proc_datatset = proc_datatset.sample(frac=1, random_state=seed).reset_index(drop=True)\n proc_datatset.rename(columns=map_dict, inplace=True, errors='raise')\n test_size = int(total*test_size)\n valid_size = int(total*valid_size)\n train_size = total - test_size - valid_size\n \n proc_datatset = proc_datatset.to_dict('records')\n train = proc_datatset[ : train_size]\n val = proc_datatset[train_size : train_size + valid_size]\n test = proc_datatset[train_size + valid_size : ]\n\n train = pd.DataFrame(train)\n test = pd.DataFrame(test)\n val = pd.DataFrame(val)\n\n return train, val, test\n\nif __name__ == \"__main__\":\n df = pd.read_excel(\"/home/atharva/interiit/HeadlineGen/raw_data/Development Data/dev_data_article.xlsx\")\n print(df.head())\n pipeline = [\"strip\", \"remove_newline\", \"remove_url\", \"remove_punctuation\", \"insert_newline\", \"lower\"]\n # pipeline = [\"identity\"]\n 
train, val, test = process_dataset(\"/home/atharva/interiit/HeadlineGen/raw_data/Development Data/dev_data_article.xlsx\", pipeline, 69)\n print(train.head())\n print(val.head())\n print(test.head())\n# if TRANSLATE:\n# index = 0\n# def translate(text):\n# return text\n\n# for headline, article in zip(headlines, articles):\n# trans_dataset[index]['en_Headline'] = translate(headline)\n# trans_dataset[index]['en_Text'] = translate(article)\n# index += 1"
]
| [
[
"pandas.DataFrame",
"pandas.read_excel",
"pandas.read_csv"
]
]
|
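ProcessText.Sequential in the record above resolves filter names through VALID_FILTERS and silently substitutes the identity filter for unknown names; the pipeline in its __main__ block passes "remove_url", which therefore does nothing (the registered key is "remove_urls"). A minimal usage sketch with registered names only, assuming the module is importable as preprocess:

    from preprocess import ProcessText   # module shown in the record above

    # Unknown filter names fall back to identity without warning, so these
    # names are taken verbatim from VALID_FILTERS.
    pipeline = ["strip", "remove_newline", "remove_urls",
                "remove_punctuation", "insert_newline", "lower"]
    processor = ProcessText.Sequential(pipeline)
    print(processor.run("Dr. Rao visited https://example.com and said: 'Hello, World!'"))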
latour-a/mole | [
"3e838d827bd6c2b1257dcf82acfbd02c8829d1ff"
]
| [
"tests/test_makedata.py"
]
| [
"# coding: utf8\n\"\"\"\nTeste les fonctions du module basecase.\n\"\"\"\n\nimport time\nimport shutil\nimport tempfile\nimport numpy as np\nfrom mole import makedata as mk\n\nclass TemporaryDirectory(object):\n \"\"\"\n Context manager pour gérer un répertoire temporaire (implémenté dans le\n module `tempfile` en Python 3).\n \"\"\"\n\n def __init__(self):\n self.name = tempfile.mkdtemp()\n\n def __enter__(self):\n return self.name\n\n def __exit__(self, exc, value, tb):\n shutil.rmtree(self.name)\n\nclass MockPb():\n \"\"\"\n Fausse classe implémentant les fonctions attendues des modules `pb` utilisés\n dans makedata.\n \"\"\"\n\n def __init__(self, shape, npoints, threshold):\n \"Initialise une nouvelle instance de la classe `MockPb`.\"\n self.shape = shape\n self.npoints = npoints\n self.threshold = threshold\n self._grid = np.random.randint(low=0, high=2, size=shape)\n self._solution = np.random.randint(low=0, high=2, size=shape)\n while np.all(self._grid == self._solution):\n self._solution = np.random.randint(low=0, high=2, size=shape)\n\n def generate(self, shape, npoints):\n \"Renvoie une fausse instance.\"\n return self._grid.copy()\n\n def admissible(self, grid, threshold):\n \"Indique si `grid` est une solution.\"\n return np.all(grid == self._solution)\n\n def solve(self, grid, threshold, name, compdir=None):\n \"Renvoie une fausse solution.\"\n return self._solution.copy()\n\ndef genparams(shape, threshold):\n \"Fonction créant un générateur de paramètres aléatoires.\"\n while True:\n npoints = np.random.binomial(np.product(shape), 0.5)\n yield mk.InstanceParams(shape, npoints, threshold)\n\ndef test_makeone():\n \"Teste la création d'une donnée via le module makedata.\"\n pb = MockPb((5, 5), 0, 3)\n params = mk.InstanceParams(pb.shape, pb.npoints, pb.threshold)\n with TemporaryDirectory() as tmpdir:\n # Création d'une donnée :\n output1 = mk.makeone(pb, params, tmpdir)\n grid, solution = mk.load(output1)\n got = pb.admissible(grid, params.threshold)\n assert (got == False)\n got = pb.admissible(solution, params.threshold)\n assert (got == True)\n # Création d'une autre donnée : même avec les mêmes paramètres, elle\n # doit être sauvegardé dans un fichier distinct.\n output2 = mk.makeone(pb, params, tmpdir)\n assert (output1 != output2)\n\ndef test_fmt():\n \"Teste l'homogénéisation des données lors de l'enregistrement.\"\n pb = MockPb((2, 5, 3), 1, 2)\n params = mk.InstanceParams(pb.shape, pb.npoints, pb.threshold)\n with TemporaryDirectory() as tmpdir:\n # Création d'une donnée :\n output = mk.makeone(pb, params, tmpdir)\n grid, solution = mk.load(output)\n assert (grid.shape == (5, 3, 2)) # Les axes ont été triés par taille.\n assert (solution.shape == (5, 3, 2))\n\ndef test_makeseveral():\n \"Teste la création de plusieurs données via le module makedata.\"\n pb = MockPb((5, 5), 0, 3)\n params = mk.InstanceParams(pb.shape, pb.npoints, pb.threshold)\n nsamples = 2\n maxtime = 0.5\n with TemporaryDirectory() as tmpdir:\n # Création de deux données :\n res = mk.makeseveral(pb, params, tmpdir, nsamples=nsamples)\n assert (len(res) == nsamples)\n # Utilisation d'une limite en temps :\n start = time.time()\n res = mk.makeseveral(pb, params, tmpdir, maxtime=maxtime)\n assert (len(res) >= 1)\n assert ((time.time() - start) >= maxtime)\n # Utilisation simultanée d'une limite en temps et en nombre de données :\n start = time.time()\n res = mk.makeseveral(pb, params, tmpdir, nsamples=nsamples, maxtime=maxtime)\n got = (len(res) == nsamples) or ((time.time() - start) > maxtime)\n 
assert (got == True)\n # Utilisation de paramètres aléatoires :\n res = mk.makeseveral(pb, genparams((5, 5), 3), tmpdir, nsamples=nsamples)\n assert (len(res) == nsamples)\n\n\n"
]
| [
[
"numpy.all",
"numpy.product",
"numpy.random.randint"
]
]
|
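The tests in the record above rely on a contract of MockPb: generate() never returns an admissible grid, while solve() always returns the stored admissible solution. A direct check of that contract, with the import path taken from the record's file_path:

    import numpy as np
    from tests.test_makedata import MockPb   # mock shown in the record above

    pb = MockPb((4, 4), npoints=0, threshold=2)
    grid = pb.generate(pb.shape, pb.npoints)
    solution = pb.solve(grid, pb.threshold, name="demo")

    assert not pb.admissible(grid, pb.threshold)   # generated grid is never a solution
    assert pb.admissible(solution, pb.threshold)   # solve() returns the stored solution
    assert not np.all(grid == solution)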
mengli/MachineLearning | [
"107a5be76aabbfd57a6395a6b7e1b9c55e06bbad"
]
| [
"kaggle/zillow/missing_data.py"
]
| [
"import pandas as pd\nimport matplotlib.pyplot as plt\n\nproperties_data = pd.read_csv('C:\\\\Users\\\\jowet\\\\Downloads\\\\zillow\\\\properties_2017.csv', low_memory=False)\n\nmissing_df = properties_data.isnull().sum(axis=0).reset_index()\nmissing_df.columns = ['column_name', 'missing_count']\nmissing_df = missing_df[missing_df.missing_count > 0]\nmissing_df = missing_df.sort_values(by='missing_count')\nmissing_df.plot(kind='barh')\nplt.yticks(range(missing_df.shape[0]), missing_df.column_name.values)\n\nplt.show()\n"
]
| [
[
"matplotlib.pyplot.show",
"pandas.read_csv"
]
]
|
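The same missing-count summary as in the record above, run on a small synthetic frame instead of the local Zillow CSV so it executes anywhere; column names and values are made up. Passing x/y to DataFrame.plot also removes the need for the manual plt.yticks call.

    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt

    # Synthetic stand-in for properties_2017.csv.
    df = pd.DataFrame({"bathroomcnt": [2.0, np.nan, 3.0],
                       "poolcnt": [np.nan, np.nan, 1.0],
                       "parcelid": [1, 2, 3]})

    missing_df = df.isnull().sum(axis=0).reset_index()
    missing_df.columns = ["column_name", "missing_count"]
    missing_df = missing_df[missing_df.missing_count > 0].sort_values(by="missing_count")
    missing_df.plot(kind="barh", x="column_name", y="missing_count", legend=False)
    plt.show()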
georgefang/vega | [
"977054e12dd3bc1c96bbe35f18d5db4bc82d0522"
]
| [
"vega/algorithms/nas/esr_ea/esr_search.py"
]
| [
"# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# MIT License for more details.\n\n\"\"\"search algorithm for ESR_EA.\"\"\"\nimport csv\nimport logging\nimport os\nfrom bisect import bisect_right\nfrom random import random, sample\nimport numpy as np\nimport pandas as pd\nfrom zeus.common.general import General\nfrom .conf import ESRConfig\nfrom zeus.common import FileOps\nfrom zeus.common import ClassFactory, ClassType\nfrom vega.core.search_algs import SearchAlgorithm\nfrom .esr_ea_individual import ESRIndividual\n\n\[email protected](ClassType.SEARCH_ALGORITHM)\nclass ESRSearch(SearchAlgorithm):\n \"\"\"Evolutionary search algorithm of the efficient super-resolution.\"\"\"\n\n config = ESRConfig()\n\n def __init__(self, search_space=None, **kwargs):\n \"\"\"Construct the ESR EA search class.\n\n :param search_space: config of the search space\n :type search_space: dictionary\n \"\"\"\n super(ESRSearch, self).__init__(search_space, **kwargs)\n self.individual_num = self.config.policy.num_individual\n self.generation_num = self.config.policy.num_generation\n self.elitism_num = self.config.policy.num_elitism\n self.mutation_rate = self.config.policy.mutation_rate\n self.min_active = self.config.range.min_active\n self.max_params = self.config.range.max_params\n self.min_params = self.config.range.min_params\n\n self.indiv_count = 0\n self.evolution_count = 0\n self.initialize_pop()\n self.elitism = [ESRIndividual(self.codec) for _ in range(self.elitism_num)]\n self.elit_fitness = [0] * self.elitism_num\n self.fitness_pop = [0] * self.individual_num\n self.fit_state = [0] * self.individual_num\n\n @property\n def is_completed(self):\n \"\"\"Tell whether the search process is completed.\n\n :return: True is completed, or False otherwise\n :rtype: bool\n \"\"\"\n return self.indiv_count > self.generation_num * self.individual_num\n\n def update_fitness(self, evals):\n \"\"\"Update the fitness of each individual.\n\n :param evals: the evalution\n :type evals: list\n \"\"\"\n for i in range(self.individual_num):\n self.pop[i].update_fitness(evals[i])\n\n def update_elitism(self, evaluations):\n \"\"\"Update the elitism and its fitness.\n\n :param evaluations: evaluations result\n :type evaluations: list\n \"\"\"\n popu_all = [ESRIndividual(self.codec) for _ in range(self.elitism_num + self.individual_num)]\n for i in range(self.elitism_num + self.individual_num):\n if i < self.elitism_num:\n popu_all[i].copy(self.elitism[i])\n else:\n popu_all[i].copy(self.pop[i - self.elitism_num])\n fitness_all = self.elit_fitness + evaluations\n sorted_ind = sorted(range(len(fitness_all)), key=lambda k: fitness_all[k])\n for i in range(self.elitism_num):\n self.elitism[i].copy(popu_all[sorted_ind[len(fitness_all) - 1 - i]])\n self.elit_fitness[i] = fitness_all[sorted_ind[len(fitness_all) - 1 - i]]\n logging.info('Generation: {}, updated elitism fitness: {}'.format(self.evolution_count, self.elit_fitness))\n\n def _log_data(self, net_info_type='active_only', pop=None, value=0):\n \"\"\"Get the evolution and network information of children.\n\n :param net_info_type: defaults to 'active_only'\n :type net_info_type: str\n :param pop: defaults to None\n 
:type pop: list\n :param value: defaults to 0\n :type value: int\n :return: log_list\n :rtype: list\n \"\"\"\n log_list = [value, pop.parameter, pop.flops]\n if net_info_type == 'active_only':\n log_list.append(pop.active_net_list())\n elif net_info_type == 'full':\n log_list += pop.gene.flatten().tolist()\n else:\n pass\n return log_list\n\n def save_results(self):\n \"\"\"Save the results of evolution contains the information of pupulation and elitism.\"\"\"\n _path = FileOps.join_path(self.local_output_path, General.step_name)\n FileOps.make_dir(_path)\n arch_file = FileOps.join_path(_path, 'arch.txt')\n arch_child = FileOps.join_path(_path, 'arch_child.txt')\n sel_arch_file = FileOps.join_path(_path, 'selected_arch.npy')\n sel_arch = []\n with open(arch_file, 'a') as fw_a, open(arch_child, 'a') as fw_ac:\n writer_a = csv.writer(fw_a, lineterminator='\\n')\n writer_ac = csv.writer(fw_ac, lineterminator='\\n')\n writer_ac.writerow(['Population Iteration: ' + str(self.evolution_count + 1)])\n for c in range(self.individual_num):\n writer_ac.writerow(\n self._log_data(net_info_type='active_only', pop=self.pop[c],\n value=self.pop[c].fitness))\n\n writer_a.writerow(['Population Iteration: ' + str(self.evolution_count + 1)])\n for c in range(self.elitism_num):\n writer_a.writerow(self._log_data(net_info_type='active_only',\n pop=self.elitism[c],\n value=self.elit_fitness[c]))\n sel_arch.append(self.elitism[c].gene)\n sel_arch = np.stack(sel_arch)\n np.save(sel_arch_file, sel_arch)\n if self.backup_base_path is not None:\n FileOps.copy_folder(self.local_output_path, self.backup_base_path)\n\n def parent_select(self, parent_num=2, select_type='Tournament'):\n \"\"\"Select parent from a population with Tournament or Roulette.\n\n :param parent_num: number of parents\n :type parent_num: int\n :param select_type: select_type, defaults to 'Tournament'\n :type select_type: str\n :return: the selected parent individuals\n :rtype: list\n \"\"\"\n popu_all = [ESRIndividual(self.codec) for _ in range(self.elitism_num + self.individual_num)]\n parent = [ESRIndividual(self.codec) for _ in range(parent_num)]\n fitness_all = self.elit_fitness\n for i in range(self.elitism_num + self.individual_num):\n if i < self.elitism_num:\n popu_all[i].copy(self.elitism[i])\n else:\n popu_all[i].copy(self.pop[i - self.elitism_num])\n fitness_all = fitness_all + [popu_all[i].fitness]\n fitness_all = np.asarray(fitness_all)\n if select_type == 'Tournament':\n for i in range(parent_num):\n tourn = sample(range(len(popu_all)), 2)\n if fitness_all[tourn[0]] >= fitness_all[tourn[1]]:\n parent[i].copy(popu_all[tourn[0]])\n fitness_all[tourn[0]] = 0\n else:\n parent[i] = popu_all[tourn[1]]\n fitness_all[tourn[1]] = 0\n elif select_type == 'Roulette':\n eval_submean = fitness_all - np.min(fitness_all)\n eval_norm = eval_submean / sum(eval_submean)\n eva_threshold = np.cumsum(eval_norm)\n for i in range(parent_num):\n ran = random()\n selec_id = bisect_right(eva_threshold, ran)\n parent[i].copy(popu_all[selec_id])\n eval_submean[selec_id] = 0\n eval_norm = eval_submean / sum(eval_submean)\n eva_threshold = np.cumsum(eval_norm)\n else:\n logging.info('Wrong selection type')\n return parent\n\n def initialize_pop(self):\n \"\"\"Initialize the population of first generation.\"\"\"\n self.pop = [ESRIndividual(self.codec) for _ in range(self.individual_num)]\n for i in range(self.individual_num):\n while self.pop[i].active_num < self.min_active:\n self.pop[i].mutation_using(self.mutation_rate)\n while self.pop[i].parameter > 
self.max_params or self.pop[i].parameter < self.min_params:\n self.pop[i].mutation_node(self.mutation_rate)\n\n def get_mutate_child(self, muta_num):\n \"\"\"Generate the mutated children of the next offspring with mutation operation.\n\n :param muta_num: number of mutated children\n :type muta_num: int\n \"\"\"\n for i in range(muta_num):\n if int(self.individual_num / 2) == len(self.elitism):\n self.pop[i].copy(self.elitism[i])\n else:\n self.pop[i].copy(sample(self.elitism, 1)[0])\n self.pop[i].mutation_using(self.mutation_rate)\n while self.pop[i].active_num < self.min_active:\n self.pop[i].mutation_using(self.mutation_rate)\n self.pop[i].mutation_node(self.mutation_rate)\n while self.pop[i].parameter > self.max_params or self.pop[i].parameter < self.min_params:\n self.pop[i].mutation_node(self.mutation_rate)\n\n def get_cross_child(self, muta_num):\n \"\"\"Generate the children of the next offspring with crossover operation.\n\n :param muta_num: number of mutated children\n :type muta_num: int\n \"\"\"\n for i in range(int(self.individual_num / 4)):\n pop_id = muta_num + i * 2\n father, mother = self.parent_select(2, 'Roulette')\n length = np.random.randint(4, int(father.gene.shape[0] / 2))\n location = np.random.randint(0, father.gene.shape[0] - length)\n gene_1 = father.gene.copy()\n gene_2 = mother.gene.copy()\n gene_1[location:(location + length), :] = gene_2[location:(location + length), :]\n gene_2[location:(location + length), :] = father.gene[location:(location + length), :]\n self.pop[pop_id].update_gene(gene_1)\n self.pop[pop_id + 1].update_gene(gene_2)\n while self.pop[pop_id].active_num < self.min_active:\n self.pop[pop_id].mutation_using(self.mutation_rate)\n param = self.pop[pop_id].parameter\n while param > self.max_params or param < self.min_params:\n self.pop[pop_id].mutation_node(self.mutation_rate)\n param = self.pop[pop_id].parameter\n while self.pop[pop_id + 1].active_num < self.min_active:\n self.pop[pop_id + 1].mutation_using(self.mutation_rate)\n param = self.pop[pop_id + 1].parameter\n while param > self.max_params or param < self.min_params:\n self.pop[pop_id + 1].mutation_node(self.mutation_rate)\n param = self.pop[pop_id + 1].parameter\n\n def reproduction(self):\n \"\"\"Generate the new offsprings.\"\"\"\n muta_num = self.individual_num - (self.individual_num // 4) * 2\n self.get_mutate_child(muta_num)\n self.get_cross_child(muta_num)\n\n def update(self, record):\n \"\"\"Update function.\n\n :param local_worker_path: the local path that saved `performance.txt`.\n :type local_worker_path: str\n \"\"\"\n worker_id = int(record.get(\"worker_id\"))\n performance = float(record.get(\"rewards\"))\n self.fitness_pop[(worker_id - 1) % self.individual_num] = performance\n self.fit_state[(worker_id - 1) % self.individual_num] = 1\n\n def get_fitness(self):\n \"\"\"Get the evalutation of each individual.\n\n :return: a list of evaluations\n :rtype: list\n \"\"\"\n pd_path = os.path.join(self.local_output_path, 'population_fitness.csv')\n with open(pd_path, \"r\") as file:\n df = pd.read_csv(file)\n fitness_all = df['PSNR'].values\n fitness = fitness_all[fitness_all.size - self.individual_num:]\n return list(fitness)\n\n def search(self):\n \"\"\"Search one random model.\n\n :return: current number of samples, and the model\n :rtype: int and class\n \"\"\"\n if self.indiv_count > 0 and self.indiv_count % self.individual_num == 0:\n if np.sum(np.asarray(self.fit_state)) < self.individual_num:\n return\n else:\n self.update_fitness(self.fitness_pop)\n 
self.update_elitism(self.fitness_pop)\n self.save_results()\n self.reproduction()\n self.evolution_count += 1\n self.fitness_pop = [0] * self.individual_num\n self.fit_state = [0] * self.individual_num\n current_indiv = self.pop[self.indiv_count % self.individual_num]\n indiv_cfg = self.codec.decode(current_indiv)\n self.indiv_count += 1\n logging.info('model parameters:{}, model flops:{}'.format(current_indiv.parameter, current_indiv.flops))\n logging.info('model arch:{}'.format(current_indiv.active_net_list()))\n return self.indiv_count, indiv_cfg\n\n @property\n def max_samples(self):\n \"\"\"Get max samples number.\"\"\"\n return self.generation_num * self.individual_num\n"
]
| [
[
"numpy.asarray",
"numpy.min",
"numpy.save",
"numpy.stack",
"numpy.random.randint",
"numpy.cumsum",
"pandas.read_csv"
]
]
|
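parent_select in the record above offers tournament and roulette selection; the roulette branch is the less obvious one, so it is isolated below as a stand-alone helper. The fitness values are illustrative, and the sketch assumes they are not all equal (otherwise the normalization divides by zero, as in the original).

    import numpy as np
    from bisect import bisect_right
    from random import random

    # Roulette-wheel selection as in ESRSearch.parent_select: shift fitness
    # to be non-negative, normalize, and pick the index whose cumulative
    # probability interval contains a uniform draw.
    def roulette_pick(fitness):
        shifted = np.asarray(fitness, dtype=float) - np.min(fitness)
        thresholds = np.cumsum(shifted / shifted.sum())
        return bisect_right(thresholds, random())

    print(roulette_pick([21.3, 22.1, 22.8, 20.9]))   # index of the selected individual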
tomsilver/pytorch-a2c-ppo-acktr | [
"8b714843eafe8fad622281a0ca8337ccb5f65895"
]
| [
"null_agent.py"
]
| [
"from gym import spaces\n\nimport numpy as np\n\n\nclass NullAgent(object):\n\n def __init__(self, action_space):\n self.action_space = action_space\n self.discrete = isinstance(action_space, spaces.Discrete)\n\n def act(self, observation, reward, done):\n if self.discrete:\n return 0\n return np.zeros(self.action_space.shape[0])\n"
]
| [
[
"numpy.zeros"
]
]
|
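A two-line usage check of the NullAgent record above, covering both branches of act(); the gym spaces are arbitrary examples.

    from gym import spaces
    from null_agent import NullAgent   # class shown in the record above

    print(NullAgent(spaces.Discrete(4)).act(None, 0.0, False))                  # 0
    print(NullAgent(spaces.Box(-1.0, 1.0, shape=(3,))).act(None, 0.0, False))   # [0. 0. 0.]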
betaveros/voxx | [
"33b37f1efca061fa4651b934fb302199b87a8d08"
]
| [
"buffers.py"
]
| [
"#####################################################################\n#\n# buffers.py\n#\n# Copyright (c) 2017, Eran Egozy\n#\n# Released under the MIT License (http://opensource.org/licenses/MIT)\n#\n#####################################################################\n\nimport numpy as np\n\n\n# First-in First-out buffer used for buffering audio data\nclass FIFOBuffer(object):\n def __init__(self, buf_size = 4096, buf_type = np.float):\n super(FIFOBuffer, self).__init__()\n\n self.buf_type = buf_type\n self.buffer = np.zeros(buf_size, dtype=buf_type)\n self.write_ptr = 0\n\n # how much space is available for writing\n def get_write_available(self):\n return len(self.buffer) - self.write_ptr\n\n # how much data is available for reading\n def get_read_available(self):\n return self.write_ptr\n\n # write 'signal' into buffer\n def write(self, signal):\n amt = len(signal)\n L = len(self.buffer)\n assert(self.write_ptr + amt <= L)\n self.buffer[self.write_ptr:self.write_ptr+amt] = signal\n self.write_ptr += amt\n\n # read 'amt' values from buffer\n def read(self, amt):\n assert(amt <= self.write_ptr)\n out = self.buffer[:amt].copy()\n remaining = self.write_ptr - amt\n self.buffer[0:remaining] = self.buffer[amt:self.write_ptr]\n self.write_ptr = remaining\n return out\n\n\n\ndef test_audio_buffer():\n ab = FIFOBuffer(50)\n assert( ab.get_write_available() == 50)\n assert( ab.get_read_available() == 0)\n\n ab.write(np.arange(0,25))\n assert( ab.get_read_available() == 25 )\n assert( (ab.read(20) == np.arange(0, 20)).all() )\n assert( ab.get_read_available() == 5 )\n assert( (ab.read(5) == np.arange(20, 25)).all() )\n\n ab.write(np.arange(0,40))\n assert( ab.get_read_available() == 40 )\n assert( (ab.read(20) == np.arange(0, 20)).all() )\n assert( ab.get_read_available() == 20 )\n assert( (ab.read(20) == np.arange(20, 40)).all() )\n assert( ab.get_read_available() == 0 )\n\n ab = FIFOBuffer(50)\n ab.write(np.arange(0,50))\n assert( ab.get_read_available() == 50 )\n assert( (ab.read(20) == np.arange(0, 20)).all() )\n assert( ab.get_read_available() == 30 )\n\n ab.write(np.arange(50,60))\n assert( ab.get_read_available() == 40 )\n assert( (ab.read(40) == np.arange(20, 60)).all() )\n assert( ab.get_read_available() == 0 )\n assert( ab.get_write_available() == 50 )\n\n ab.write(np.arange(0,50))\n assert( ab.get_read_available() == 50 )\n assert( (ab.read(20) == np.arange(0, 20)).all() )\n assert( ab.get_read_available() == 30 )\n\n ab.write(np.arange(50,60))\n assert( ab.get_read_available() == 40 )\n assert( (ab.read(40) == np.arange(20, 60)).all() )\n assert( ab.get_read_available() == 0 )\n assert( ab.get_write_available() == 50 )\n\n\n# testing\nif __name__ == \"__main__\":\n test_audio_buffer()\n"
]
| [
[
"numpy.arange",
"numpy.zeros"
]
]
|
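A short round trip through the FIFO buffer in the record above. Note that the class default buf_type=np.float was removed in NumPy 1.24+, so on recent NumPy the module itself needs that default changed (e.g. to np.float64) before it will import; the sketch passes the dtype explicitly.

    import numpy as np
    from buffers import FIFOBuffer   # class shown in the record above

    buf = FIFOBuffer(buf_size=16, buf_type=np.float64)
    buf.write(np.arange(10))
    print(buf.get_read_available())    # 10
    print(buf.read(4))                 # [0. 1. 2. 3.]
    print(buf.get_read_available())    # 6
    print(buf.get_write_available())   # 10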
paramraghavan/kids-py-learn | [
"cf9f032bc7c13104837ec9eaefb5bdaafc158fe2"
]
| [
"src/funstuff/zoom_word_cloud.py"
]
| [
"'''\nCreate background image for online work meetings and interviews.\nReferenced from :\nhttps://towardsdatascience.com/fun-valentines-day-gift-ideas-for-python-programmers-a27e87b9211b\n'''\nfrom wordcloud import WordCloud, STOPWORDS\nimport imageio\nimport matplotlib.pyplot as plt\n\ntext = 'bigdata,pyspark,aws,s3,emr,lambda,'\\\n 'api_gateway,dynamodb,elb,glue,iam,code_commit,redshift,docker, aws_batch,'\\\n 'ci_cd,serverless_framework,rds,java,python,shell_scripting,sql,neo4j,pyflask' \\\n ',aurora, oracle, postgres'\n\n\nprint(STOPWORDS)\nwordcloud = WordCloud(width=1900,\n height=1080,\n prefer_horizontal=0.5,\n #background_color=\"rgba(255, 255, 255, 0)\",\n #mode=\"RGBA\"\n ).generate(text)\n\n#plt.imshow(wordcloud, interpolation='bilinear')\nplt.imshow(wordcloud)\nplt.axis(\"off\")\nplt.show()\nwordcloud.to_file(\"zoom_background.png\")\n#plt.savefig(\"simple.png\")\n#wordcloud.to_file(\"simple.png\")"
]
| [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
]
|
GF1447571253/YOLOX-main | [
"ca9ba43558306968a219cb941a0d23cc476837b1"
]
| [
"tools/train.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport argparse\nimport random\nimport sys\nimport warnings\nfrom loguru import logger\n\nimport torch\nimport torch.backends.cudnn as cudnn\n\nfrom yolox.core import Trainer, launch\nfrom yolox.exp import get_exp\nfrom yolox.utils import configure_nccl, configure_omp, get_num_devices\n\n\ndef make_parser():\n parser = argparse.ArgumentParser(\"YOLOX train parser\")\n parser.add_argument(\"-expn\", \"--experiment-name\", type=str, default=None)\n parser.add_argument(\"-n\", \"--name\", type=str, default=None, help=\"model name\")\n\n # distributed\n parser.add_argument(\n \"--dist-backend\", default=\"nccl\", type=str, help=\"distributed backend\"\n )\n parser.add_argument(\n \"--dist-url\",\n default=None,\n type=str,\n help=\"url used to set up distributed training\",\n )\n parser.add_argument(\"-b\", \"--batch-size\", type=int, default=8, help=\"batch size\")\n parser.add_argument(\n \"-d\", \"--devices\", default=None, type=int, help=\"device for training\"\n )\n parser.add_argument(\n \"-f\",\n \"--exp_file\",\n default='../exps/default/yolox_l.py',\n type=str,\n help=\"plz input your expriment description file\",\n )\n parser.add_argument(\n \"--resume\", default=False, action=\"store_true\", help=\"resume training\"\n )\n parser.add_argument(\"-c\", \"--ckpt\", default=None, type=str, help=\"checkpoint file\")\n parser.add_argument(\n \"-e\",\n \"--start_epoch\",\n default=None,\n type=int,\n help=\"resume training start epoch\",\n )\n parser.add_argument(\n \"--num_machines\", default=1, type=int, help=\"num of node for training\"\n )\n parser.add_argument(\n \"--machine_rank\", default=0, type=int, help=\"node rank for multi-node training\"\n )\n parser.add_argument(\n \"--fp16\",\n dest=\"fp16\",\n default=False,\n action=\"store_true\",\n help=\"Adopting mix precision training.\",\n )\n parser.add_argument(\n \"--cache\",\n dest=\"cache\",\n default=False,\n action=\"store_true\",\n help=\"Caching imgs to RAM for fast training.\",\n )\n parser.add_argument(\n \"-o\",\n \"--occupy\",\n dest=\"occupy\",\n default=False,\n action=\"store_true\",\n help=\"occupy GPU memory first for training.\",\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n return parser\n\n\[email protected]\ndef main(exp, args):\n if exp.seed is not None:\n random.seed(exp.seed)\n torch.manual_seed(exp.seed)\n cudnn.deterministic = True\n warnings.warn(\n \"You have chosen to seed training. This will turn on the CUDNN deterministic setting, \"\n \"which can slow down your training considerably! 
You may see unexpected behavior \"\n \"when restarting from checkpoints.\"\n )\n\n # set environment variables for distributed training\n configure_nccl()\n configure_omp()\n cudnn.benchmark = True # 为整个网络的每个卷积层搜索最适合它的卷积实现算法,进而实现网络的加速。\n\n trainer = Trainer(exp, args)\n trainer.train()\n\n\nif __name__ == \"__main__\":\n args = make_parser().parse_args()\n print(sys.path)\n exp = get_exp(args.exp_file, args.name) # expriment description file / model name\n exp.merge(args.opts)\n\n if not args.experiment_name:\n args.experiment_name = exp.exp_name\n\n num_gpu = get_num_devices() if args.devices is None else args.devices\n assert num_gpu <= get_num_devices()\n\n dist_url = \"auto\" if args.dist_url is None else args.dist_url\n launch(\n main,\n num_gpu,\n args.num_machines,\n args.machine_rank,\n backend=args.dist_backend,\n dist_url=dist_url,\n args=(exp, args),\n )\n"
]
| [
[
"torch.manual_seed"
]
]
|
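The training script in the record above is driven entirely by its argparse parser; feeding an argument list to make_parser() directly shows how a command line resolves without launching training (the values below are illustrative).

    from tools.train import make_parser   # parser shown in the record above

    args = make_parser().parse_args(
        ["-n", "yolox_l", "-b", "16", "-d", "1", "--fp16", "--cache"]
    )
    print(args.name, args.batch_size, args.devices, args.fp16, args.cache)
    print(args.exp_file)   # defaults to ../exps/default/yolox_l.py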
julien-vial-detambel/worldengine | [
"554747846d3e1d38921afd66be8459c5c9553554"
]
| [
"worldengine/imex/__init__.py"
]
| [
"try:\n from osgeo import gdal\nexcept ImportError:\n try:\n import gdal\n except ImportError:\n print(\"Unable to load GDAL support, no heightmap export possible.\")\n\nimport numpy\nimport os\nimport sys\nimport tempfile\n\n'''\nUseful CLI tools:\npython worldengine export seed_24106.world --export-format envi --export-datatype float32\ngdal_translate -srcwin 375 384 128 128 seed_24106_elevation-32.envi test.envi\ngdal_translate test.envi -r cubicspline -outsize 1000% 1000% test2.envi\ngdal_translate test2.envi -scale -of PNG -ot Byte test.png\ngdal_translate test2.envi -scale -ot int32 test3.envi\n'''\n\n\n'''\nWhenever a GDAL short-format (http://www.gdal.org/formats_list.html) is given\nand a unique mapping to a file suffix exists, it is looked up in gdal_mapper.\n\nTrivial ones (i.e. a call to lower() does the job) are not handled:\n BAG, BMP, BT, ECW, ERS, FITS, GIF, GTA, PNG, RIK, VRT, XPM\n\nAll other formats (>100) currently end up with their respective GDAL short-format\nconverted to lower-case and might need to be renamed by the user.\n'''\ngdal_mapper = { # TODO: Find a way to make GDAL provide this mapping.\n \"aig\" : \"adf\",\n \"bsb\" : \"kap\",\n \"doq1\" : \"doq\",\n \"doq2\" : \"doq\",\n \"esat\" : \"n1\",\n \"grib\" : \"grb\",\n \"gtiff\" : \"tif\",\n \"hfa\" : \"img\",\n \"jdem\" : \"mem\",\n \"jpeg\" : \"jpg\",\n \"msgn\" : \"nat\",\n \"terragen\": \"ter\",\n \"usgsdem\" : \"dem\",\n}\n\n\ndef export(world, export_filetype='GTiff', export_datatype='float32', export_dimensions=None,\n export_normalize=None, export_subset=None, path='seed_output'):\n try:\n gdal\n except NameError:\n print(\"Cannot export: please install pygdal.\")\n sys.exit(1)\n\n final_driver = gdal.GetDriverByName(export_filetype)\n if final_driver is None:\n print(\"%s driver not registered.\" % export_filetype)\n sys.exit(1)\n\n # try to find the proper file-suffix\n export_filetype = export_filetype.lower()\n if export_filetype in gdal_mapper:\n export_filetype = gdal_mapper[export_filetype]\n\n # Note: GDAL will throw informative errors on its own whenever\n # file type and data type cannot be matched.\n\n # translate export_datatype; http://www.gdal.org/gdal_8h.html#a22e22ce0a55036a96f652765793fb7a4\n export_datatype = export_datatype.lower()\n\n if export_datatype in ['gdt_byte', 'uint8', 'int8', 'byte', 'char']:\n bpp = 8 # GDAL does not support int8\n numpy_type = numpy.uint8\n gdal_type = gdal.GDT_Byte\n elif export_datatype in ['gdt_uint16', 'uint16']:\n bpp = 16\n numpy_type = numpy.uint16\n gdal_type = gdal.GDT_UInt16\n elif export_datatype in ['gdt_uint32', 'uint32']:\n bpp = 32\n numpy_type = numpy.uint32\n gdal_type = gdal.GDT_UInt32\n elif export_datatype in ['gdt_int16', 'int16']:\n bpp = 16\n numpy_type = numpy.int16\n gdal_type = gdal.GDT_Int16\n elif export_datatype in ['gdt_int32', 'int32', 'int']: # fallback for 'int'\n bpp = 32\n numpy_type = numpy.int32\n gdal_type = gdal.GDT_Int32\n elif export_datatype in ['gdt_float32', 'float32', 'float']: # fallback for 'float'\n bpp = 32\n numpy_type = numpy.float32\n gdal_type = gdal.GDT_Float32\n elif export_datatype in ['gdt_float64', 'float64']:\n bpp = 64\n numpy_type = numpy.float64\n gdal_type = gdal.GDT_Float64\n else:\n raise TypeError(\n \"Type of data not recognized or not supported by GDAL: %s\" % export_datatype)\n\n # massage data to scale between the absolute min and max\n elevation = numpy.copy(world.layers['elevation'].data)\n\n # switch to final data type; no rounding performed\n elevation = 
elevation.astype(numpy_type)\n\n # take elevation data and push it into an intermediate ENVI format,\n # some formats don't support being written by Create()\n inter_driver = gdal.GetDriverByName(\"ENVI\")\n fh_inter_file, inter_file = tempfile.mkstemp() # returns: (file-handle, absolute path)\n intermediate_ds = inter_driver.Create(inter_file, world.size.width, world.size.height, 1, gdal_type)\n band = intermediate_ds.GetRasterBand(1)\n band.WriteArray(elevation)\n band = None # dereference band\n intermediate_ds = None # save/flush and close\n\n # take the intermediate ENVI format and convert to final format\n intermediate_ds = gdal.Open(inter_file)\n\n # For more information about gdal_translate\n # https://svn.osgeo.org/gdal/trunk/autotest/utilities/test_gdal_translate_lib.py\n # https://github.com/dezhin/pygdal/blob/master/2.1.2/osgeo/gdal.py\n\n # re-size, normalize and blend if necessary\n width = height = None\n if export_dimensions is not None:\n width, height = export_dimensions\n\n # normalize data-set to the min/max allowed by data-type, typical for 8bpp\n scale_param = None\n if export_normalize is not None:\n min_norm, max_norm = export_normalize\n scale_param = [[elevation.min(), elevation.max(), min_norm, max_norm]]\n\n # apply changes to the dataset\n if export_dimensions or export_normalize:\n intermediate_ds = gdal.Translate(\n '', intermediate_ds, format='MEM', width=width, height=height,\n scaleParams=scale_param, resampleAlg=gdal.GRA_CubicSpline\n #exponents=str(2)\n )\n\n # only use a specific subset of dataset\n if export_subset is not None:\n intermediate_ds = gdal.Translate('', intermediate_ds, format='MEM', srcWin=export_subset)\n\n final_driver.CreateCopy('%s-%d.%s' % (path, bpp, export_filetype), intermediate_ds)\n\n intermediate_ds = None # dereference\n os.close(fh_inter_file)\n os.remove(inter_file)\n"
]
| [
[
"numpy.copy"
]
]
|
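Much of export() in the record above is the translation from a datatype string to (bits per pixel, NumPy dtype, GDAL type); a table-driven version of just that step is shown below, with GDAL constants kept as names so the sketch runs without pygdal. The extra aliases accepted by the original ('byte', 'char', 'int', 'float', 'gdt_*') are omitted for brevity.

    import numpy

    DATATYPE_MAP = {
        "uint8":   (8,  numpy.uint8,   "GDT_Byte"),
        "uint16":  (16, numpy.uint16,  "GDT_UInt16"),
        "uint32":  (32, numpy.uint32,  "GDT_UInt32"),
        "int16":   (16, numpy.int16,   "GDT_Int16"),
        "int32":   (32, numpy.int32,   "GDT_Int32"),
        "float32": (32, numpy.float32, "GDT_Float32"),
        "float64": (64, numpy.float64, "GDT_Float64"),
    }

    bpp, numpy_type, gdal_type_name = DATATYPE_MAP["float32"]
    print(bpp, numpy_type, gdal_type_name)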
divshacker/qiskit-ode | [
"3b5d7afb1a80faea9b489f1d79b09c1e52580107"
]
| [
"test/ode/test_solve_lmde.py"
]
| [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n# pylint: disable=invalid-name,broad-except\n\n\"\"\"Tests for solve_lmde and related functions.\"\"\"\n\nimport numpy as np\nfrom scipy.linalg import expm\n\nfrom qiskit_ode.models import GeneratorModel\nfrom qiskit_ode.signals import Signal\nfrom qiskit_ode import solve_lmde\nfrom qiskit_ode.solve import setup_lmde_frames_and_generator, lmde_y0_reshape\nfrom qiskit_ode.dispatch import Array\n\nfrom .common import QiskitOdeTestCase, TestJaxBase\n\n\nclass TestLMDESetup(QiskitOdeTestCase):\n \"\"\"Test solve_lmde helper functions.\"\"\"\n\n def setUp(self):\n self.X = Array([[0.0, 1.0], [1.0, 0.0]], dtype=complex)\n self.Y = Array([[0.0, -1j], [1j, 0.0]], dtype=complex)\n self.Z = Array([[1.0, 0.0], [0.0, -1.0]], dtype=complex)\n\n # define a basic model\n w = 2.0\n r = 0.5\n operators = [-1j * 2 * np.pi * self.Z / 2, -1j * 2 * np.pi * r * self.X / 2]\n signals = [w, Signal(1.0, w)]\n\n self.w = 2\n self.r = r\n self.basic_model = GeneratorModel(operators=operators, signals=signals)\n\n self.y0 = Array([1.0, 0.0], dtype=complex)\n\n def test_auto_frame_handling(self):\n \"\"\"Test automatic setting of frames.\"\"\"\n\n self.basic_model.frame = self.X\n\n input_frame, output_frame, generator = setup_lmde_frames_and_generator(self.basic_model)\n\n self.assertTrue(\n np.allclose(input_frame.frame_operator, Array([[0.0, 1.0], [1.0, 0.0]], dtype=complex))\n )\n self.assertTrue(\n np.allclose(output_frame.frame_operator, Array([[0.0, 1.0], [1.0, 0.0]], dtype=complex))\n )\n self.assertTrue(\n np.allclose(generator.frame.frame_operator, -1j * 2 * np.pi * self.w * self.Z / 2)\n )\n\n def test_y0_reshape(self):\n \"\"\"Test automatic detection of vectorized LMDE.\"\"\"\n\n y0 = Array(np.eye(2))\n\n output = lmde_y0_reshape(4, y0)\n expected = y0.flatten(order=\"F\")\n\n self.assertAllClose(output, expected)\n\n def test_solver_cutoff_freq(self):\n \"\"\"Test correct setting of solver cutoff freq.\"\"\"\n _, _, generator = setup_lmde_frames_and_generator(\n self.basic_model, solver_cutoff_freq=2 * self.w\n )\n\n self.assertTrue(generator.cutoff_freq == 2 * self.w)\n self.assertTrue(self.basic_model.cutoff_freq is None)\n\n def test_generator(self):\n \"\"\"Test correct evaluation of generator.\n The generator is evaluated in the solver frame in a basis in which the\n frame operator is diagonal.\n \"\"\"\n\n _, _, generator = setup_lmde_frames_and_generator(self.basic_model, solver_frame=self.X)\n\n t = 13.1231\n\n output = generator(t, in_frame_basis=True).data\n\n X = np.array(self.X.data)\n X_diag, U = np.linalg.eigh(X)\n Uadj = U.conj().transpose()\n gen = (\n -1j\n * 2\n * np.pi\n * (self.w * np.array(self.Z.data) / 2 + self.r * np.cos(2 * np.pi * self.w * t) * X / 2)\n )\n expected = Uadj @ expm(1j * t * X) @ gen @ expm(-1j * t * X) @ U + 1j * np.diag(X_diag)\n\n self.assertAllClose(expected, output)\n\n def test_rhs(self):\n \"\"\"Test correct evaluation of rhs.\n The generator is evaluated in the solver frame in a basis in which the\n frame operator is diagonal.\n \"\"\"\n\n _, _, generator = 
setup_lmde_frames_and_generator(self.basic_model, solver_frame=self.X)\n\n t = 13.1231\n y = np.eye(2, dtype=complex)\n\n output = generator(t, y, in_frame_basis=True).data\n\n X = np.array(self.X.data)\n X_diag, U = np.linalg.eigh(X)\n Uadj = U.conj().transpose()\n gen = (\n -1j\n * 2\n * np.pi\n * (self.w * np.array(self.Z.data) / 2 + self.r * np.cos(2 * np.pi * self.w * t) * X / 2)\n )\n expected = (\n Uadj @ expm(1j * t * X) @ gen @ expm(-1j * t * X) @ U + 1j * np.diag(X_diag)\n ) @ y\n\n self.assertTrue(np.allclose(expected, output))\n\n\nclass TestLMDESetupJax(TestLMDESetup, TestJaxBase):\n \"\"\"Jax version of TestLMDESetup tests.\n\n Note: This class has no body but contains tests due to inheritance.\n \"\"\"\n\n\n# pylint: disable=too-many-instance-attributes\nclass Testsolve_lmde_Base(QiskitOdeTestCase):\n \"\"\"Some reusable routines for high level solve_lmde tests.\"\"\"\n\n def setUp(self):\n self.t_span = [0.0, 1.0]\n self.y0 = Array(np.eye(2, dtype=complex))\n\n self.X = Array([[0.0, 1.0], [1.0, 0.0]], dtype=complex)\n self.Y = Array([[0.0, -1j], [1j, 0.0]], dtype=complex)\n self.Z = Array([[1.0, 0.0], [0.0, -1.0]], dtype=complex)\n\n # simple generator and rhs\n # pylint: disable=unused-argument\n def generator(t):\n return -1j * 2 * np.pi * self.X / 2\n\n self.basic_generator = generator\n\n def _fixed_step_LMDE_method_tests(self, method):\n results = solve_lmde(\n self.basic_generator, t_span=self.t_span, y0=self.y0, method=method, max_dt=0.1\n )\n\n expected = expm(-1j * np.pi * self.X.data)\n\n self.assertAllClose(results.y[-1], expected)\n\n\nclass Testsolve_lmde_scipy_expm(Testsolve_lmde_Base):\n \"\"\"Basic tests for solve_lmde with method=='expm'.\"\"\"\n\n def test_scipy_expm_solver(self):\n \"\"\"Test scipy_expm_solver.\"\"\"\n self._fixed_step_LMDE_method_tests(\"scipy_expm\")\n\n\nclass Testsolve_lmde_jax_expm(Testsolve_lmde_Base, TestJaxBase):\n \"\"\"Basic tests for solve_lmde with method=='jax_expm'.\"\"\"\n\n def test_jax_expm_solver(self):\n \"\"\"Test jax_expm_solver.\"\"\"\n self._fixed_step_LMDE_method_tests(\"jax_expm\")\n"
]
| [
[
"scipy.linalg.expm",
"numpy.array",
"numpy.linalg.eigh",
"numpy.eye",
"numpy.allclose",
"numpy.cos",
"numpy.diag"
]
]
|
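The fixed-step tests in the record above integrate the constant generator G = -i*pi*X over t in [0, 1] and compare against the closed-form propagator expm(-i*pi*X); that reference value can be checked directly:

    import numpy as np
    from scipy.linalg import expm

    X = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=complex)
    U = expm(-1j * np.pi * X)           # propagator for G = -i*pi*X over unit time
    print(np.allclose(U, -np.eye(2)))   # True: cos(pi)*I - i*sin(pi)*X = -I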
LucasTsui0725/TextBox | [
"bb96e16bcfa63a7f84461b5678f7718566385a8d"
]
| [
"textbox/module/Discriminator/TextGANDiscriminator.py"
]
| [
"# @Time : 2020/11/24\n# @Author : Tianyi Tang\n# @Email : [email protected]\n\nr\"\"\"\nTextGAN Discriminator\n#####################\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom textbox.model.abstract_generator import UnconditionalGenerator\n\n\nclass TextGANDiscriminator(UnconditionalGenerator):\n r\"\"\"The discriminator of TextGAN.\n \"\"\"\n\n def __init__(self, config, dataset):\n super(TextGANDiscriminator, self).__init__(config, dataset)\n\n self.embedding_size = config['discriminator_embedding_size']\n self.hidden_size = config['hidden_size']\n self.l2_reg_lambda = config['l2_reg_lambda']\n self.mmd_lambda = config['mmd_lambda']\n self.recon_lambda = config['recon_lambda']\n self.dropout_rate = config['dropout_rate']\n self.filter_sizes = config['filter_sizes']\n self.filter_nums = config['filter_nums']\n self.max_length = config['max_seq_length'] + 2\n self.gaussian_sigmas = torch.tensor(config['gaussian_sigmas'], device=self.device)\n self.pad_idx = dataset.padding_token_idx\n self.filter_sum = sum(self.filter_nums)\n\n self.word_embedding = nn.Embedding(self.vocab_size, self.embedding_size, padding_idx=self.pad_idx)\n self.dropout = nn.Dropout(self.dropout_rate)\n self.filters = nn.ModuleList([])\n\n for (filter_size, filter_num) in zip(self.filter_sizes, self.filter_nums):\n self.filters.append(\n nn.Sequential(\n nn.Conv2d(1, filter_num, (filter_size, self.embedding_size)), nn.ReLU(),\n nn.MaxPool2d((self.max_length - filter_size + 1, 1))\n )\n )\n\n self.W_O = nn.Linear(self.filter_sum, 1)\n self.recon = nn.Linear(self.filter_sum, self.hidden_size)\n\n def feature(self, data): # b * len * v\n r\"\"\"Get the feature map extracted from CNN for data.\n\n Args:\n data (torch.Tensor): The data to be extraced, shape: [batch_size, max_seq_len, vocab_size].\n\n Returns:\n torch.Tensor: The feature of data, shape: [batch_size, total_filter_num].\n \"\"\"\n data = torch.matmul(data.float(), self.word_embedding.weight).unsqueeze(1) # b * len * e -> b * 1 * len * e\n combined_outputs = []\n for CNN_filter in self.filters:\n output = CNN_filter(data).squeeze(-1).squeeze(-1) # b * f_n * 1 * 1 -> b * f_n\n combined_outputs.append(output)\n combined_outputs = torch.cat(combined_outputs, 1) # b * tot_f_n\n combined_outputs = self.dropout(combined_outputs)\n\n return combined_outputs\n\n def forward(self, data): # b * len * v\n r\"\"\"Calculate the probability that the data is realistic.\n\n Args:\n data (torch.Tensor): The sentence data, shape: [batch_size, max_seq_len, vocab_size].\n\n Returns:\n torch.Tensor: The probability that each sentence is realistic, shape: [batch_size].\n \"\"\"\n features = self.feature(data) # b * tot_f_n\n y_hat = torch.sigmoid(self.W_O(features)).squeeze(1) # b\n return y_hat\n\n def _calculate_gan_loss(self, real_data, fake_data):\n r\"\"\"Calculate the vanilla gan loss for real data and fake data.\n\n Args:\n real_data (torch.Tensor): The realistic sentence data, shape: [batch_size, max_seq_len].\n fake_data (torch.Tensor): The generated sentence data, shape: [batch_size, max_seq_len].\n\n Returns:\n torch.Tensor: The calculated gan loss of real data and fake data, shape: [].\n \"\"\"\n real_y = self.forward(real_data)\n fake_y = self.forward(fake_data)\n real_label = torch.ones_like(real_y)\n fake_label = torch.zeros_like(fake_y)\n\n real_loss = F.binary_cross_entropy(real_y, real_label)\n fake_loss = F.binary_cross_entropy(fake_y, fake_label)\n loss = (real_loss + fake_loss) / 2\n\n return loss\n\n def 
_gaussian_kernel_matrix(self, x, y): # b * tot_f_n, b * tot_f_n\n r\"\"\"Conduct gaussian kernel for feature x and y.\n\n Args:\n x (torch.Tensor): One feature map, shape: [batch_size, total_filter_num].\n y (torch.Tensor): The other feature map, shape: [batch_size, total_filter_num].\n\n Returns:\n torch.Tensor: The result after conducting gaussian kernel, shape: [batch_size, batch_size].\n \"\"\"\n beta = 1. / (2. * self.gaussian_sigmas.unsqueeze(1)) # sig_n * 1\n dist = torch.pow((x.unsqueeze(2) - y.T).norm(dim=1),\n 2).T # b * t * 1 - t * b -> b * t * b - b * t * b -> b * t * b -> b * b\n s = torch.matmul(beta, dist.reshape(1, -1)) # sig_n * 1 x 1 * (b * b) -> sig_n * (b * b)\n return torch.exp(-s).sum(dim=0).reshape_as(dist) # sig_n * (b * b) -> (b * b) -> b * b\n\n def _calculate_mmd_loss(self, x, y):\n r\"\"\"Calculate the maximum mean discrepancy loss for feature x and y.\n\n Args:\n x (torch.Tensor): One feature map, shape: [batch_size, total_filter_num].\n y (torch.Tensor): The other feature map, shape: [batch_size, total_filter_num].\n\n Returns:\n torch.Tensor: The calculated mmd loss of x, y, shape: [].\n \"\"\"\n cost = self._gaussian_kernel_matrix(x, x).mean()\n cost += self._gaussian_kernel_matrix(y, y).mean()\n cost -= 2 * self._gaussian_kernel_matrix(x, y).mean()\n return cost\n\n def _calculate_recon_loss(self, fake_feature, z): # b * tot_f_n, b * h\n r\"\"\"Calculate the reconstructed loss for fake feature and latent code z.\n\n Args:\n fake_feature (torch.Tensor): The feature map of generated data, shape: [batch_size, total_filter_num].\n z (torch.Tensor): The latent code for generation, shape: [batch_size, hidden_size].\n\n Returns:\n torch.Tensor: The calculated recon loss of fake feature and latent code z, shape: [].\n \"\"\"\n z_hat = self.recon(fake_feature) # b * h\n return (z - z_hat).norm(dim=1).mean() # b * h -> b -> 1\n\n def calculate_g_loss(self, real_data, fake_data):\n r\"\"\"Calculate the maximum mean discrepancy loss for real data and fake data.\n\n Args:\n real_data (torch.Tensor): The realistic sentence data, shape: [batch_size, max_seq_len].\n fake_data (torch.Tensor): The generated sentence data, shape: [batch_size, max_seq_len].\n\n Returns:\n torch.Tensor: The calculated mmd loss of real data and fake data, shape: [].\n \"\"\"\n real_feature = self.feature(real_data) # b * tot_f_n\n fake_feature = self.feature(fake_data) # b * tot_f_n\n mmd_loss = self._calculate_mmd_loss(real_feature, fake_feature)\n return mmd_loss\n\n def calculate_loss(self, real_data, fake_data, z):\n r\"\"\"Calculate the loss for real data and fake data.\n\n Args:\n real_data (torch.Tensor): The realistic sentence data, shape: [batch_size, max_seq_len].\n fake_data (torch.Tensor): The generated sentence data, shape: [batch_size, max_seq_len].\n z (torch.Tensor): The latent code for generation, shape: [batch_size, hidden_size].\n\n Returns:\n torch.Tensor: The calculated loss of real data and fake data, shape: [].\n \"\"\"\n gan_loss = self._calculate_gan_loss(real_data, fake_data)\n real_feature = self.feature(real_data) # b * tot_f_n\n fake_feature = self.feature(fake_data) # b * tot_f_n\n mmd_loss = -self.mmd_lambda * self._calculate_mmd_loss(real_feature, fake_feature)\n recon_loss = self.recon_lambda * self._calculate_recon_loss(fake_feature, z)\n l2_reg_loss = self.l2_reg_lambda * (self.W_O.weight.norm() + self.W_O.bias.norm())\n\n loss = gan_loss + mmd_loss + recon_loss + l2_reg_loss\n return loss\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.MaxPool2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.tensor",
"torch.ones_like",
"torch.zeros_like",
"torch.exp",
"torch.nn.Embedding",
"torch.nn.functional.binary_cross_entropy"
]
]
|
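The least self-explanatory pieces of the discriminator record above are _gaussian_kernel_matrix and _calculate_mmd_loss; rewritten directly from the formula k(a, b) = sum_s exp(-||a - b||^2 / (2 s)), the same biased MMD estimator looks like this. The sigmas and feature tensors below are placeholders for config['gaussian_sigmas'] and the CNN feature maps.

    import torch

    def mmd_loss(x, y, sigmas=(1.0, 2.0, 4.0, 8.0)):
        # Multi-bandwidth Gaussian kernel summed over sigmas, then the
        # biased MMD^2 estimator k(x,x) + k(y,y) - 2 k(x,y), each averaged.
        def kernel(a, b):
            d2 = torch.cdist(a, b) ** 2
            return sum(torch.exp(-d2 / (2.0 * s)) for s in sigmas)
        return kernel(x, x).mean() + kernel(y, y).mean() - 2.0 * kernel(x, y).mean()

    real_feat = torch.randn(8, 32)           # stand-ins for discriminator features
    fake_feat = torch.randn(8, 32) + 0.5
    print(mmd_loss(real_feat, real_feat).item())   # exactly 0 for identical samples
    print(mmd_loss(real_feat, fake_feat).item())   # positive when the samples differ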
FrancaCassol/cta-lstchain | [
"6f98711c8bb7464ac3781abc0b9de054477eefad"
]
| [
"lstchain/datachecks/dl1_checker.py"
]
| [
"\"\"\"\nFunctions to check the contents ofLST DL1 files and associated muon ring files\n\"\"\"\n\n__all__ = [\n 'check_dl1',\n 'process_dl1_file',\n 'plot_datacheck',\n 'plot_trigger_types',\n 'plot_mean_and_stddev',\n 'merge_dl1datacheck_files'\n]\n\nimport h5py\nimport logging\nimport matplotlib as mpl\nimport matplotlib.colors as colors\nimport matplotlib.dates as dates\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mtick\nimport numpy as np\nimport os\nimport pandas as pd\nimport tables\n\nfrom astropy import units as u\nfrom astropy.table import Table, vstack\nfrom ctapipe.coordinates import EngineeringCameraFrame\nfrom ctapipe.instrument import CameraGeometry\nfrom ctapipe.io import HDF5TableWriter\nfrom ctapipe.visualization import CameraDisplay\nfrom datetime import datetime\nfrom lstchain.datachecks.containers import DL1DataCheckContainer\nfrom lstchain.datachecks.containers import DL1DataCheckHistogramBins\nfrom lstchain.io.io import dl1_params_lstcam_key\nfrom lstchain.paths import parse_datacheck_dl1_filename, parse_dl1_filename, \\\n run_to_muon_filename, run_to_datacheck_dl1_filename\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom multiprocessing import Pool\nfrom pathlib import Path\nfrom scipy.stats import poisson, sem\n\n\ndef check_dl1(filenames, output_path, max_cores=4, create_pdf=False):\n \"\"\"\n\n Parameters\n ----------\n filenames: string, Path, or a list of them, _sorted_ (by growing subrun\n index). Name(s) of the input DL1 .h5 file(s)\n output_path: directory where output will be written\n max_cores: maximum number of processes that the function will spawn (each\n processing a different subrun)\n\n Returns\n -------\n None\n\n \"\"\"\n\n logger = logging.getLogger(__name__)\n\n # convert to list if it is not yet a a list:\n if not isinstance(filenames, list):\n filenames = [filenames]\n\n # Define output filename (overwrite if already existing).\n # If there is a single input file (i.e. a single subrun) then the output\n # file name will keep the subrun index. If there is more than one file\n # (i.e. several subruns) the output file name omit the subrun index.\n first_file = parse_dl1_filename(os.path.basename(filenames[0]))\n if len(filenames) == 1:\n datacheck_filename = run_to_datacheck_dl1_filename(first_file.tel_id,\n first_file.run,\n first_file.subrun)\n else:\n datacheck_filename = run_to_datacheck_dl1_filename(first_file.tel_id,\n first_file.run,\n None)\n datacheck_filename = Path(output_path, datacheck_filename)\n\n # the list dl1datacheck will contain one entry per subrun. Each entry is a\n # list of 3 containers of type DL1DataCheckContainer, one for pedestals,\n # one for flatfield events and one for cosmics\n\n # check that all files exist:\n for filename in filenames:\n if not os.path.exists(filename):\n logger.error(f'File {str(filename)} not found!')\n raise FileNotFoundError\n\n # now try to determine which trigger_type tag is more reliable for\n # identifying interlaved pedestals. We choose (for now) the one which\n # has more values == 32 which is the pedestal tag. The one called\n # \"trigger_type\" seems to be the TIB trigger type. 
The fastest way to do\n # this for the whole run seems to be using normal pytables:\n trig_tags = {'trigger_type': [], 'ucts_trigger_type': []}\n for filename in filenames:\n with tables.open_file(filename,\n root_uep='/dl1/event/telescope/parameters') as f:\n for name in trig_tags.keys():\n trig_tags[name].extend(f.root.LST_LSTCam.col(name))\n num_pedestals = {'trigger_type':\n (np.array(trig_tags['trigger_type']) == 32).sum(),\n 'ucts_trigger_type':\n (np.array(trig_tags['ucts_trigger_type']) == 32).sum()}\n logger.info(f'Number of == 32 (pedestal) trigger tags: {num_pedestals}')\n\n trigger_source = 'trigger_type'\n\n # Commented lines below, because ucts_trigger_type seems to be\n # systematically wrong, even when it has more \"pedestal tags\" (==32) than\n # trigger_type\n # if num_pedestals['ucts_trigger_type'] > num_pedestals['trigger_type']:\n # trigger_source = 'ucts_trigger_type'\n\n # create container for the histograms' binnings, to be saved in the hdf5\n # output file:\n histogram_binning = DL1DataCheckHistogramBins()\n\n # create the dl1_datacheck containers (one per subrun) for the three\n # event types, and add them to the list dl1datacheck:\n with Pool(max_cores) as pool:\n func_args = [(filename, histogram_binning, trigger_source) for\n filename in filenames]\n dl1datacheck = pool.starmap(process_dl1_file, func_args)\n # NOTE: the above does not seem to improve execution time on Mac OS X.\n # Perhaps related to numpy \"sharing\" between the processes?\n\n # or... process the files sequentially:\n # dl1datacheck = list([None]*len(filenames))\n # for i, filename in enumerate(filenames):\n # dl1datacheck[i] = process_dl1_file(filename, histogram_binning,\n # trigger_source)\n\n # NOTE: I do not think we may have memory problems, but if needed we could\n # write out the containers as they are produced.\n\n writer_conf = tables.Filters(complevel=9, complib='blosc:zstd',\n fletcher32=True)\n with HDF5TableWriter(datacheck_filename, filters=writer_conf) as writer:\n # write the containers (3 per subrun) to the dl1 data check output file\n # If container is None it means the filling was unsuccessful due to\n # no events of the given type. Write only filled containers:\n for dcheck in dl1datacheck:\n if dcheck[0] is not None:\n writer.write(\"dl1datacheck/pedestals\", dcheck[0])\n if dcheck[1] is not None:\n writer.write(\"dl1datacheck/flatfield\", dcheck[1])\n if dcheck[2] is not None:\n writer.write(\"dl1datacheck/cosmics\", dcheck[2])\n # write also the histogram binnings:\n writer.write(\"dl1datacheck/histogram_binning\", histogram_binning)\n\n # we assume that cam geom is the same in all files, & write the first one\n # we convert units from m to deg\n cam_description_table = \\\n Table.read(filenames[0], path='instrument/telescope/camera/LSTCam')\n geom = CameraGeometry.from_table(cam_description_table)\n geom.to_table().write(datacheck_filename,\n path=f'/instrument/telescope/camera/LSTCam',\n append=True, serialize_meta=True)\n\n # write out also which trigger tag has been used for finding pedestals:\n file = h5py.File(datacheck_filename, mode='a')\n file.create_dataset('/dl1datacheck/used_trigger_tag', (1,), 'S32',\n [trigger_source.encode('ascii')])\n file.close()\n\n # do the plots and save them to a pdf file. 
We will look for the muons fits\n # files in the same directory as the DL1 files (assuming all of them are\n # in the same directory as the first one!)\n if create_pdf:\n plot_datacheck(datacheck_filename, output_path,\n muons_dir=os.path.dirname(filenames[0]))\n\n return\n\n\ndef process_dl1_file(filename, bins, trigger_source='trigger_type'):\n \"\"\"\n\n Parameters\n ----------\n filename: string, or Path, input DL1 .h5 file to be checked\n bins: DL1DataCheckHistogramBins container indicating binning of histograms\n trigger_source: string, name of one of the trigger tags present in the\n DL1 file\n\n Returns\n -------\n dl1datacheck_pedestals, dl1datacheck_flatfield, dl1datacheck_cosmics\n Containers of type DL1DataCheckContainer, with info on the three types of\n events: interleaved pedestals, interleaved flatfield events, and cosmics.\n If one or more of them is None, it means they have not been filled,\n due to lack of events if the given type in the input DL1 file.\n\n \"\"\"\n\n logger = logging.getLogger(__name__)\n\n # define criteria for detecting flatfield events, since as of 20200418\n # there is no reliable event tagging for those. We require a minimum\n # fraction of pixels with a charge above a sufficiently large value:\n ff_min_pixel_charge_median = 40.\n ff_max_pixel_charge_stddev = 20.\n\n logger.info(f'Opening file {filename}')\n subrun_index = parse_dl1_filename(os.path.basename(filename)).subrun\n\n dl1datacheck_pedestals = DL1DataCheckContainer()\n dl1datacheck_flatfield = DL1DataCheckContainer()\n dl1datacheck_cosmics = DL1DataCheckContainer()\n\n cam_description_table = \\\n Table.read(filename, path='instrument/telescope/camera/LSTCam')\n geom = CameraGeometry.from_table(cam_description_table)\n optics_description_table = \\\n Table.read(filename, path='instrument/telescope/optics')\n equivalent_focal_length = \\\n optics_description_table['equivalent_focal_length']\n m2deg = np.rad2deg(u.m/equivalent_focal_length*u.rad)/u.m\n\n with tables.open_file(filename) as file:\n # unfortunately pandas.read_hdf does not seem compatible with\n # 'with... as...' statements\n parameters = pd.read_hdf(filename, key=dl1_params_lstcam_key)\n\n # convert parameters from meters to degrees:\n for var in ['r', 'width', 'length']:\n parameters[var] *= m2deg\n # time gradient from ns/m to ns/deg\n parameters['time_gradient'] /= m2deg\n\n # We do not convert the x,y, cog coordinates, because only in m can\n # CameraGeometry find the pixel where a given cog falls\n\n # in order to read in the images we have to use tables,\n # because pandas is not compatible with vector columns\n image_table = file.root.dl1.event.telescope.image.LST_LSTCam\n\n # create flatfield mask from the images table. For the time being,\n # trigger type tags are not reliable. 
We first identify flatfield events\n # by their looks.\n image = image_table.col('image')\n flatfield_mask = ((np.median(image, axis=1) >\n ff_min_pixel_charge_median) &\n (np.std(image, axis=1) <\n ff_max_pixel_charge_stddev))\n # obtain the corresponding mask for the parameters table:\n ff_indices = image_table.col('event_id')[flatfield_mask]\n params_flatfield_mask = np.array(\n [(True if evtid in ff_indices else False) for evtid in\n parameters['event_id']])\n\n # then use trigger_source (name of one of the trigger tags in the DL1\n # file) to try to identify pedestals on the parameters table (but we\n # trust better the above empirical identification of flatfield events):\n params_pedestal_mask = (parameters[trigger_source] == 32) & \\\n ~params_flatfield_mask\n # obtain the corresponding pedestal mask for the images table:\n ped_indices = np.array(parameters['event_id'][params_pedestal_mask])\n pedestal_mask = np.array([(True if evtid in ped_indices else False)\n for evtid in image_table.col('event_id')])\n\n # Now obtain by exclusion the masks for cosmics:\n cosmics_mask = ~(pedestal_mask | flatfield_mask)\n params_cosmics_mask = ~(params_pedestal_mask | params_flatfield_mask)\n\n logger.info(f' pedestals: {np.sum(pedestal_mask)}, '\n f' flatfield: {np.sum(flatfield_mask)}, '\n f' cosmics: {np.sum(cosmics_mask)}')\n\n # fill quantities which depend on event-wise (i.e. not\n # pixel-wise) parameters:\n if params_pedestal_mask.sum() > 0:\n dl1datacheck_pedestals.fill_event_wise_info(subrun_index,\n parameters,\n params_pedestal_mask,\n geom, bins)\n if params_flatfield_mask.sum() > 0:\n dl1datacheck_flatfield.fill_event_wise_info(subrun_index,\n parameters,\n params_flatfield_mask,\n geom, bins)\n if params_cosmics_mask.sum() > 0:\n dl1datacheck_cosmics.fill_event_wise_info(subrun_index, parameters,\n params_cosmics_mask,\n geom, bins)\n\n # now fill pixel-wise information:\n if pedestal_mask.sum() > 0:\n dl1datacheck_pedestals.fill_pixel_wise_info(image_table,\n pedestal_mask, bins,\n 'pedestals')\n if flatfield_mask.sum() > 0:\n dl1datacheck_flatfield.fill_pixel_wise_info(image_table,\n flatfield_mask, bins,\n 'flatfield')\n if cosmics_mask.sum() > 0:\n dl1datacheck_cosmics.fill_pixel_wise_info(image_table,\n cosmics_mask, bins,\n 'cosmics')\n\n # Return None for a container that has not been completely filled,\n # otherwise it will give trouble in the plotting stage.\n if pedestal_mask.sum() == 0 or params_pedestal_mask.sum() == 0:\n dl1datacheck_pedestals = None\n if flatfield_mask.sum() == 0 or params_flatfield_mask.sum() == 0:\n dl1datacheck_flatfield = None\n if cosmics_mask.sum() == 0 or params_cosmics_mask.sum() == 0:\n dl1datacheck_cosmics = None\n\n # in case event sof some type are missing, just issue a warning and\n # retun None for the corresponding container, to avoid catastrophic\n # failure when trying to write it out\n\n return dl1datacheck_pedestals, dl1datacheck_flatfield, \\\n dl1datacheck_cosmics\n\n\ndef plot_datacheck(datacheck_filename, out_path=None, muons_dir=None):\n \"\"\"\n\n Parameters\n ----------\n datacheck_filename: list of strings, or pathlib.Path, name(s) of .h5\n files produced by the function check_dl1, starting from DL1 event files\n If it is a list of file names, we expect each of the files to correspond to\n one subrun of the same run.\n out_path: optional; if not given, it will be the same of file filename\n\n Returns\n -------\n None\n\n \"\"\"\n\n logger = logging.getLogger(__name__)\n\n # aspect ratio of pdf pages:\n pagesize = [12., 
7.5]\n\n # in case of >1 input file, we assume they correspond to subruns of a\n # given run. We merge them before proceeding:\n if isinstance(datacheck_filename, list):\n if len(datacheck_filename) > 1:\n merged_filename = merge_dl1datacheck_files(datacheck_filename)\n datacheck_filename = merged_filename\n else:\n # just a single .h5 file:\n datacheck_filename = datacheck_filename[0]\n\n pdf_filename = Path(datacheck_filename).with_suffix('.pdf')\n # set output directory if provided:\n if out_path is not None:\n pdf_filename = Path(out_path, pdf_filename.name)\n\n # Read camera geometry\n cam_description_table = \\\n Table.read(datacheck_filename,\n path='instrument/telescope/camera/LSTCam')\n geom = CameraGeometry.from_table(cam_description_table)\n engineering_geom = geom.transform_to(EngineeringCameraFrame())\n\n with PdfPages(pdf_filename) as pdf:\n # first deal with the DL1 datacheck file, created from DL1 event data:\n file = tables.open_file(datacheck_filename)\n # Read the binning of the stored histograms, and the info on\n # the source from which the trigger type info has been read:\n hist_binning = file.root.dl1datacheck.histogram_binning\n trigger_source = file.root.dl1datacheck.used_trigger_tag[0].decode()\n\n group = file.root.dl1datacheck\n # get the tables for each type of events, check first in each case that\n # the table exists\n\n if '/dl1datacheck/pedestals' in group:\n table_pedestals = file.root.dl1datacheck.pedestals\n else:\n logger.warning('No pedestals table found in ' +\n str(datacheck_filename))\n table_pedestals = None\n\n if '/dl1datacheck/flatfield' in group:\n table_flatfield = file.root.dl1datacheck.flatfield\n else:\n logger.warning('No flatfield table found in ' +\n str(datacheck_filename))\n table_flatfield = None\n\n if '/dl1datacheck/cosmics' in group:\n table_cosmics = file.root.dl1datacheck.cosmics\n else:\n logger.error('No cosmics table found in ' +\n str(datacheck_filename))\n raise RuntimeError\n\n dl1dcheck_tables = [table_flatfield, table_pedestals, table_cosmics]\n labels = ['flatfield (guessed)', 'pedestals (from '+trigger_source+')',\n 'cosmics']\n labels = [x for i, x in enumerate(labels)\n if dl1dcheck_tables[i] is not None]\n dl1dcheck_tables = [x for x in dl1dcheck_tables if x is not None]\n\n fig, axes = plt.subplots(nrows=1, ncols=1, figsize=pagesize)\n fig.tight_layout(pad=0.)\n plt.text(0.1, 0.7, os.path.basename(datacheck_filename),\n fontsize=32, horizontalalignment='left',\n verticalalignment='center')\n plt.text(0.1, 0.6, 'First shower event UTC: ', fontsize=24,\n horizontalalignment='left', verticalalignment='center')\n plt.text(0.1, 0.5, ' UCTS: '+\n str(datetime.utcfromtimestamp\\\n (table_cosmics.col('ucts_time')[0][0])),\n fontsize=24, horizontalalignment='left',\n verticalalignment='center')\n plt.text(0.1, 0.43, ' Dragon: '+\n str(datetime.utcfromtimestamp\\\n (table_cosmics.col('dragon_time')[0][0])),\n fontsize=24, horizontalalignment='left',\n verticalalignment='center')\n plt.text(0.1, 0.36, ' TIB: '+\n str(datetime.utcfromtimestamp\\\n (table_cosmics.col('tib_time')[0][0])),\n fontsize=24, horizontalalignment='left',\n verticalalignment='center')\n axes.axis('off')\n pdf.savefig()\n\n fig, axes = plt.subplots(nrows=2, ncols=2, figsize=pagesize)\n fig.tight_layout(pad=3.0, h_pad=3.0, w_pad=2.0)\n\n plot_trigger_types(dl1dcheck_tables, 'ucts_trigger_type', axes[0, 0])\n plot_trigger_types(dl1dcheck_tables, 'trigger_type', axes[0, 1])\n\n for table, label in zip(dl1dcheck_tables, labels):\n fmt = '-'\n # in case of 
just one subrun, to make index-wise plots visible:\n if len(table.col('subrun_index')) == 1:\n fmt = 'o'\n\n axes[1, 0].plot(table.col('subrun_index'), table.col('num_events'),\n fmt, label=label)\n # elapsed time: would better to take it always from the cosmics\n # table (will be closer to the true one), but number of entries\n # of tables can be different if e.g. pedestals or flatfield events\n # are missing in some subruns!\n elapsed_t = table.col('elapsed_time')\n axes[1, 1].plot(table.col('subrun_index'),\n table.col('num_events') / elapsed_t, fmt,\n label=label)\n axes[1, 0].set_ylabel('number of events')\n axes[1, 1].set_ylabel('rate (events/s)')\n for j in (0, 1):\n axes[1, j].set_xlabel('subrun index')\n axes[1, j].set_yscale('log')\n axes[1, j].legend(loc='best')\n\n pdf.savefig()\n\n fig, axes = plt.subplots(nrows=2, ncols=2, figsize=pagesize)\n fig.tight_layout(pad=3.0, h_pad=3.0, w_pad=3.0)\n\n for time_type in ['ucts_time', 'tib_time', 'dragon_time']:\n axes[0, 0].plot(table_cosmics.col('sampled_event_ids').flatten(),\n table_cosmics.col(time_type).flatten(),\n label=time_type)\n axes[0, 0].set_xlabel('event id')\n axes[0, 0].set_ylabel('timestamp')\n axes[0, 0].legend(loc='best')\n\n hist = 'hist_delta_t'\n bins = hist_binning.col(hist)[0]\n axes[0, 1].hist(bins[:-1], bins,\n weights=np.sum(table_cosmics.col(hist), axis=0),\n histtype='step')\n axes[0, 1].set_xlabel('delta_t (ms) from Dragon timestamp')\n axes[0, 1].set_ylabel('events')\n axes[0, 1].set_yscale('log')\n\n alt_deg = np.rad2deg(table_cosmics.col('mean_alt_tel'))\n axes[1, 0].plot(np.rad2deg(table_cosmics.col('mean_az_tel')), alt_deg,\n fmt)\n axes[1, 0].set_xlabel('telescope azimuth (deg)')\n axes[1, 0].set_ylabel('telescope altitude (deg)')\n axes[1, 0].xaxis.set_major_formatter(mtick.FormatStrFormatter('%.1f'))\n axes[1, 0].yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1f'))\n\n dragon_time = table_cosmics.col('dragon_time')\n # dragon_time contains for each table row a number of times sampled at\n # regular event intervals. 
We get the mean per row (typically =subrun):\n mean_dragon_time = np.mean(dragon_time, axis=1)\n mpl_times = np.array([dates.date2num(datetime.utcfromtimestamp(x))\n for x in mean_dragon_time])\n axes[1, 1].plot_date(mpl_times, alt_deg, fmt=fmt, xdate=True,\n tz='utc')\n axes[1, 1].set_xlabel('time (UTC)')\n axes[1, 1].set_ylabel('telescope altitude (deg)')\n axes[1, 1].yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1f'))\n pdf.savefig()\n\n fig, axes = plt.subplots(nrows=3, ncols=1, figsize=pagesize)\n fig.tight_layout(pad=3.0, h_pad=3.0, w_pad=3.0)\n\n for i, time_type in enumerate(['ucts_time', 'tib_time', 'dragon_time']):\n axes[i].plot(table_cosmics.col('sampled_event_ids').flatten(),\n table_cosmics.col(time_type).flatten(),\n label=time_type)\n axes[i].set_xlabel('event id')\n axes[i].set_ylabel('timestamp')\n axes[i].legend(loc='best')\n pdf.savefig()\n\n\n if table_pedestals is None or len(table_pedestals) == 0:\n write_error_page('pedestals', pagesize)\n else:\n plot_mean_and_stddev(table_pedestals, engineering_geom,\n ['charge_mean', 'charge_stddev'],\n ['Pedestal mean charge (p.e.)',\n 'Pedestal charge std dev (p.e.)',\n 'PEDESTALS, pixel-wise charge info'],\n pagesize, norm='log')\n pdf.savefig()\n\n if table_flatfield is None or len(table_flatfield) == 0:\n write_error_page('flatfield', pagesize)\n else:\n plot_mean_and_stddev(table_flatfield, engineering_geom,\n ['charge_mean', 'charge_stddev'],\n ['Flat-field mean charge (p.e.)',\n 'Flat-field charge std dev (p.e.)',\n 'FLATFIELD, pixel-wise charge info'], pagesize,\n norm='log')\n pdf.savefig()\n\n histograms = ['hist_pixelchargespectrum', 'hist_intensity',\n 'hist_npixels', 'hist_nislands']\n fig, axes = plt.subplots(nrows=2, ncols=2, figsize=pagesize)\n fig.tight_layout(pad=3.0, h_pad=3.0, w_pad=2.0)\n for i, hist in enumerate(histograms):\n bins = hist_binning.col(hist)[0]\n for table in dl1dcheck_tables:\n contents = np.sum(table.col(hist), axis=0)\n axes.flatten()[i].hist(bins[:-1], bins, histtype='step',\n weights=contents/contents.sum(),\n label=table.name)\n axes.flatten()[i].set_yscale('log')\n axes.flatten()[i].set_xscale('log')\n axes.flatten()[i].set_ylabel('fraction of events of the given type')\n axes[0, 0].legend(loc='best')\n axes[0, 0].set_xlabel('Pixel charge (p.e.)')\n axes[0, 1].set_xlabel('Image intensity (p.e.)')\n axes[1, 0].set_xlabel('Number of pixels in image')\n axes[1, 1].set_xlabel('Number of islands in image')\n pdf.savefig()\n\n # We now plot the pixel rates above a few thresholds.\n # Find the thresholds (in pe) for which the event numbers are stored:\n colnames = [name for name in table_cosmics.colnames\n if name.find('num_pulses_above') == 0]\n threshold = [int(name[name.find('above_')+6:name.find('_pe')])\n for name in colnames]\n\n for table, tname in zip([table_pedestals, table_cosmics],\n ['pedestals', 'flatfield']):\n if table is None or len(table) == 0:\n write_error_page(tname, pagesize)\n pdf.savefig()\n continue\n\n # We asume here that 5 such thresholds are present in the\n # dl1datacheck file\n fig, axes = plt.subplots(nrows=2, ncols=3, figsize=pagesize)\n fig.suptitle(table.name.upper() +\n ', relative frequency of pixel charges',\n fontsize='xx-large')\n fig.tight_layout(rect=[0, 0.03, 1, 0.95], pad=3.0, h_pad=3.0,\n w_pad=3.0)\n # sum (for all subruns) the number of events per pixel above the\n # different thresholds:\n pix_events = [np.sum(table.col(colname), axis=0)\n for colname in colnames]\n # total number of entries for this event type:\n norm = 
table.col('num_events').sum()\n if norm > 0:\n fraction = np.array(pix_events)/norm\n for i, frac in enumerate(fraction):\n zscale = 'log' if threshold[i] < 200 and frac.sum() > 0 \\\n else 'lin'\n cam = CameraDisplay(engineering_geom, frac,\n ax=axes.flatten()[i], norm=zscale,\n title='Fraction of >' + str(threshold[i]) +\n ' p.e. pulses')\n cam.add_colorbar(ax=axes.flatten()[i], format='%.0e', pad=0.01)\n # same range for all cameras:\n axes.flatten()[i].set_xlim((axes[0, 0].get_xlim()))\n cam.show()\n for i in [1, 2, 4]:\n axes.flatten()[i].set_ylabel('')\n axes[1, 2].set_xscale('log')\n axes[1, 2].set_yscale('log')\n\n fraction_transposed = fraction.transpose()\n for y in fraction_transposed:\n if y.sum() > 0:\n axes[1, 2].plot(threshold, y, 'o', fillstyle='none',\n alpha=0.2)\n axes[1, 2].set_xlabel('pixel charge (p.e.)')\n axes[1, 2].set_ylabel('fraction of events with charge>x')\n pdf.savefig()\n\n # Some plots on pulse times:\n if table_flatfield is None or len(table_flatfield) == 0:\n write_error_page('flatfield', pagesize)\n else:\n plot_mean_and_stddev(table_flatfield, engineering_geom,\n ['time_mean', 'time_stddev'],\n ['Flat-field mean time (ns)',\n 'Flat-field time std dev (ns)',\n 'FLATFIELD, pixel-wise pulse time info'],\n pagesize)\n pdf.savefig()\n plot_mean_and_stddev(table_flatfield, engineering_geom,\n ['relative_time_mean',\n 'relative_time_stddev'],\n ['Flat-field mean time (ns)',\n 'Flat-field time std dev (ns)',\n 'FLATFIELD, pixel-wise pulse time relative '\n 'to camera mean'],\n pagesize)\n\n pdf.savefig()\n\n plot_mean_and_stddev(table_cosmics, engineering_geom,\n ['time_mean', 'time_stddev'],\n ['Cosmics mean time (ns)',\n 'Cosmics time std dev (ns)',\n 'COSMICS, pixel-wise pulse time info for pixel '\n 'charge > 1 p.e.'], pagesize)\n pdf.savefig()\n\n fig, axes = plt.subplots(nrows=2, ncols=3, figsize=pagesize)\n fig.suptitle('COSMICS, image c.o.g. 
position', fontsize='xx-large')\n fig.tight_layout(rect=[0, 0.03, 1, 0.95], pad=3.0, h_pad=3.0, w_pad=3.0)\n items = ['cog_within_pixel', 'cog_within_pixel_intensity_gt_200']\n titles = ['Image c.o.g.', 'Image c.o.g., intensity>200pe']\n for i, item in enumerate(items):\n events_per_pix = np.sum(table_cosmics.col(item), axis=0)\n all_events = np.sum(events_per_pix)\n event_fraction = events_per_pix / all_events\n cam = CameraDisplay(engineering_geom, event_fraction, ax=axes[i, 0],\n norm='lin', title=titles[i])\n cam.add_colorbar(ax=axes[i, 0])\n cam.show()\n camlog = CameraDisplay(engineering_geom, event_fraction,\n ax=axes[i, 1], norm='log', title=titles[i])\n camlog.add_colorbar(ax=axes[i, 1])\n # lines below needed to get all camera displays of equal size:\n axes[i, 0].set_xlim((axes[0, 0].get_xlim()))\n axes[i, 1].set_xlim((axes[0, 0].get_xlim()))\n cam.show()\n # select pixels which are not on the edge of the camera:\n pix_inside = np.array([len(neig) == 6 for neig in geom.neighbors])\n # histogram the fraction of image cogs contained in those inner\n # pixels, to test homogeneity of distribution:\n # (only positive ones, for log-plotting)\n gt0 = event_fraction > 0\n axes[i, 2].set_xscale('log')\n\n nbins = 1001;\n epb = (events_per_pix[pix_inside & gt0].max() -\n events_per_pix[pix_inside & gt0].min()) / (nbins-1)\n epb = int(epb+1.)\n # make sure the same number of integers in each bin (otherwise we\n # will get \"spikes\" in the Poisson distributiopn later.number of\n # bins has to be large to achieve reasonable bin width with linear\n # binning, needed to avoid the spikes.\n xmin = events_per_pix[pix_inside & gt0].min()-0.5\n xmax = xmin + (nbins-1)*epb\n # convert to event fraction:\n xmin /= all_events\n xmax /= all_events\n\n _, bins, _ = axes[i, 2].\\\n hist(event_fraction[pix_inside & gt0],\n bins=np.linspace(xmin, xmax, nbins))\n #bins=np.logspace(np.log10(xmin), np.log10(xmax), nbins))\n # average event content:\n mu = np.sum(events_per_pix[pix_inside])/pix_inside.sum()\n # get distribution of contents according to Poisson, integrating\n # the distribution within the same bins of the histogram above:\n poiss = np.array([poisson.cdf(x2*all_events, mu) -\n poisson.cdf(x1*all_events, mu) for\n x1, x2 in zip(bins[:-1], bins[1:])])\n # from probability to number of pixels:\n npixels = poiss * pix_inside.sum()\n # log bin centers:\n k = np.sqrt(bins[:-1]*bins[1:])\n axes[i, 2].plot(k[npixels > 0], npixels[npixels > 0],\n drawstyle='steps-mid',\n label='Poisson for uniform density')\n axes[i, 2].set_ylim(top=1.2*axes[i, 2].get_ylim()[1])\n axes[i, 2].legend(loc='best')\n axes[i, 2].set_xlabel('Fraction of events')\n axes[i, 2].set_ylabel('# of pixels (excluding edge pixels)')\n pdf.savefig()\n\n fig, axes = plt.subplots(nrows=2, ncols=2, figsize=pagesize)\n fig.suptitle('COSMICS, image parameters', fontsize='xx-large')\n fig.tight_layout(rect=[0.05, 0.05, 1.0, 0.9],\n pad=0., h_pad=3.0, w_pad=2.0)\n histos = ['hist_dist0', 'hist_dist0_intensity_gt_200']\n for i, hist in enumerate(histos):\n bins = hist_binning.col(hist)[0]\n # normalize bin content by area of the corresponding ring:\n ringarea = np.pi*(bins[1:]**2-bins[:-1]**2)*u.deg**2\n\n axes[i, 0].hist(bins[:-1], bins,\n weights=np.sum(table_cosmics.col(hist), axis=0) /\n ringarea.value, histtype='step')\n axes[i, 0].set_xlabel('distance (deg)')\n axes[i, 0].set_ylabel('events per deg2')\n axes[0, 0].set_title('cog radial distribution')\n axes[1, 0].set_title('cog radial distribution, intensity>200pe')\n\n histos = 
['hist_width', 'hist_length']\n for i, hist in enumerate(histos):\n bins = hist_binning.col(hist)[0]\n x = np.array([xx for xx in bins[0][:-1] for __ in bins[1][:-1]])\n y = np.array([yy for __ in bins[0][:-1] for yy in bins[1][:-1]])\n contents = np.sum(table_cosmics.col(hist), axis=0).flatten()\n _, _, _, image = axes[i, 1].hist2d(x, y, bins=bins,\n weights=contents,\n norm=colors.LogNorm())\n plt.colorbar(image, ax=axes[i, 1])\n axes[i, 1].set_xscale('log')\n axes[i, 1].set_xlabel('Intensity (p.e.)')\n axes[0, 1].set_ylabel('Width (deg)')\n axes[1, 1].set_ylabel('Length (deg)')\n pdf.savefig()\n\n fig, axes = plt.subplots(nrows=2, ncols=2, figsize=pagesize)\n fig.suptitle('COSMICS, image parameters', fontsize='xx-large')\n fig.tight_layout(rect=[0.05, 0.05, 1.0, 0.9],\n pad=0., h_pad=3.0, w_pad=2.0)\n histos = ['hist_skewness', 'hist_intercept', 'hist_tgrad_vs_length',\n 'hist_tgrad_vs_length_intensity_gt_200']\n for i, hist in enumerate(histos):\n bins = hist_binning.col(hist)[0]\n x = np.array([xx for xx in bins[0][:-1] for __ in bins[1][:-1]])\n y = np.array([yy for __ in bins[0][:-1] for yy in bins[1][:-1]])\n contents = np.sum(table_cosmics.col(hist), axis=0).flatten()\n _, _, _, image = axes.flatten()[i].hist2d(x, y, bins=bins,\n weights=contents,\n norm=colors.LogNorm())\n plt.colorbar(image, ax=axes.flatten()[i])\n axes[0, 0].set_ylabel('Skewness')\n axes[0, 0].grid(linewidth=0.3, linestyle=':')\n axes[0, 1].set_ylabel('Intercept (fitted time @ charge cog) (ns)')\n for j in [0, 1]:\n axes[0, j].set_xscale('log')\n axes[0, j].set_xlabel('Intensity')\n axes[1, j].set_xlabel('Length (deg)')\n axes[1, j].set_ylabel('Time gradient (ns/deg)')\n axes[1, 0].set_title('Time gradient vs. Length')\n axes[1, 1].set_title('Time gradient vs. Length, intensity>200pe')\n pdf.savefig()\n # End of the plots created from the DL1 datacheck file\n # keep some info needed for muon ring plots:\n subrun_list = np.array(table.col('subrun_index'))\n elapsed_t = np.array(table_cosmics.col('elapsed_time'))\n file.close()\n\n # Now we go for the muons .fits files, created in the R0 to DL1 stage.\n # We look for the files with the same subrun indices that have been\n # processed.\n muon_filenames = []\n dcfile = \\\n parse_datacheck_dl1_filename(os.path.basename(datacheck_filename))\n for i in subrun_list:\n if muons_dir is not None:\n dirname = muons_dir\n # if no directory is provided, we assume the muons fits files are\n # in the same directory of the datacheck file.\n else:\n dirname = os.path.dirname(datacheck_filename)\n name = run_to_muon_filename(dcfile.tel_id, dcfile.run, i, None,\n False)\n if Path(dirname, name).exists():\n muon_filenames.append(Path(dirname,name))\n else:\n logger.warning(f'File {str(Path(dirname,name))} not found. '\n f'No muon information will be plotted!')\n return\n\n muons_table = Table.read(muon_filenames[0])\n contained_muons = muons_table[muons_table['ring_containment'] > 0.999]\n # to get some quantities vs. subrun index:\n num_rings = np.array([len(muons_table)])\n num_contained_rings = np.array([len(contained_muons)])\n mean_width = np.array(np.mean(contained_muons['ring_width']))\n sem_width = np.array(sem(contained_muons['ring_width']))\n mean_effi = np.array(np.mean(contained_muons['muon_efficiency']))\n sem_effi = np.array(sem(contained_muons['muon_efficiency']))\n\n for filename in muon_filenames[1:]:\n t = Table.read(filename)\n tcont = t[t['ring_containment'] > 0.999]\n # to get some quantities vs. 
subrun index:\n num_rings = np.append(num_rings, len(t))\n num_contained_rings = np.append(num_contained_rings, len(tcont))\n mean_width = np.append(mean_width, np.mean(tcont['ring_width']))\n sem_width = np.append(sem_width, sem(tcont['ring_width']))\n mean_effi = np.append(mean_effi, np.mean(tcont['muon_efficiency']))\n sem_effi = np.append(sem_effi, sem(tcont['muon_efficiency']))\n # to get the whole muon rings tables:\n muons_table = vstack([muons_table, t])\n contained_muons = vstack([contained_muons, tcont])\n\n fig, axes = plt.subplots(nrows=2, ncols=2, figsize=pagesize)\n fig.suptitle('MUON RINGS', fontsize='xx-large')\n fig.tight_layout(rect=[0.0, 0.0, 1.0, 0.97],\n pad=3.0, h_pad=3.0, w_pad=2.0)\n fmt = '-'\n if len(subrun_list) == 1:\n fmt = 'o'\n axes[0, 0].set_ylim(0, num_rings.max()*1.15)\n axes[0, 0].plot(subrun_list, num_rings, fmt, label='all rings in files')\n axes[0, 0].plot(subrun_list, num_contained_rings, fmt,\n label='contained rings')\n axes[0, 0].set_ylabel('number of muon rings per subrun')\n axes[0, 0].legend(loc='best')\n\n muon_rate = num_rings / elapsed_t\n contained_muon_rate = num_contained_rings / elapsed_t\n axes[0, 1].set_ylim(0, muon_rate.max()*1.15)\n axes[0, 1].plot(subrun_list, muon_rate, fmt, label='all rings in files')\n axes[0, 1].plot(subrun_list, contained_muon_rate, fmt,\n label='contained rings')\n axes[0, 1].set_ylabel('rate of muon rings (events/s)')\n axes[0, 1].legend(loc='best')\n for j in (0, 1):\n axes[0, j].set_xlabel('subrun index')\n axes[1, 0].hist(muons_table['ring_containment'],\n bins=np.linspace(0., 1., 51),\n weights=np.ones(len(muons_table))/num_rings.sum())\n axes[1, 0].set_xlabel('ring containment')\n binning = np.linspace(0., 1., 31)\n axes[1, 1].hist(muons_table['ring_completeness'],\n bins=binning, histtype='step',\n weights=np.ones(len(muons_table))/num_rings.sum(),\n label='all rings in files')\n axes[1, 1].hist(contained_muons['ring_completeness'], bins=binning,\n histtype='step',\n weights=np.ones(len(contained_muons))/num_rings.sum(),\n label='contained rings')\n axes[1, 1].set_xlabel('ring completeness')\n axes[1, 1].legend(loc='best')\n for j in (0, 1):\n axes[1, j].set_ylabel('fraction of rings')\n pdf.savefig()\n\n fig, axes = plt.subplots(nrows=2, ncols=3, figsize=pagesize)\n fig.suptitle('MUON RINGS with containment = 1', fontsize='xx-large')\n fig.tight_layout(rect=[0.0, 0.0, 1.0, 0.97],\n pad=3.0, h_pad=3.0, w_pad=2.0)\n axes[0, 0].hist(np.sqrt(contained_muons['ring_center_x']**2. 
+\n contained_muons['ring_center_y']**2.),\n bins=np.linspace(0., 2., 51))\n axes[0, 0].set_xlabel('ring center, distance from camera center (m)')\n axes[0, 0].set_ylabel('number of rings')\n axes[1, 0].plot(contained_muons['impact_parameter'],\n contained_muons['ring_completeness'], 'x', alpha=0.5)\n axes[1, 0].set_xlabel('reconstructed impact parameter (m)')\n axes[1, 0].set_ylabel('ring completeness')\n axes[0, 1].plot(contained_muons['ring_radius'],\n contained_muons['ring_size'], 'x', alpha=0.5)\n axes[0, 1].set_xlabel('ring radius (deg)')\n axes[0, 1].set_ylabel('ring intensity (p.e.)')\n axes[1, 1].plot(contained_muons['ring_radius'],\n contained_muons['ring_width'], 'x', alpha=0.5)\n axes[1, 1].set_xlabel('ring radius (deg)')\n axes[1, 1].set_ylabel('ring width (deg)')\n axes[0, 2].hist(contained_muons['ring_size'],\n bins=np.linspace(0., 4.e3, 41))\n axes[0, 2].set_xlabel('ring intensity (p.e.)')\n axes[0, 2].set_ylabel('number of rings')\n axes[1, 2].hist(contained_muons['ring_width'],\n bins=np.linspace(0., 0.3, 61))\n axes[1, 2].set_xlabel('ring width (deg)')\n axes[1, 2].set_ylabel('number of rings')\n pdf.savefig()\n\n fig, axes = plt.subplots(nrows=2, ncols=3, figsize=pagesize)\n fig.suptitle('MUON RINGS with containment = 1', fontsize='xx-large')\n fig.tight_layout(rect=[0.0, 0.0, 1.0, 0.97],\n pad=2.0, h_pad=3.0, w_pad=3.0)\n axes[0, 0].plot(contained_muons['hg_peak_sample'],\n contained_muons['lg_peak_sample'], 'x', alpha=0.5)\n axes[0, 0].set_xlabel('High gain peak sample in R1 waveform')\n axes[0, 0].set_ylabel('Low gain peak sample in R1 waveform')\n binning = np.linspace(-0.5, 38.5, 39)\n axes[1, 0].hist(contained_muons['hg_peak_sample'], bins=binning,\n histtype='step', label='HG')\n axes[1, 0].hist(contained_muons['lg_peak_sample'], bins=binning,\n histtype='step', label='LG')\n axes[1, 0].set_xlabel('peak sample in R1 waveform')\n axes[1, 0].set_ylabel('number of rings')\n axes[1, 0].legend(loc='best')\n axes[0, 1].hist(contained_muons['muon_efficiency'],\n bins=np.linspace(0., 0.5, 51))\n axes[0, 1].set_xlabel('estimated telescope efficiency for muons')\n axes[0, 1].set_ylabel('number of rings')\n axes[1, 1].plot(contained_muons['ring_width'],\n contained_muons['muon_efficiency'], 'x', alpha=0.5)\n axes[1, 1].set_ylim(0., 0.5)\n axes[1, 1].set_xlabel('ring width (deg)')\n axes[1, 1].set_ylabel('estimated telescope efficiency for muons')\n axes[0, 2].errorbar(subrun_list, mean_effi, yerr=sem_effi, fmt='o',\n markersize=3.)\n\n axes[0, 2].set_xlabel('subrun index')\n axes[0, 2].set_ylabel('estimated telescope efficiency for muons')\n axes[0, 2].grid(linewidth=0.3, linestyle=':')\n axes[0, 2].set_ylim(0., 0.5)\n axes[1, 2].errorbar(subrun_list, mean_width, yerr=sem_width, fmt='o',\n markersize=3.)\n axes[1, 2].set_xlabel('subrun index')\n axes[1, 2].set_ylabel('ring width (deg)')\n axes[1, 2].grid(linewidth=0.3, linestyle=':')\n axes[1, 2].set_ylim(0., 0.3)\n pdf.savefig()\n\n\ndef plot_trigger_types(dchecktables, trigger_name, axes):\n \"\"\"\n\n Parameters\n ----------\n dchecktables: array of python tables created with DL1DataCheckContainer\n containers (each row is one subrun). The plotted trigger type statistics\n will be the global ones, adding up the numbers from all the tables and\n all the rows in each table.\n Inside the table the trigger type columns have shape (n,10,2). n is the\n number of rows (one per subrun). 10 is the number of possible trigger\n types (just fixed to a safely large value). 
The remaining 2 are the pairs\n (trigger_id, number of entries with that id)\n\n trigger_name: name of the trigger type column in the tables\n axes: where to place the plots\n\n Returns\n -------\n None\n\n \"\"\"\n\n # find all trigger types found in the subruns, and display histogram:\n # first merge subrun-wise tables:\n tt = dchecktables[0].col(trigger_name)\n for table in dchecktables[1:]:\n tt = np.append(tt, table.col(trigger_name), axis=0)\n # keep only entries with number of events > 0 (existing trig types):\n tt = tt[tt[:, :, 1] > 0]\n trig_types = np.unique(tt[:, 0])\n num_triggers = np.array([(tt[:, 1][tt[:, 0] == trig]).sum()\n for trig in trig_types])\n x = np.arange(2+len(trig_types))\n # for better display, leave some space on the sides of the bars:\n y = np.append([0], np.append(num_triggers, [0]))\n labels = ['']+[str(i) for i in trig_types]+['']\n width = 0.3\n axes.bar(x, y, width)\n axes.set_xticks(x)\n axes.set_xticklabels(labels)\n axes.set_yscale('log')\n axes.set_xlabel(trigger_name)\n axes.set_ylabel('number of events')\n\n\ndef plot_mean_and_stddev(table, camgeom, columns, labels, pagesize, norm='lin'):\n \"\"\"\n Parameters\n ----------\n table: python table containing pixel-wise information to be displayed\n camgeom: camera geometry\n columns: list of 2 strings, columns of 'table', first one is the mean and\n the second the std deviation to be plotted\n labels: plot titles\n pagesize: [width, height] in cm\n norm: lin or log, z-scale of camera displays\n\n Returns\n -------\n None\n\n The subrun-wise mean and std dev values are used to calculate the\n run-wise (i.e. for all processed subruns which appear in the table)\n counterparts of the same, which are then plotted.\n\n \"\"\"\n\n logger = logging.getLogger(__name__)\n\n # calculate pixel-wise mean and standard deviation for the whole run,\n # from the subrun-wise values:\n mean = np.sum(np.multiply(table.col(columns[0]),\n table.col('num_events')[:, None]),\n axis=0) / np.sum(table.col('num_events'))\n stddev = np.sqrt(np.sum(np.multiply(table.col(columns[1]) ** 2,\n table.col('num_events')[:, None]),\n axis=0) / np.sum(table.col('num_events')))\n\n if np.isnan(mean).sum() > 0:\n logger.info(f'Pixels with NaNs in {columns[0]}: '\n f'{np.array(camgeom.pix_id.tolist())[np.isnan(mean)]}')\n\n # plot mean and std dev (of e.g. pedestal charge or time), as camera\n # display, vs. pixel id, and as a histogram:\n fig, axes = plt.subplots(nrows=2, ncols=3,\n figsize=pagesize)\n fig.suptitle(labels[2], fontsize='xx-large')\n fig.tight_layout(rect=[0, 0.03, 1, 0.98], pad=3.0, h_pad=3.0, w_pad=2.0)\n cam = CameraDisplay(camgeom, mean, ax=axes[0, 0], norm=norm,\n title=labels[0])\n cam.add_colorbar(ax=axes[0, 0])\n cam.show()\n cam = CameraDisplay(camgeom, stddev, ax=axes[1, 0], norm=norm,\n title=labels[1])\n cam.add_colorbar(ax=axes[1, 0])\n # line below needed to get the top and bottom camera displays of equal size:\n axes[1, 0].set_xlim((axes[0, 0].get_xlim()))\n cam.show()\n # plot mean vs. 
pixe_id and as histogram:\n axes[0, 1].plot(camgeom.pix_id, mean)\n axes[0, 1].set_xlabel('Pixel id')\n axes[0, 1].set_ylabel(labels[0])\n axes[0, 2].set_yscale('log')\n axes[0, 2].hist(mean[~np.isnan(mean)], bins=200)\n axes[0, 2].set_xlabel(labels[0])\n axes[0, 2].set_ylabel('Number of pixels')\n # now the standard deviation:\n axes[1, 1].plot(camgeom.pix_id, stddev)\n axes[1, 1].set_xlabel('Pixel id')\n axes[1, 1].set_ylabel(labels[1])\n axes[1, 2].set_yscale('log')\n axes[1, 2].hist(stddev[~np.isnan(stddev)], bins=200)\n axes[1, 2].set_xlabel(labels[1])\n axes[1, 2].set_ylabel('Number of pixels')\n\n\ndef write_error_page(tablename, pagesize):\n \"\"\"\n\n Parameters\n ----------\n tablename: name of the table which has no entries to be plotted\n pagesize: [width, height] (cm)\n Returns\n -------\n None\n \"\"\"\n _, axes = plt.subplots(nrows=1, ncols=1, figsize=pagesize)\n plt.text(0.5, 0.5, 'Sorry, no ' + tablename + ' to plot here!',\n fontsize=44, horizontalalignment='center',\n verticalalignment='center')\n axes.axis('off')\n\n\ndef merge_dl1datacheck_files(file_list):\n \"\"\"\n\n Parameters\n ----------\n file_list: list of strings, names of files of the kind produced by\n function check_dl1\n\n Returns\n -------\n merged_filename: name of the .h5 file which contains all the rows of the\n files in the list (in the tables cosmics, pedestals and flatfield)\n The camera geometry, histogram_binnings and used_trigger_tag are copied\n just from the first file\n\n \"\"\"\n\n logger = logging.getLogger(__name__)\n\n first_file_name = file_list[0]\n first_file = tables.open_file(first_file_name)\n # get run number and build the name of the merged file:\n file = parse_datacheck_dl1_filename(os.path.basename(first_file_name))\n merged_filename = run_to_datacheck_dl1_filename(file.tel_id, file.run,\n None, None)\n # Store the merged file in the same directory as the subrun-wise files:\n merged_filename = Path(os.path.dirname(first_file_name), merged_filename)\n\n # The input (sub-run wise) list should never contain the name of the\n # run-wise file that we will produce by merging. Just to avoid accidents:\n if str(merged_filename) in file_list:\n file_list.remove(str(merged_filename))\n\n print(file_list)\n\n merged_file = tables.open_file(merged_filename, 'w')\n merged_file.create_group('/', 'dl1datacheck')\n merged_file.create_group('/', 'instrument')\n\n # The tables in the merged file will be copied from the first file. If a\n # table is missing in the first file (e.g. 
pedestals) it will be left\n # empty in the whole merged file.\n\n if '/dl1datacheck/pedestals' in first_file.root.dl1datacheck:\n pedestals = \\\n first_file.copy_node('/dl1datacheck', name='pedestals',\n newparent=merged_file.root.dl1datacheck)\n else:\n pedestals = None\n\n if '/dl1datacheck/flatfield' in first_file.root.dl1datacheck:\n flatfield = \\\n first_file.copy_node('/dl1datacheck', name='flatfield',\n newparent=merged_file.root.dl1datacheck)\n else:\n flatfield = None\n\n # the ones below are compulsory, an exception will be raised if not present:\n cosmics = first_file.copy_node('/dl1datacheck', name='cosmics',\n newparent=merged_file.root.dl1datacheck)\n first_file.copy_node('/dl1datacheck', name='histogram_binning',\n newparent=merged_file.root.dl1datacheck)\n first_file.copy_node('/dl1datacheck', name='used_trigger_tag',\n newparent=merged_file.root.dl1datacheck)\n first_file.close()\n\n for filename in file_list[1:]:\n file = tables.open_file(filename)\n if pedestals is not None:\n if '/dl1datacheck/pedestals' in file.root.dl1datacheck:\n pedestals.append(file.root.dl1datacheck.pedestals[:])\n else:\n logger.warning('Table pedestals is missing in file ' +\n str(filename))\n if flatfield is not None:\n if '/dl1datacheck/flatfield' in file.root.dl1datacheck:\n flatfield.append(file.root.dl1datacheck.flatfield[:])\n else:\n logger.warning('Table flatfield is missing in file ' +\n str(filename))\n\n cosmics.append(file.root.dl1datacheck.cosmics[:])\n file. close()\n\n merged_file.close()\n\n # For copying the camera geometry we use astropy tables to avoid a\n # NaturalNameWarning from tables/path.py\n cam_description_table = \\\n Table.read(first_file_name, path='instrument/telescope/camera/LSTCam')\n geom = CameraGeometry.from_table(cam_description_table)\n geom.to_table().write(merged_filename,\n path=f'/instrument/telescope/camera/LSTCam',\n append=True, serialize_meta=True)\n\n return merged_filename\n"
]
| [
[
"matplotlib.pyplot.text",
"numpy.median",
"numpy.mean",
"pandas.read_hdf",
"scipy.stats.sem",
"scipy.stats.poisson.cdf",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.subplots",
"numpy.sqrt",
"numpy.append",
"numpy.array",
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.ticker.FormatStrFormatter",
"numpy.std",
"matplotlib.colors.LogNorm",
"numpy.isnan",
"numpy.sum",
"numpy.rad2deg",
"numpy.linspace",
"numpy.unique"
]
]
|
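The lstchain datacheck code stored in the row above combines subrun-wise pixel statistics into run-wise values (see its `plot_mean_and_stddev` helper): per-pixel means are weighted by each subrun's event count, and the run-wise standard deviation is the square root of the event-weighted mean of the subrun variances. Below is a minimal, self-contained sketch of that aggregation using made-up subrun numbers rather than a real DL1 datacheck table; the array names are stand-ins for the `charge_mean`, `charge_stddev` and `num_events` columns referenced in that code.

```python
import numpy as np

# Hypothetical per-subrun statistics for 4 pixels over 3 subruns
# (stand-ins for the 'charge_mean' / 'charge_stddev' columns and
# the 'num_events' weights used by plot_mean_and_stddev above).
subrun_mean = np.array([[10.0, 11.0, 9.5, 10.2],
                        [10.5, 10.8, 9.7, 10.0],
                        [ 9.8, 11.2, 9.4, 10.4]])
subrun_stddev = np.array([[1.0, 1.2, 0.9, 1.1],
                          [1.1, 1.0, 1.0, 1.2],
                          [0.9, 1.3, 0.8, 1.0]])
num_events = np.array([1000, 1200, 800])

# Event-weighted run-wise mean per pixel:
run_mean = np.sum(subrun_mean * num_events[:, None], axis=0) / num_events.sum()

# Run-wise std dev: sqrt of the event-weighted mean of the subrun variances:
run_stddev = np.sqrt(
    np.sum(subrun_stddev ** 2 * num_events[:, None], axis=0) / num_events.sum()
)

print(run_mean)    # per-pixel mean over the whole (toy) run
print(run_stddev)  # per-pixel std dev over the whole (toy) run
```

Weighting by the event count per subrun is what lets the row's code merge subruns of unequal length into a single run-wise camera display.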
Liang813/GaitGraph | [
"749aa32ce079f0afaa39b15a90c8f1664f864436"
]
| [
"src/pose_estimator/utils.py"
]
| [
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# ------------------------------------------------------------------------------\n\nimport math\n\nimport numpy as np\nimport cv2\n\n\ndef transform_preds(coords, center, scale, output_size):\n target_coords = np.zeros(coords.shape)\n trans = get_affine_transform(center, scale, 0, output_size, inv=1)\n for p in range(coords.shape[0]):\n target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)\n return target_coords\n\n\ndef get_affine_transform(\n center, scale, rot, output_size,\n shift=np.array([0, 0], dtype=np.float32), inv=0\n):\n if not isinstance(scale, np.ndarray) and not isinstance(scale, list):\n print(scale)\n scale = np.array([scale, scale])\n\n scale_tmp = scale * 200.0\n src_w = scale_tmp[0]\n dst_w = output_size[0]\n dst_h = output_size[1]\n\n rot_rad = np.pi * rot / 180\n src_dir = get_dir([0, src_w * -0.5], rot_rad)\n dst_dir = np.array([0, dst_w * -0.5], np.float32)\n\n src = np.zeros((3, 2), dtype=np.float32)\n dst = np.zeros((3, 2), dtype=np.float32)\n src[0, :] = center + scale_tmp * shift\n src[1, :] = center + src_dir + scale_tmp * shift\n dst[0, :] = [dst_w * 0.5, dst_h * 0.5]\n dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir\n\n src[2:, :] = get_3rd_point(src[0, :], src[1, :])\n dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])\n\n if inv:\n trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n else:\n trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n\n return trans\n\n\ndef affine_transform(pt, t):\n new_pt = np.array([pt[0], pt[1], 1.]).T\n new_pt = np.dot(t, new_pt)\n return new_pt[:2]\n\n\ndef get_3rd_point(a, b):\n direct = a - b\n return b + np.array([-direct[1], direct[0]], dtype=np.float32)\n\n\ndef get_dir(src_point, rot_rad):\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n\n src_result = [0, 0]\n src_result[0] = src_point[0] * cs - src_point[1] * sn\n src_result[1] = src_point[0] * sn + src_point[1] * cs\n\n return src_result\n\n\ndef get_max_preds(batch_heatmaps):\n \"\"\"\n get predictions from score maps\n heatmaps: numpy.ndarray([batch_size, num_joints, height, width])\n \"\"\"\n assert isinstance(batch_heatmaps, np.ndarray), \\\n 'batch_heatmaps should be numpy.ndarray'\n assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'\n\n batch_size = batch_heatmaps.shape[0]\n num_joints = batch_heatmaps.shape[1]\n width = batch_heatmaps.shape[3]\n heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))\n idx = np.argmax(heatmaps_reshaped, 2)\n maxvals = np.amax(heatmaps_reshaped, 2)\n\n maxvals = maxvals.reshape((batch_size, num_joints, 1))\n idx = idx.reshape((batch_size, num_joints, 1))\n\n preds = np.tile(idx, (1, 1, 2)).astype(np.float32)\n\n preds[:, :, 0] = (preds[:, :, 0]) % width\n preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)\n\n pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))\n pred_mask = pred_mask.astype(np.float32)\n\n preds *= pred_mask\n return preds, maxvals\n\n\ndef get_final_preds(config, batch_heatmaps, center, scale):\n coords, maxvals = get_max_preds(batch_heatmaps)\n\n heatmap_height = batch_heatmaps.shape[2]\n heatmap_width = batch_heatmaps.shape[3]\n\n # post-processing\n if config.TEST.POST_PROCESS:\n for n in range(coords.shape[0]):\n for p in range(coords.shape[1]):\n hm = batch_heatmaps[n][p]\n px = int(math.floor(coords[n][p][0] + 0.5))\n py = 
int(math.floor(coords[n][p][1] + 0.5))\n if 1 < px < heatmap_width-1 and 1 < py < heatmap_height-1:\n diff = np.array(\n [\n hm[py][px+1] - hm[py][px-1],\n hm[py+1][px]-hm[py-1][px]\n ]\n )\n coords[n][p] += np.sign(diff) * .25\n\n preds = coords.copy()\n\n # Transform back\n for i in range(coords.shape[0]):\n preds[i] = transform_preds(\n coords[i], center[i], scale[i], [heatmap_width, heatmap_height]\n )\n\n return preds, maxvals\n"
]
| [
[
"numpy.array",
"numpy.dot",
"numpy.sin",
"numpy.zeros",
"numpy.tile",
"numpy.sign",
"numpy.float32",
"numpy.argmax",
"numpy.amax",
"numpy.cos",
"numpy.greater",
"numpy.floor"
]
]
|
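The `get_max_preds` helper in the GaitGraph row above converts a batch of joint heatmaps into peak coordinates by flattening each heatmap, taking the argmax, and mapping the flat index back to a column/row pair. The sketch below reproduces that index-to-coordinate step on a synthetic heatmap; the toy shapes and values are illustrative only, not taken from the repository.

```python
import numpy as np

# Toy batch: 1 image, 2 joints, 4x5 heatmaps (synthetic scores).
batch_heatmaps = np.zeros((1, 2, 4, 5), dtype=np.float32)
batch_heatmaps[0, 0, 2, 3] = 0.9   # joint 0 peaks at (x=3, y=2)
batch_heatmaps[0, 1, 1, 4] = 0.7   # joint 1 peaks at (x=4, y=1)

batch_size, num_joints, _, width = batch_heatmaps.shape
flat = batch_heatmaps.reshape((batch_size, num_joints, -1))

idx = np.argmax(flat, axis=2)      # flat index of each joint's peak
maxvals = np.amax(flat, axis=2)    # peak scores

# Duplicate the flat index into an (x, y) pair, then decode it:
preds = np.tile(idx[..., None], (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = preds[:, :, 0] % width            # x = index modulo width
preds[:, :, 1] = np.floor(preds[:, :, 1] / width)  # y = index // width

# Suppress joints whose peak score is not positive:
preds *= np.greater(maxvals[..., None], 0.0)

print(preds)     # [[[3. 2.] [4. 1.]]]
print(maxvals)   # [[0.9 0.7]]
```

The quarter-pixel refinement and the affine transform back to the original image frame (`get_final_preds` / `transform_preds` in the same file) would then be applied on top of these integer peak positions.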
anandojha/gamd_we | [
"2e4feb1cf757cef2615f0bdeaeae52e07a2b3071"
]
| [
"gamd_we/gamd_we_alanine_dipeptide.py"
]
| [
"import matplotlib.image as mpimg\nimport matplotlib.style as style\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nfrom simtk.openmm.app import *\nfrom simtk.openmm import *\nfrom simtk.unit import *\nfrom sys import stdout\nimport seaborn as sns\nfrom math import exp\nimport pandas as pd\nimport mdtraj as md\nimport pickle as pk\nimport numpy as np\nimport statistics\nimport itertools\nimport fileinput\nimport fnmatch\nimport shutil\nimport random\nimport math\nimport os\nimport re\n\n\ndef fix_cap_remove_ace(pdb_file):\n\n \"\"\"\n Removes the H atoms of the capped ACE residue.\n\n \"\"\"\n\n remove_words = [\n \"H1 ACE\",\n \"H2 ACE\",\n \"H3 ACE\",\n \"H31 ACE\",\n \"H32 ACE\",\n \"H33 ACE\",\n ]\n with open(pdb_file) as oldfile, open(\"intermediate.pdb\", \"w\") as newfile:\n for line in oldfile:\n if not any(word in line for word in remove_words):\n newfile.write(line)\n command = \"rm -rf \" + pdb_file\n os.system(command)\n command = \"mv intermediate.pdb \" + pdb_file\n os.system(command)\n\n\ndef fix_cap_replace_ace(pdb_file):\n\n \"\"\"\n Replaces the alpha carbon atom of the\n capped ACE residue with a standard name.\n\n \"\"\"\n\n fin = open(pdb_file, \"rt\")\n data = fin.read()\n data = data.replace(\"CA ACE\", \"CH3 ACE\")\n data = data.replace(\"C ACE\", \"CH3 ACE\")\n fin.close()\n fin = open(pdb_file, \"wt\")\n fin.write(data)\n fin.close()\n\n\ndef fix_cap_remove_nme(pdb_file):\n\n \"\"\"\n Removes the H atoms of the capped NME residue.\n\n \"\"\"\n\n remove_words = [\n \"H1 NME\",\n \"H2 NME\",\n \"H3 NME\",\n \"H31 NME\",\n \"H32 NME\",\n \"H33 NME\",\n ]\n with open(pdb_file) as oldfile, open(\"intermediate.pdb\", \"w\") as newfile:\n for line in oldfile:\n if not any(word in line for word in remove_words):\n newfile.write(line)\n command = \"rm -rf \" + pdb_file\n os.system(command)\n command = \"mv intermediate.pdb \" + pdb_file\n os.system(command)\n\n\ndef fix_cap_replace_nme(pdb_file):\n\n \"\"\"\n Replaces the alpha carbon atom of the\n capped NME residue with a standard name.\n\n \"\"\"\n\n fin = open(pdb_file, \"rt\")\n data = fin.read()\n data = data.replace(\"CA NME\", \"CH3 NME\")\n data = data.replace(\"C NME\", \"CH3 NME\")\n fin.close()\n fin = open(pdb_file, \"wt\")\n fin.write(data)\n fin.close()\n\n\ndef prepare_alanine_dipeptide():\n\n \"\"\"\n\n Prepares the alanine dipeptide system for Gaussian\n Accelerated Molecular Dynamics (GaMD) simulations.\n Downloads the pdb structure from\n https://markovmodel.github.io/mdshare/ALA2/ and\n parameterizes it using General Amber Force Field\n (GAFF).\n\n \"\"\"\n\n os.system(\n \"curl -O http://ftp.imp.fu-berlin.de/pub/cmb-data/alanine-dipeptide-nowater.pdb\"\n )\n os.system(\n \"rm -rf system_inputs\"\n ) # Removes any existing directory named system_inputs\n os.system(\"mkdir system_inputs\") # Creates a directory named system_inputs\n cwd = os.getcwd()\n target_dir = cwd + \"/\" + \"system_inputs\"\n os.system(\"pdb4amber -i alanine-dipeptide-nowater.pdb -o intermediate.pdb\")\n # Delete HH31, HH32 and HH33 from the ACE residue (tleap adds them later)\n remove_words = [\"HH31 ACE\", \"HH32 ACE\", \"HH33 ACE\"]\n with open(\"intermediate.pdb\") as oldfile, open(\n \"system.pdb\", \"w\"\n ) as newfile:\n for line in oldfile:\n if not any(word in line for word in remove_words):\n newfile.write(line)\n os.system(\"rm -rf intermediate*\")\n # save the tleap script to file\n with open(\"input_TIP3P.leap\", \"w\") as f:\n f.write(\n \"\"\"\n source leaprc.protein.ff14SB\n source 
leaprc.water.tip3p\n set default FlexibleWater on\n set default PBRadii mbondi2\n pdb = loadpdb system.pdb\n solvateBox pdb TIP3PBOX 15\n saveamberparm pdb system_TIP3P.prmtop system_TIP3P.inpcrd\n saveamberparm pdb system_TIP3P.parm7 system_TIP3P.rst7\n savepdb pdb system_TIP3P.pdb\n quit\n \"\"\"\n )\n os.system(\"tleap -f input_TIP3P.leap\")\n os.system(\"rm -rf leap.log\")\n shutil.copy(\n cwd + \"/\" + \"system_TIP3P.inpcrd\",\n target_dir + \"/\" + \"system_TIP3P.inpcrd\",\n )\n shutil.copy(\n cwd + \"/\" + \"system_TIP3P.parm7\",\n target_dir + \"/\" + \"system_TIP3P.parm7\",\n )\n shutil.copy(\n cwd + \"/\" + \"system_TIP3P.pdb\", target_dir + \"/\" + \"system_TIP3P.pdb\"\n )\n shutil.copy(\n cwd + \"/\" + \"system_TIP3P.prmtop\",\n target_dir + \"/\" + \"system_TIP3P.prmtop\",\n )\n shutil.copy(\n cwd + \"/\" + \"system_TIP3P.rst7\", target_dir + \"/\" + \"system_TIP3P.rst7\"\n )\n shutil.copy(cwd + \"/\" + \"system.pdb\", target_dir + \"/\" + \"system.pdb\")\n shutil.copy(\n cwd + \"/\" + \"alanine-dipeptide-nowater.pdb\",\n target_dir + \"/\" + \"alanine-dipeptide-nowater.pdb\",\n )\n shutil.copy(\n cwd + \"/\" + \"input_TIP3P.leap\", target_dir + \"/\" + \"input_TIP3P.leap\"\n )\n os.system(\"rm -rf system_TIP3P.inpcrd\")\n os.system(\"rm -rf system_TIP3P.parm7\")\n os.system(\"rm -rf system_TIP3P.pdb\")\n os.system(\"rm -rf system_TIP3P.inpcrd\")\n os.system(\"rm -rf system_TIP3P.rst7\")\n os.system(\"rm -rf system_TIP3P.prmtop\")\n os.system(\"rm -rf system.pdb\")\n os.system(\"rm -rf input_TIP3P.leap\")\n os.system(\"rm -rf alanine-dipeptide-nowater.pdb\")\n\n\ndef create_vectors(x):\n\n \"\"\"\n Extracts peridic box information from the\n given line.\n\n \"\"\"\n x = str(x)\n x = x.replace(\"Vec3\", \"\")\n x = re.findall(\"\\d*\\.?\\d+\", x)\n for i in range(0, len(x)):\n x[i] = float(x[i])\n x = tuple(x)\n n = int(len(x) / 3)\n x = [x[i * n : (i + 1) * n] for i in range((len(x) + n - 1) // n)]\n return x\n\n\ndef simulated_annealing(\n parm=\"system_TIP3P.prmtop\",\n rst=\"system_TIP3P.inpcrd\",\n annealing_output_pdb=\"system_annealing_output.pdb\",\n annealing_steps=100000,\n pdb_freq=100000,\n starting_temp=0,\n target_temp=300,\n temp_incr=3,\n):\n\n \"\"\"\n\n Performs simulated annealing of the system from\n 0K to 300 K (default) using OpenMM MD engine and\n saves the last frame of the simulation to be\n accessed by the next simulation.\n\n Parameters\n ----------\n parm: str\n System's topology file\n\n rst: str\n System's coordinate file\n\n annealing_output_pdb: str\n System's output trajectory file\n\n annealing_steps: int\n Aneealing steps at each temperatrure jump\n\n pdb_freq: int\n Trajectory to be saved after every pdb_freq steps\n\n starting_temp: int\n Initial temperature of Simulated Annealing\n\n target_temp: int\n Final temperature of Simulated Annealing\n\n temp_incr: int\n Temmperature increase for every step\n\n \"\"\"\n\n prmtop = AmberPrmtopFile(parm)\n inpcrd = AmberInpcrdFile(rst)\n annealing_system = prmtop.createSystem(\n nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds\n )\n annealing_integrator = LangevinIntegrator(\n 0 * kelvin, 1 / picosecond, 2 * femtoseconds\n )\n total_steps = ((target_temp / temp_incr) + 1) * annealing_steps\n annealing_temp_range = int((target_temp / temp_incr) + 1)\n annealing_platform = Platform.getPlatformByName(\"CUDA\")\n annealing_properties = {\"CudaDeviceIndex\": \"0\", \"CudaPrecision\": \"mixed\"}\n annealing_simulation = Simulation(\n prmtop.topology,\n annealing_system,\n 
annealing_integrator,\n annealing_platform,\n annealing_properties,\n )\n annealing_simulation.context.setPositions(inpcrd.positions)\n if inpcrd.boxVectors is not None:\n annealing_simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)\n annealing_simulation.minimizeEnergy()\n annealing_simulation.reporters.append(\n PDBReporter(annealing_output_pdb, pdb_freq)\n )\n simulated_annealing_last_frame = (\n annealing_output_pdb[:-4] + \"_last_frame.pdb\"\n )\n annealing_simulation.reporters.append(\n PDBReporter(simulated_annealing_last_frame, total_steps)\n )\n annealing_simulation.reporters.append(\n StateDataReporter(\n stdout,\n pdb_freq,\n step=True,\n time=True,\n potentialEnergy=True,\n totalSteps=total_steps,\n temperature=True,\n progress=True,\n remainingTime=True,\n speed=True,\n separator=\"\\t\",\n )\n )\n temp = starting_temp\n while temp <= target_temp:\n annealing_integrator.setTemperature(temp * kelvin)\n if temp == starting_temp:\n annealing_simulation.step(annealing_steps)\n annealing_simulation.saveState(\"annealing.state\")\n else:\n annealing_simulation.loadState(\"annealing.state\")\n annealing_simulation.step(annealing_steps)\n temp += temp_incr\n state = annealing_simulation.context.getState()\n print(state.getPeriodicBoxVectors())\n annealing_simulation_box_vectors = state.getPeriodicBoxVectors()\n print(annealing_simulation_box_vectors)\n with open(\"annealing_simulation_box_vectors.pkl\", \"wb\") as f:\n pk.dump(annealing_simulation_box_vectors, f)\n print(\"Finshed NVT Simulated Annealing Simulation\")\n\n\ndef npt_equilibration(\n parm=\"system_TIP3P.prmtop\",\n npt_output_pdb=\"system_npt_output.pdb\",\n pdb_freq=500000,\n npt_steps=5000000,\n target_temp=300,\n npt_pdb=\"system_annealing_output_last_frame.pdb\",\n):\n\n \"\"\"\n\n Performs NPT equilibration MD of the system\n using OpenMM MD engine and saves the last\n frame of the simulation to be accessed by\n the next simulation.\n\n Parameters\n ----------\n parm: str\n System's topology file\n\n npt_output_pdb: str\n System's output trajectory file\n\n pdb_freq: int\n Trajectory to be saved after every pdb_freq steps\n\n npt_steps: int\n NPT simulation steps\n\n target_temp: int\n Temperature for MD simulation\n\n npt_pdb: str\n Last frame of the simulation\n\n \"\"\"\n\n npt_init_pdb = PDBFile(npt_pdb)\n prmtop = AmberPrmtopFile(parm)\n npt_system = prmtop.createSystem(\n nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds\n )\n barostat = MonteCarloBarostat(25.0 * bar, target_temp * kelvin, 25)\n npt_system.addForce(barostat)\n npt_integrator = LangevinIntegrator(\n target_temp * kelvin, 1 / picosecond, 2 * femtoseconds\n )\n npt_platform = Platform.getPlatformByName(\"CUDA\")\n npt_properties = {\"CudaDeviceIndex\": \"0\", \"CudaPrecision\": \"mixed\"}\n npt_simulation = Simulation(\n prmtop.topology,\n npt_system,\n npt_integrator,\n npt_platform,\n npt_properties,\n )\n npt_simulation.context.setPositions(npt_init_pdb.positions)\n npt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)\n with open(\"annealing_simulation_box_vectors.pkl\", \"rb\") as f:\n annealing_simulation_box_vectors = pk.load(f)\n annealing_simulation_box_vectors = create_vectors(\n annealing_simulation_box_vectors\n )\n npt_simulation.context.setPeriodicBoxVectors(\n annealing_simulation_box_vectors[0],\n annealing_simulation_box_vectors[1],\n annealing_simulation_box_vectors[2],\n )\n npt_last_frame = npt_output_pdb[:-4] + \"_last_frame.pdb\"\n 
npt_simulation.reporters.append(PDBReporter(npt_output_pdb, pdb_freq))\n npt_simulation.reporters.append(PDBReporter(npt_last_frame, npt_steps))\n npt_simulation.reporters.append(\n StateDataReporter(\n stdout,\n pdb_freq,\n step=True,\n time=True,\n potentialEnergy=True,\n totalSteps=npt_steps,\n temperature=True,\n progress=True,\n remainingTime=True,\n speed=True,\n separator=\"\\t\",\n )\n )\n npt_simulation.minimizeEnergy()\n npt_simulation.step(npt_steps)\n npt_simulation.saveState(\"npt_simulation.state\")\n state = npt_simulation.context.getState()\n print(state.getPeriodicBoxVectors())\n npt_simulation_box_vectors = state.getPeriodicBoxVectors()\n print(npt_simulation_box_vectors)\n with open(\"npt_simulation_box_vectors.pkl\", \"wb\") as f:\n pk.dump(npt_simulation_box_vectors, f)\n print(\"Finished NPT Simulation\")\n\n\ndef nvt_equilibration(\n parm=\"system_TIP3P.prmtop\",\n nvt_output_pdb=\"system_nvt_output.pdb\",\n pdb_freq=500000,\n nvt_steps=5000000,\n target_temp=300,\n nvt_pdb=\"system_npt_output_last_frame.pdb\",\n):\n\n \"\"\"\n\n Performs NVT equilibration MD of the system\n using OpenMM MD engine saves the last\n frame of the simulation to be accessed by\n the next simulation.\n\n Parameters\n ----------\n parm: str\n System's topology file\n\n nvt_output_pdb: str\n System's output trajectory file\n\n pdb_freq: int\n Trajectory to be saved after every pdb_freq steps\n\n nvt_steps: int\n NVT simulation steps\n\n target_temp: int\n Temperature for MD simulation\n\n nvt_pdb: str\n Last frame of the simulation\n\n \"\"\"\n\n nvt_init_pdb = PDBFile(nvt_pdb)\n prmtop = AmberPrmtopFile(parm)\n nvt_system = prmtop.createSystem(\n nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds\n )\n nvt_integrator = LangevinIntegrator(\n target_temp * kelvin, 1 / picosecond, 2 * femtoseconds\n )\n nvt_platform = Platform.getPlatformByName(\"CUDA\")\n nvt_properties = {\"CudaDeviceIndex\": \"0\", \"CudaPrecision\": \"mixed\"}\n nvt_simulation = Simulation(\n prmtop.topology,\n nvt_system,\n nvt_integrator,\n nvt_platform,\n nvt_properties,\n )\n nvt_simulation.context.setPositions(nvt_init_pdb.positions)\n nvt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)\n with open(\"npt_simulation_box_vectors.pkl\", \"rb\") as f:\n npt_simulation_box_vectors = pk.load(f)\n npt_simulation_box_vectors = create_vectors(npt_simulation_box_vectors)\n nvt_simulation.context.setPeriodicBoxVectors(\n npt_simulation_box_vectors[0],\n npt_simulation_box_vectors[1],\n npt_simulation_box_vectors[2],\n )\n nvt_last_frame = nvt_output_pdb[:-4] + \"_last_frame.pdb\"\n nvt_simulation.reporters.append(PDBReporter(nvt_output_pdb, pdb_freq))\n nvt_simulation.reporters.append(PDBReporter(nvt_last_frame, nvt_steps))\n nvt_simulation.reporters.append(\n StateDataReporter(\n stdout,\n pdb_freq,\n step=True,\n time=True,\n potentialEnergy=True,\n totalSteps=nvt_steps,\n temperature=True,\n progress=True,\n remainingTime=True,\n speed=True,\n separator=\"\\t\",\n )\n )\n nvt_simulation.minimizeEnergy()\n nvt_simulation.step(nvt_steps)\n nvt_simulation.saveState(\"nvt_simulation.state\")\n state = nvt_simulation.context.getState()\n print(state.getPeriodicBoxVectors())\n nvt_simulation_box_vectors = state.getPeriodicBoxVectors()\n print(nvt_simulation_box_vectors)\n with open(\"nvt_simulation_box_vectors.pkl\", \"wb\") as f:\n pk.dump(nvt_simulation_box_vectors, f)\n print(\"Finished NVT Simulation\")\n\n\ndef run_equilibration():\n\n \"\"\"\n\n Runs systematic simulated annealing 
followed by\n NPT and NVT equilibration MD simulation.\n\n \"\"\"\n\n cwd = os.getcwd()\n target_dir = cwd + \"/\" + \"equilibration\"\n os.system(\"rm -rf equilibration\")\n os.system(\"mkdir equilibration\")\n shutil.copy(\n cwd + \"/\" + \"system_inputs\" + \"/\" + \"system_TIP3P.inpcrd\",\n target_dir + \"/\" + \"system_TIP3P.inpcrd\",\n )\n shutil.copy(\n cwd + \"/\" + \"system_inputs\" + \"/\" + \"system_TIP3P.parm7\",\n target_dir + \"/\" + \"system_TIP3P.parm7\",\n )\n shutil.copy(\n cwd + \"/\" + \"system_inputs\" + \"/\" + \"system_TIP3P.pdb\",\n target_dir + \"/\" + \"system_TIP3P.pdb\",\n )\n shutil.copy(\n cwd + \"/\" + \"system_inputs\" + \"/\" + \"system_TIP3P.prmtop\",\n target_dir + \"/\" + \"system_TIP3P.prmtop\",\n )\n shutil.copy(\n cwd + \"/\" + \"system_inputs\" + \"/\" + \"system_TIP3P.rst7\",\n target_dir + \"/\" + \"system_TIP3P.rst7\",\n )\n shutil.copy(\n cwd + \"/\" + \"system_inputs\" + \"/\" + \"system.pdb\",\n target_dir + \"/\" + \"system.pdb\",\n )\n shutil.copy(\n cwd + \"/\" + \"system_inputs\" + \"/\" + \"alanine-dipeptide-nowater.pdb\",\n target_dir + \"/\" + \"alanine-dipeptide-nowater.pdb\",\n )\n shutil.copy(\n cwd + \"/\" + \"system_inputs\" + \"/\" + \"input_TIP3P.leap\",\n target_dir + \"/\" + \"input_TIP3P.leap\",\n )\n os.chdir(target_dir)\n simulated_annealing()\n npt_equilibration()\n nvt_equilibration()\n os.system(\"rm -rf system_TIP3P.inpcrd\")\n os.system(\"rm -rf system_TIP3P.parm7\")\n os.system(\"rm -rf system_TIP3P.pdb\")\n os.system(\"rm -rf system_TIP3P.rst7\")\n os.system(\"rm -rf system_TIP3P.prmtop\")\n os.system(\"rm -rf system.pdb\")\n os.system(\"rm -rf alanine-dipeptide-nowater.pdb\")\n os.system(\"rm -rf input_TIP3P.leap\")\n os.chdir(cwd)\n\n\ndef create_starting_structures():\n\n \"\"\"\n Prepares starting structures for Amber GaMD simulations.\n All input files required to run Amber GaMD simulations are\n placed in the starting_structures directory.\n\n \"\"\"\n\n cwd = os.getcwd()\n target_dir = cwd + \"/\" + \"starting_structures\"\n os.system(\"rm -rf starting_structures\")\n os.system(\"mkdir starting_structures\")\n shutil.copy(\n cwd + \"/\" + \"equilibration\" + \"/\" + \"system_nvt_output_last_frame.pdb\",\n target_dir + \"/\" + \"system_nvt_output_last_frame.pdb\",\n )\n os.chdir(target_dir)\n fix_cap_remove_nme(\"system_nvt_output_last_frame.pdb\")\n fix_cap_replace_nme(\"system_nvt_output_last_frame.pdb\")\n # Save the tleap script to file\n with open(\"final_input_TIP3P.leap\", \"w\") as f:\n f.write(\n \"\"\"\n source leaprc.protein.ff14SB\n source leaprc.water.tip3p\n set default FlexibleWater on\n set default PBRadii mbondi2\n pdb = loadpdb system_nvt_output_last_frame.pdb\n saveamberparm pdb system_final.prmtop system_final.inpcrd\n saveamberparm pdb system_final.parm7 system_final.rst7\n savepdb pdb system_final.pdb\n quit\n \"\"\"\n )\n os.system(\"tleap -f final_input_TIP3P.leap\")\n os.system(\"rm -rf leap.log\")\n os.system(\"rm -rf system_nvt_output_last_frame.pdb\")\n os.chdir(cwd)\n\n\ndef add_vec_inpcrd():\n\n \"\"\"\n\n Adds box dimensions captured from the last saved\n frame of the NVT simulations to the inpcrd file.\n Only to be used when the box dimensions are not\n present in the inpcrd file.\n\n \"\"\"\n\n cwd = os.getcwd()\n target_dir = cwd + \"/\" + \"starting_structures\"\n shutil.copy(\n cwd + \"/\" + \"equilibration\" + \"/\" + \"nvt_simulation_box_vectors.pkl\",\n target_dir + \"/\" + \"nvt_simulation_box_vectors.pkl\",\n )\n\n os.chdir(target_dir)\n with 
open(\"nvt_simulation_box_vectors.pkl\", \"rb\") as f:\n nvt_simulation_box_vectors = pk.load(f)\n nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)\n vectors = (\n (nvt_simulation_box_vectors[0][0]) * 10,\n (nvt_simulation_box_vectors[1][1]) * 10,\n (nvt_simulation_box_vectors[2][2]) * 10,\n )\n vectors = (\n round(vectors[0], 7),\n round(vectors[1], 7),\n round(vectors[2], 7),\n )\n last_line = (\n \" \"\n + str(vectors[0])\n + \" \"\n + str(vectors[1])\n + \" \"\n + str(vectors[2])\n + \" 90.0000000\"\n + \" 90.0000000\"\n + \" 90.0000000\"\n )\n with open(\"system_final.inpcrd\", \"a+\") as f:\n f.write(last_line)\n os.system(\"rm -rf nvt_simulation_box_vectors.pkl\")\n os.chdir(cwd)\n\n\ndef add_vec_prmtop():\n\n \"\"\"\n\n Adds box dimensions captured from the last saved\n frame of the NVT simulations to the prmtop file.\n Only to be used when the box dimensions are not\n present in the prmtop file.\n\n \"\"\"\n\n cwd = os.getcwd()\n target_dir = cwd + \"/\" + \"starting_structures\"\n shutil.copy(\n cwd + \"/\" + \"equilibration\" + \"/\" + \"nvt_simulation_box_vectors.pkl\",\n target_dir + \"/\" + \"nvt_simulation_box_vectors.pkl\",\n )\n\n os.chdir(target_dir)\n with open(\"nvt_simulation_box_vectors.pkl\", \"rb\") as f:\n nvt_simulation_box_vectors = pk.load(f)\n nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)\n vectors = (\n nvt_simulation_box_vectors[0][0],\n nvt_simulation_box_vectors[1][1],\n nvt_simulation_box_vectors[2][2],\n )\n vectors = round(vectors[0], 7), round(vectors[1], 7), round(vectors[2], 7)\n oldbeta = \"9.00000000E+01\"\n x = str(vectors[0]) + str(0) + \"E+\" + \"01\"\n y = str(vectors[1]) + str(0) + \"E+\" + \"01\"\n z = str(vectors[2]) + str(0) + \"E+\" + \"01\"\n line1 = \"%FLAG BOX_DIMENSIONS\"\n line2 = \"%FORMAT(5E16.8)\"\n line3 = \" \" + oldbeta + \" \" + x + \" \" + y + \" \" + z\n with open(\"system_final.prmtop\") as i, open(\n \"system_intermediate_final.prmtop\", \"w\"\n ) as f:\n for line in i:\n if line.startswith(\"%FLAG RADIUS_SET\"):\n line = line1 + \"\\n\" + line2 + \"\\n\" + line3 + \"\\n\" + line\n f.write(line)\n os.system(\"rm -rf system_final.prmtop\")\n os.system(\"mv system_intermediate_final.prmtop system_final.prmtop\")\n os.system(\"rm -rf nvt_simulation_box_vectors.pkl\")\n os.chdir(cwd)\n\n\ndef create_filetree(\n nst_lim=26000000,\n ntw_x=1000,\n nt_cmd=1000000,\n n_teb=1000000,\n n_tave=50000,\n ntcmd_prep=200000,\n nteb_prep=200000,\n):\n\n \"\"\"\n\n Creates a directory named gamd_simulations. Inside\n this directory, there are subdirectories for dihedral,\n dual and total potential-boosted GaMD with upper and\n lower threshold boosts separately.\n\n Parameters\n ----------\n\n nst_lim: int\n Total simulation time including preparatory simulation.\n For example, if nst_lim = 26000000, then, we may have\n 2 ns of preparatory simulation i.e. 1000000 preparation steps\n and 50 ns of GaMD simulation i.e. 25000000 simulation steps\n\n ntw_x: int\n Saving coordinates of the simulation every ntw_x\n timesteps. 
For example, 2 ps implies 1000 timesteps\n\n nt_cmd: int\n Number of initial MD simulation step, 2 ns of\n preparatory simulation requires 1000000 preparation\n timesteps\n\n n_teb: int\n Number of biasing MD simulation steps\n\n n_tave: int\n Number of simulation steps used to calculate the\n average and standard deviation of potential energies\n\n ntcmd_prep: int\n Number of preparation conventional molecular dynamics\n steps.This is used for system equilibration and\n potential energies are not collected for statistics\n\n nteb_prep: int\n Number of preparation biasing molecular dynamics\n simulation steps. This is used for system\n equilibration\n\n \"\"\"\n\n cwd = os.getcwd()\n os.system(\"rm -rf gamd_simulations\")\n os.system(\"mkdir gamd_simulations\")\n os.chdir(cwd + \"/\" + \"gamd_simulations\")\n source_dir = cwd + \"/\" + \"starting_structures\"\n target_dir = cwd + \"/\" + \"gamd_simulations\"\n dir_list = [\n \"dihedral_threshold_lower\",\n \"dihedral_threshold_upper\",\n \"dual_threshold_lower\",\n \"dual_threshold_upper\",\n \"total_threshold_lower\",\n \"total_threshold_upper\",\n ]\n for i in range(len(dir_list)):\n os.mkdir(dir_list[i])\n os.chdir(target_dir + \"/\" + dir_list[i])\n shutil.copy(\n source_dir + \"/\" + \"system_final.inpcrd\",\n target_dir + \"/\" + dir_list[i] + \"/\" + \"system_final.inpcrd\",\n )\n shutil.copy(\n source_dir + \"/\" + \"system_final.prmtop\",\n target_dir + \"/\" + dir_list[i] + \"/\" + \"system_final.prmtop\",\n )\n if \"lower\" in dir_list[i]:\n i_E = 1\n if \"upper\" in dir_list[i]:\n i_E = 2\n if \"total\" in dir_list[i]:\n i_gamd = 1\n if \"dihedral\" in dir_list[i]:\n i_gamd = 2\n if \"dual\" in dir_list[i]:\n i_gamd = 3\n with open(\"md.in\", \"w\") as f:\n f.write(\"&cntrl\" + \"\\n\")\n f.write(\" imin = 0, irest = 0, ntx = 1,\" + \"\\n\")\n f.write(\" nstlim = \" + str(nst_lim) + \", dt = 0.002,\" + \"\\n\")\n f.write(\" ntc = 2, ntf = 2, tol = 0.000001,\" + \"\\n\")\n f.write(\" iwrap = 1, ntb = 1, cut = 8.0,\" + \"\\n\")\n f.write(\" ntt = 3, temp0 = 300.0, gamma_ln = 1.0, \" + \"\\n\")\n f.write(\n \" ntpr = 500, ntwx = \" + str(ntw_x) + \", ntwr = 500,\" + \"\\n\"\n )\n f.write(\" ntxo = 2, ioutfm = 1, ig = -1, ntwprt = 0,\" + \"\\n\")\n f.write(\n \" igamd = \"\n + str(i_gamd)\n + \", iE = \"\n + str(i_E)\n + \", irest_gamd = 0,\"\n + \"\\n\"\n )\n f.write(\n \" ntcmd = \"\n + str(nt_cmd)\n + \", nteb = \"\n + str(n_teb)\n + \", ntave = \"\n + str(n_tave)\n + \",\"\n + \"\\n\"\n )\n f.write(\n \" ntcmdprep = \"\n + str(ntcmd_prep)\n + \", ntebprep = \"\n + str(nteb_prep)\n + \",\"\n + \"\\n\"\n )\n f.write(\" sigma0D = 6.0, sigma0P = 6.0\" + \" \\n\")\n f.write(\"&end\" + \"\\n\")\n os.chdir(target_dir)\n os.chdir(cwd)\n\n\ndef run_simulations():\n\n \"\"\"\n\n Runs GaMD simulations for each of the dihedral, dual and total\n potential boosts for both thresholds i.e. upper and lower potential\n thresholds. 
(Remember to check md.in files for further details and\n flag information).\n\n \"\"\"\n\n cwd = os.getcwd()\n os.chdir(cwd + \"/\" + \"gamd_simulations\")\n os.chdir(cwd + \"/\" + \"gamd_simulations\" + \"/\" + \"dihedral_threshold_lower\")\n os.system(\n \"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc\"\n )\n os.chdir(cwd + \"/\" + \"gamd_simulations\" + \"/\" + \"dihedral_threshold_upper\")\n os.system(\n \"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc\"\n )\n os.chdir(cwd + \"/\" + \"gamd_simulations\" + \"/\" + \"dual_threshold_lower\")\n os.system(\n \"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc\"\n )\n os.chdir(cwd + \"/\" + \"gamd_simulations\" + \"/\" + \"dual_threshold_upper\")\n os.system(\n \"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc\"\n )\n os.chdir(cwd + \"/\" + \"gamd_simulations\" + \"/\" + \"total_threshold_lower\")\n os.system(\n \"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc\"\n )\n os.chdir(cwd + \"/\" + \"gamd_simulations\" + \"/\" + \"total_threshold_upper\")\n os.system(\n \"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc\"\n )\n os.chdir(cwd + \"/\" + \"gamd_simulations\")\n os.chdir(cwd)\n\n\ndef create_data_files(\n jump=10,\n traj=\"system_final.nc\",\n topology=\"system_final.prmtop\",\n T=300,\n):\n\n \"\"\"\n\n Extracts data from GaMD log files and saves them as\n weights.dat, Psi.dat and Phi_Psi.dat. gamd.log file\n contains data excluding the initial equilibration MD\n simulation steps but trajectory output file has all\n the trajectories including the initial equilibration\n MD steps. 
This part has been taken care of to make the\n data consistent.\n\n Parameters\n ----------\n\n jump: int\n Every nth frame to be considered for reweighting\n\n traj: str\n System's trajectory file\n\n topology: str\n System's topology file\n\n T: int\n MD simulation temperature\n\n \"\"\"\n\n # To make data consistent with gamd.log and .nc file\n factor = 0.001987 * T\n with open(\"md.in\") as f:\n lines = f.readlines()\n for i in lines:\n if \"nstlim =\" in i:\n nstlim_line = i\n if \"ntcmd =\" in i:\n ntcmd_line = i\n if \"ntwx =\" in i:\n ntwx_line = i\n x = re.findall(r\"\\b\\d+\\b\", ntcmd_line)\n ntcmd = int(x[0])\n x = re.findall(r\"\\b\\d+\\b\", nstlim_line)\n nstlim = int(x[0])\n x = re.findall(r\"\\b\\d+\\b\", ntwx_line)\n ntwx = int(x[1])\n # From the .nc trajectory files, we will not consider ntcmd trajectories\n leave_frames = int(ntcmd / ntwx)\n no_frames = int(nstlim / ntwx)\n # Recheck conditions\n file = open(\"gamd.log\", \"r\")\n number_of_lines = 0\n for line in file:\n line = line.strip(\"\\n\")\n number_of_lines += 1\n file.close()\n f = open(\"gamd.log\")\n fourth_line = f.readlines()[3]\n if str(ntcmd) in fourth_line:\n datapoints = number_of_lines - 4\n if not str(ntcmd) in fourth_line:\n datapoints = number_of_lines - 3\n print(datapoints == int((nstlim - ntcmd) / ntwx))\n # Creating Psi.dat and Phi_Psi.dat\n traj = md.load(traj, top=topology)\n traj = traj[leave_frames:no_frames:jump]\n phi = md.compute_phi(traj)\n phi = phi[1]  # 0:indices, 1:phi angles\n phi = np.array([math.degrees(i) for i in phi])  # radians to degrees\n psi = md.compute_psi(traj)\n psi = psi[1]  # 0:indices, 1:psi angles\n psi = np.array([math.degrees(i) for i in psi])  # radians to degrees\n df_psi = pd.DataFrame(psi, columns=[\"Psi\"])\n df_psi = df_psi.tail(int(datapoints))\n df_psi.to_csv(\"Psi.dat\", sep=\"\\t\", index=False, header=False)\n df_phi = pd.DataFrame(phi, columns=[\"Phi\"])\n df_phi = df_phi.tail(int(datapoints))\n df_phi_psi = pd.concat([df_phi, df_psi], axis=1)\n df_phi_psi.to_csv(\"Phi_Psi.dat\", sep=\"\\t\", index=False, header=False)\n # Creating weights.dat\n with open(\"gamd.log\") as f:\n lines = f.readlines()\n column_names = lines[2]\n column_names = column_names.replace(\"#\", \"\")\n column_names = column_names.replace(\"\\n\", \"\")\n column_names = column_names.replace(\" \", \"\")\n column_names = column_names.split(\",\")\n list_words = [\"#\"]\n with open(\"gamd.log\") as oldfile, open(\"data.log\", \"w\") as newfile:\n for line in oldfile:\n if not any(word in line for word in list_words):\n newfile.write(line)\n df = pd.read_csv(\"data.log\", delim_whitespace=True, header=None)\n df.columns = column_names\n df[\"dV(kcal/mol)\"] = (\n df[\"Boost-Energy-Potential\"] + df[\"Boost-Energy-Dihedral\"]\n )\n df[\"dV(kbT)\"] = df[\"dV(kcal/mol)\"] / factor\n df_ = df[[\"dV(kbT)\", \"total_nstep\", \"dV(kcal/mol)\"]]\n df_ = df_[::jump]\n df_.to_csv(\"weights.dat\", sep=\"\\t\", index=False, header=False)\n os.system(\"rm -rf data.log\")\n print(df_phi_psi.shape)\n print(df_phi.shape)\n print(df_.shape)\n\n\ndef create_bins(lower_bound, width, upper_bound):\n\n \"\"\"\n\n Creates bins given the lower and upper bounds\n and the width information.\n\n \"\"\"\n\n bins = []\n for low in range(lower_bound, upper_bound, width):\n bins.append([low, low + width])\n return bins\n\n\ndef find_bin(value, bins):\n\n \"\"\"\n\n Finds which value belongs to which bin.\n\n \"\"\"\n\n for i in range(0, len(bins)):\n if bins[i][0] <= value < bins[i][1]:\n return i\n return 
-1\n\n\ndef reweight_1d(\n binspace=10, n_structures=4, Xdim=[-180, 180], T=300.0, min_prob=0.000001\n):\n\n \"\"\"\n\n Reweights boosted potential energies in one-dimension based on\n Maclaurin series expansion to one, two and three degrees.\n\n Parameters\n ----------\n\n binspace: int\n Spacing between the bins\n\n n_structures: int\n Number of structures per bin chosen\n for Weighted Ensemble (WE) simulations\n\n Xdim: list\n Range of dihedral angles\n\n T: float\n MD simulation temperature\n\n min_prob: float\n minimum probability threshold\n\n \"\"\"\n\n beta = 1.0 / (0.001987 * float(T))\n df_Psi = pd.read_csv(\"Psi.dat\", delim_whitespace=True, header=None)\n df_Psi.columns = [\"Psi\"]\n df_weight = pd.read_csv(\"weights.dat\", delim_whitespace=True, header=None)\n df_weight.columns = [\"dV_kBT\", \"timestep\", \"dVkcalmol\"]\n\n sum_total = df_Psi.shape[0]\n binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)\n hist, hist_edges = np.histogram(df_Psi[[\"Psi\"]], bins=binsX, weights=None)\n pstarA = [i / sum_total for i in list(hist)]\n bins = create_bins(\n lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])\n )\n\n data = df_Psi[\"Psi\"].values.tolist()\n binned_weights = []\n for value in data:\n bin_index = find_bin(value, bins)\n binned_weights.append(bin_index)\n df_index = pd.DataFrame(binned_weights)\n df_index.columns = [\"index\"]\n\n df = pd.concat([df_index, df_Psi, df_weight], axis=1)\n dV_c1 = []\n dV_c2 = []\n dV_c3 = []\n dV = []\n for i in range(len(bins)):\n df_i = df.loc[(df[\"index\"] == i)]\n dV_list = df_i[\"dVkcalmol\"].values.tolist()\n if len(dV_list) >= 10:\n dV_c1.append(statistics.mean(dV_list))\n dV_c2.append(\n statistics.mean([i ** 2 for i in dV_list])\n - (statistics.mean(dV_list)) ** 2\n )\n dV_c3.append(\n statistics.mean([i ** 3 for i in dV_list])\n - 3\n * (statistics.mean([i ** 2 for i in dV_list]))\n * (statistics.mean(dV_list))\n + 2 * (statistics.mean(dV_list)) ** 3\n )\n if len(dV_list) < 10:\n dV_c1.append(0)\n dV_c2.append(0)\n dV_c3.append(0)\n dV.append(dV_list)\n\n c1 = [i * beta for i in dV_c1]\n c2 = [i * ((beta ** 2) / 2) for i in dV_c2]\n c3 = [i * ((beta ** 3) / 6) for i in dV_c3]\n c1 = c1\n c12 = [a + b for a, b in zip(c1, c2)]\n c123 = [a + b for a, b in zip(c12, c3)]\n for i in range(len(c1)):\n if c1[i] >= 700:\n c1[i] = 700\n for i in range(len(c12)):\n if c12[i] >= 700:\n c12[i] = 700\n for i in range(len(c123)):\n if c123[i] >= 700:\n c123[i] = 700\n ensemble_average_c1 = [exp(i) for i in c1]\n ensemble_average_c12 = [exp(i) for i in c12]\n ensemble_average_c123 = [exp(i) for i in c123]\n numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]\n numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]\n numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]\n\n #### c1\n denominatorc1 = []\n for i in range(len(bins)):\n product_c1 = pstarA[i] * ensemble_average_c1[i]\n denominatorc1.append(product_c1)\n denominator_c1 = sum(denominatorc1)\n pA_c1 = [i / denominator_c1 for i in numerator_c1]\n #### c12\n denominatorc12 = []\n for i in range(len(bins)):\n product_c12 = pstarA[i] * ensemble_average_c12[i]\n denominatorc12.append(product_c12)\n denominator_c12 = sum(denominatorc12)\n pA_c12 = [i / denominator_c12 for i in numerator_c12]\n #### c123\n denominatorc123 = []\n for i in range(len(bins)):\n product_c123 = pstarA[i] * ensemble_average_c123[i]\n denominatorc123.append(product_c123)\n denominator_c123 = sum(denominatorc123)\n pA_c123 = [i / 
denominator_c123 for i in numerator_c123]\n\n data_c1 = list(zip(bins, pA_c1))\n data_c12 = list(zip(bins, pA_c12))\n data_c123 = list(zip(bins, pA_c123))\n\n df_c1 = pd.DataFrame(data_c1, columns=[\"bins\", \"pA_c1\"])\n df_c12 = pd.DataFrame(data_c12, columns=[\"bins\", \"pA_c12\"])\n df_c123 = pd.DataFrame(data_c123, columns=[\"bins\", \"pA_c123\"])\n\n ####c1\n df_c1.to_csv(\"c1_1d.txt\", header=True, index=None, sep=\" \", mode=\"w\")\n with open(\"c1_1d.txt\", \"r\") as f1, open(\"pA_c1_1d.txt\", \"w\") as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c1_1d.txt\")\n ####c12\n df_c12.to_csv(\"c12_1d.txt\", header=True, index=None, sep=\" \", mode=\"w\")\n with open(\"c12_1d.txt\", \"r\") as f1, open(\"pA_c12_1d.txt\", \"w\") as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c12_1d.txt\")\n ####c123\n df_c123.to_csv(\"c123_1d.txt\", header=True, index=None, sep=\" \", mode=\"w\")\n with open(\"c123_1d.txt\", \"r\") as f1, open(\"pA_c123_1d.txt\", \"w\") as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c123_1d.txt\")\n\n ####c1_arranged\n df_c1_arranged = df_c1.sort_values(by=\"pA_c1\", ascending=False)\n df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]\n df_c1_arranged.to_csv(\n \"c1_arranged_1d.txt\", header=True, index=None, sep=\" \", mode=\"w\"\n )\n with open(\"c1_arranged_1d.txt\", \"r\") as f1, open(\n \"pA_c1_arranged_1d.txt\", \"w\"\n ) as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c1_arranged_1d.txt\")\n ####c12_arranged\n df_c12_arranged = df_c12.sort_values(by=\"pA_c12\", ascending=False)\n df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]\n df_c12_arranged.to_csv(\n \"c12_arranged_1d.txt\", header=True, index=None, sep=\" \", mode=\"w\"\n )\n with open(\"c12_arranged_1d.txt\", \"r\") as f1, open(\n \"pA_c12_arranged_1d.txt\", \"w\"\n ) as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c12_arranged_1d.txt\")\n ####c123_arranged\n df_c123_arranged = df_c123.sort_values(by=\"pA_c123\", ascending=False)\n df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]\n df_c123_arranged.to_csv(\n \"c123_arranged_1d.txt\", header=True, index=None, sep=\" \", mode=\"w\"\n )\n with open(\"c123_arranged_1d.txt\", \"r\") as f1, open(\n \"pA_c123_arranged_1d.txt\", \"w\"\n ) as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c123_arranged_1d.txt\")\n\n ####c1_arranged\n df_c1_arranged[\"index\"] = df_c1_arranged.index\n index_list_c1 = df_c1_arranged[\"index\"].tolist()\n df[\"frame_index\"] = df.index\n df_frame_index = df[[\"frame_index\", \"index\"]]\n frame_indices_c1 = []\n index_indces_c1 = []\n for i in index_list_c1:\n df_index_list_c1 = df_frame_index.loc[df_frame_index[\"index\"] == i]\n frame_c1 = df_index_list_c1[\"frame_index\"].tolist()\n frame_indices_c1.append(frame_c1)\n index_c1 = [i] * len(frame_c1)\n index_indces_c1.append(index_c1)\n frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]\n index_indces_c1 = [item for elem in index_indces_c1 for item in elem]\n df_c1_frame = pd.DataFrame(frame_indices_c1, columns=[\"frame_index\"])\n df_c1_index = pd.DataFrame(index_indces_c1, columns=[\"index\"])\n df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)\n 
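# df_c1_frame_index now pairs every selected bin with the trajectory frames\n # assigned to it; the groupby/filter below discards bins holding fewer than\n # 10 frames so that random.sample can later draw n_structures frames from\n # each remaining bin without running short of candidates.\n 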
df_c1_frame_index = df_c1_frame_index.groupby(\"index\").filter(\n lambda x: len(x) >= 10\n )\n df_c1_frame_index.to_csv(\n \"c1_frame_index_1d.txt\", header=True, index=None, sep=\" \", mode=\"w\"\n )\n with open(\"c1_frame_index_1d.txt\", \"r\") as f1, open(\n \"c1_frame_1d.txt\", \"w\"\n ) as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c1_frame_index_1d.txt\")\n ####c12_arranged\n df_c12_arranged[\"index\"] = df_c12_arranged.index\n index_list_c12 = df_c12_arranged[\"index\"].tolist()\n df[\"frame_index\"] = df.index\n df_frame_index = df[[\"frame_index\", \"index\"]]\n frame_indices_c12 = []\n index_indces_c12 = []\n for i in index_list_c12:\n df_index_list_c12 = df_frame_index.loc[df_frame_index[\"index\"] == i]\n frame_c12 = df_index_list_c12[\"frame_index\"].tolist()\n frame_indices_c12.append(frame_c12)\n index_c12 = [i] * len(frame_c12)\n index_indces_c12.append(index_c12)\n frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]\n index_indces_c12 = [item for elem in index_indces_c12 for item in elem]\n df_c12_frame = pd.DataFrame(frame_indices_c12, columns=[\"frame_index\"])\n df_c12_index = pd.DataFrame(index_indces_c12, columns=[\"index\"])\n df_c12_frame_index = pd.concat([df_c12_frame, df_c12_index], axis=1)\n df_c12_frame_index = df_c12_frame_index.groupby(\"index\").filter(\n lambda x: len(x) >= 10\n )\n df_c12_frame_index.to_csv(\n \"c12_frame_index_1d.txt\", header=True, index=None, sep=\" \", mode=\"w\"\n )\n with open(\"c12_frame_index_1d.txt\", \"r\") as f1, open(\n \"c12_frame_1d.txt\", \"w\"\n ) as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c12_frame_index_1d.txt\")\n ####c123_arranged\n df_c123_arranged[\"index\"] = df_c123_arranged.index\n index_list_c123 = df_c123_arranged[\"index\"].tolist()\n df[\"frame_index\"] = df.index\n df_frame_index = df[[\"frame_index\", \"index\"]]\n frame_indices_c123 = []\n index_indces_c123 = []\n for i in index_list_c123:\n df_index_list_c123 = df_frame_index.loc[df_frame_index[\"index\"] == i]\n frame_c123 = df_index_list_c123[\"frame_index\"].tolist()\n frame_indices_c123.append(frame_c123)\n index_c123 = [i] * len(frame_c123)\n index_indces_c123.append(index_c123)\n frame_indices_c123 = [item for elem in frame_indices_c123 for item in elem]\n index_indces_c123 = [item for elem in index_indces_c123 for item in elem]\n df_c123_frame = pd.DataFrame(frame_indices_c123, columns=[\"frame_index\"])\n df_c123_index = pd.DataFrame(index_indces_c123, columns=[\"index\"])\n df_c123_frame_index = pd.concat([df_c123_frame, df_c123_index], axis=1)\n df_c123_frame_index = df_c123_frame_index.groupby(\"index\").filter(\n lambda x: len(x) >= 10\n )\n df_c123_frame_index.to_csv(\n \"c123_frame_index_1d.txt\", header=True, index=None, sep=\" \", mode=\"w\"\n )\n with open(\"c123_frame_index_1d.txt\", \"r\") as f1, open(\n \"c123_frame_1d.txt\", \"w\"\n ) as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c123_frame_index_1d.txt\")\n\n ####c1\n indices_c1_1d = df_c1_frame_index[\"index\"].unique()\n frames_c1 = []\n for i in indices_c1_1d:\n x = df_c1_frame_index.loc[df_c1_frame_index[\"index\"] == i]\n y = x[\"frame_index\"].values.tolist()\n z = random.sample(y, n_structures)\n frames_c1.append(z)\n frames_c1_1d = [item for elem in frames_c1 for item in elem]\n with open(\"frames_c1_1d.pickle\", \"wb\") as f:\n pk.dump(frames_c1_1d, f)\n with 
open(\"indices_c1_1d.pickle\", \"wb\") as f:\n pk.dump(indices_c1_1d, f)\n ####c12\n indices_c12_1d = df_c12_frame_index[\"index\"].unique()\n frames_c12 = []\n for i in indices_c12_1d:\n x = df_c12_frame_index.loc[df_c12_frame_index[\"index\"] == i]\n y = x[\"frame_index\"].values.tolist()\n z = random.sample(y, n_structures)\n frames_c12.append(z)\n frames_c12_1d = [item for elem in frames_c12 for item in elem]\n with open(\"frames_c12_1d.pickle\", \"wb\") as f:\n pk.dump(frames_c12_1d, f)\n with open(\"indices_c12_1d.pickle\", \"wb\") as f:\n pk.dump(indices_c12_1d, f)\n ####c123\n indices_c123_1d = df_c123_frame_index[\"index\"].unique()\n frames_c123 = []\n for i in indices_c123_1d:\n x = df_c123_frame_index.loc[df_c123_frame_index[\"index\"] == i]\n y = x[\"frame_index\"].values.tolist()\n z = random.sample(y, n_structures)\n frames_c123.append(z)\n frames_c123_1d = [item for elem in frames_c123 for item in elem]\n with open(\"frames_c123_1d.pickle\", \"wb\") as f:\n pk.dump(frames_c123_1d, f)\n with open(\"indices_c123_1d.pickle\", \"wb\") as f:\n pk.dump(indices_c123_1d, f)\n ##saving probabilities for each selected frame\n ####c1\n prob_c1_1d_list = []\n for i in indices_c1_1d:\n prob_c1_1d_list.append(df_c1[\"pA_c1\"][i])\n prob_c1_1d_list = list(\n itertools.chain.from_iterable(\n itertools.repeat(x, n_structures) for x in prob_c1_1d_list\n )\n )\n prob_c1_1d_list = [x / n_structures for x in prob_c1_1d_list]\n with open(\"prob_c1_1d_list.pickle\", \"wb\") as f:\n pk.dump(prob_c1_1d_list, f)\n ####c12\n prob_c12_1d_list = []\n for i in indices_c12_1d:\n prob_c12_1d_list.append(df_c12[\"pA_c12\"][i])\n prob_c12_1d_list = list(\n itertools.chain.from_iterable(\n itertools.repeat(x, n_structures) for x in prob_c12_1d_list\n )\n )\n prob_c12_1d_list = [x / n_structures for x in prob_c12_1d_list]\n with open(\"prob_c12_1d_list.pickle\", \"wb\") as f:\n pk.dump(prob_c12_1d_list, f)\n ####c123\n prob_c123_1d_list = []\n for i in indices_c123_1d:\n prob_c123_1d_list.append(df_c123[\"pA_c123\"][i])\n prob_c123_1d_list = list(\n itertools.chain.from_iterable(\n itertools.repeat(x, n_structures) for x in prob_c123_1d_list\n )\n )\n prob_c123_1d_list = [x / n_structures for x in prob_c123_1d_list]\n with open(\"prob_c123_1d_list.pickle\", \"wb\") as f:\n pk.dump(prob_c123_1d_list, f)\n\n ref_df_1d = pd.DataFrame(bins, columns=[\"dim0\", \"dim1\"])\n ref_df_1d[\"bins\"] = ref_df_1d.agg(\n lambda x: f\"[{x['dim0']} , {x['dim1']}]\", axis=1\n )\n ref_df_1d = ref_df_1d[[\"bins\"]]\n index_ref_1d = []\n for i in range(len(bins)):\n index_ref_1d.append(i)\n index_ref_df_1d = pd.DataFrame(index_ref_1d, columns=[\"index\"])\n df_ref_1d = pd.concat([ref_df_1d, index_ref_df_1d], axis=1)\n df_ref_1d.to_csv(\"ref_1d.txt\", header=True, index=None, sep=\" \", mode=\"w\")\n\n df.to_csv(\"df_1d.csv\", index=False)\n os.system(\"rm -rf __pycache__\")\n print(\"Successfully Completed Reweighing\")\n\n\ndef reweight_2d(\n binspace=10,\n n_structures=4,\n Xdim=[-180, 180],\n Ydim=[-180, 180],\n T=300.0,\n min_prob=0.000001,\n):\n\n \"\"\"\n\n Reweights boosted potential energies in two-dimensions\n based on Maclaurin series expansion to one, two and\n three degrees.\n\n Parameters\n ----------\n\n binspace: int\n Spacing between the bins\n\n n_structures: int\n Number of structures per bin chosen\n for Weighted Ensemble (WE) simulations\n\n Xdim: list\n Range of dihedral angles (1st dimension)\n\n Ydim: list\n Range of dihedral angles (2nd dimension)\n\n T: float\n MD simulation temperature\n\n min_prob: 
float\n minimum probability threshold\n\n \"\"\"\n\n beta = 1.0 / (0.001987 * float(T))\n df_Phi_Psi = pd.read_csv(\"Phi_Psi.dat\", delim_whitespace=True, header=None)\n df_Phi_Psi.columns = [\"Phi\", \"Psi\"]\n df_weight = pd.read_csv(\"weights.dat\", delim_whitespace=True, header=None)\n df_weight.columns = [\"dV_kBT\", \"timestep\", \"dVkcalmol\"]\n\n sum_total = df_Phi_Psi.shape[0]\n binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)\n binsY = np.arange(float(Ydim[0]), (float(Ydim[1]) + binspace), binspace)\n hist2D, hist_edgesX, hist_edgesY = np.histogram2d(\n df_Phi_Psi[\"Phi\"].values.tolist(),\n df_Phi_Psi[\"Psi\"].values.tolist(),\n bins=(binsX, binsY),\n weights=None,\n )\n pstarA_2D = [i / sum_total for i in list(hist2D)]\n bins_tuple_X = create_bins(\n lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])\n )\n bins_tuple_Y = create_bins(\n lower_bound=int(Ydim[0]), width=binspace, upper_bound=int(Ydim[1])\n )\n bins = []\n for i in range(len(bins_tuple_X)):\n for j in range(len(bins_tuple_Y)):\n bins.append([bins_tuple_X[i], bins_tuple_Y[j]])\n pstarA = [item for elem in pstarA_2D for item in elem]\n hist = [item for elem in hist2D for item in elem]\n hist = [int(i) for i in hist]\n\n data_X = df_Phi_Psi[\"Phi\"].values.tolist()\n binned_weights_X = []\n for value in data_X:\n bin_index_X = find_bin(value, bins_tuple_X)\n binned_weights_X.append(bin_index_X)\n data_Y = df_Phi_Psi[\"Psi\"].values.tolist()\n binned_weights_Y = []\n for value in data_Y:\n bin_index_Y = find_bin(value, bins_tuple_Y)\n binned_weights_Y.append(bin_index_Y)\n binned_weights_2D = []\n for i in range(len(binned_weights_X)):\n binned_weights_2D.append([binned_weights_X[i], binned_weights_Y[i]])\n binned_weights = []\n for i in range(len(binned_weights_2D)):\n binned_weights.append(\n (binned_weights_2D[i][0] * len(bins_tuple_Y))\n + (binned_weights_2D[i][1] + 1)\n )\n df_index = pd.DataFrame(binned_weights)\n df_index.columns = [\"index\"]\n df_index[\"index\"] = df_index[\"index\"] - 1\n\n df = pd.concat([df_index, df_Phi_Psi, df_weight], axis=1)\n dV_c1 = []\n dV_c2 = []\n dV_c3 = []\n dV = []\n for i in range(len(bins)):\n df_i = df.loc[(df[\"index\"] == i)]\n dV_list = df_i[\"dVkcalmol\"].values.tolist()\n if len(dV_list) >= 10:\n dV_c1.append(statistics.mean(dV_list))\n dV_c2.append(\n statistics.mean([i ** 2 for i in dV_list])\n - (statistics.mean(dV_list)) ** 2\n )\n dV_c3.append(\n statistics.mean([i ** 3 for i in dV_list])\n - 3\n * (statistics.mean([i ** 2 for i in dV_list]))\n * (statistics.mean(dV_list))\n + 2 * (statistics.mean(dV_list)) ** 3\n )\n if len(dV_list) < 10:\n dV_c1.append(0)\n dV_c2.append(0)\n dV_c3.append(0)\n dV.append(dV_list)\n\n c1 = [i * beta for i in dV_c1]\n c2 = [i * ((beta ** 2) / 2) for i in dV_c2]\n c3 = [i * ((beta ** 3) / 6) for i in dV_c3]\n c1 = c1\n c12 = [a + b for a, b in zip(c1, c2)]\n c123 = [a + b for a, b in zip(c12, c3)]\n for i in range(len(c1)):\n if c1[i] >= 700:\n c1[i] = 700\n for i in range(len(c12)):\n if c12[i] >= 700:\n c12[i] = 700\n for i in range(len(c123)):\n if c123[i] >= 700:\n c123[i] = 700\n ensemble_average_c1 = [exp(i) for i in c1]\n ensemble_average_c12 = [exp(i) for i in c12]\n ensemble_average_c123 = [exp(i) for i in c123]\n numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]\n numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]\n numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]\n\n #### c1\n denominatorc1 = []\n for i in range(len(bins)):\n 
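# Normalization of the reweighted 2-D probabilities: for each bin j,\n # pA_j = pstarA_j * <exp(beta*dV)>_j / sum_k(pstarA_k * <exp(beta*dV)>_k),\n # where the exponential average is approximated by exp(c1), exp(c1 + c2) or\n # exp(c1 + c2 + c3) from the truncated expansion computed above (values are\n # capped at 700 before exponentiation to avoid overflow). This loop and the\n # two analogous ones below accumulate the denominator term by term; an\n # equivalent one-liner would be\n # sum(p * e for p, e in zip(pstarA, ensemble_average_c1)).\n 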
product_c1 = pstarA[i] * ensemble_average_c1[i]\n denominatorc1.append(product_c1)\n denominator_c1 = sum(denominatorc1)\n pA_c1 = [i / denominator_c1 for i in numerator_c1]\n #### c12\n denominatorc12 = []\n for i in range(len(bins)):\n product_c12 = pstarA[i] * ensemble_average_c12[i]\n denominatorc12.append(product_c12)\n denominator_c12 = sum(denominatorc12)\n pA_c12 = [i / denominator_c12 for i in numerator_c12]\n #### c123\n denominatorc123 = []\n for i in range(len(bins)):\n product_c123 = pstarA[i] * ensemble_average_c123[i]\n denominatorc123.append(product_c123)\n denominator_c123 = sum(denominatorc123)\n pA_c123 = [i / denominator_c123 for i in numerator_c123]\n\n data_c1 = list(zip(bins, pA_c1))\n data_c12 = list(zip(bins, pA_c12))\n data_c123 = list(zip(bins, pA_c123))\n\n df_c1 = pd.DataFrame(data_c1, columns=[\"bins\", \"pA_c1\"])\n df_c12 = pd.DataFrame(data_c12, columns=[\"bins\", \"pA_c12\"])\n df_c123 = pd.DataFrame(data_c123, columns=[\"bins\", \"pA_c123\"])\n\n df_c1.to_csv(\"c1_2d.txt\", header=True, index=None, sep=\" \", mode=\"w\")\n with open(\"c1_2d.txt\", \"r\") as f1, open(\"pA_c1_2d.txt\", \"w\") as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c1_2d.txt\")\n ####c12\n df_c12.to_csv(\"c12_2d.txt\", header=True, index=None, sep=\" \", mode=\"w\")\n with open(\"c12_2d.txt\", \"r\") as f1, open(\"pA_c12_2d.txt\", \"w\") as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c12_2d.txt\")\n ####c123\n df_c123.to_csv(\"c123_2d.txt\", header=True, index=None, sep=\" \", mode=\"w\")\n with open(\"c123_2d.txt\", \"r\") as f1, open(\"pA_c123_2d.txt\", \"w\") as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c123_2d.txt\")\n\n ####c1_arranged\n df_c1_arranged = df_c1.sort_values(by=\"pA_c1\", ascending=False)\n df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]\n df_c1_arranged.to_csv(\n \"c1_arranged_2d.txt\", header=True, index=None, sep=\" \", mode=\"w\"\n )\n with open(\"c1_arranged_2d.txt\", \"r\") as f1, open(\n \"pA_c1_arranged_2d.txt\", \"w\"\n ) as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c1_arranged_2d.txt\")\n ####c12_arranged\n df_c12_arranged = df_c12.sort_values(by=\"pA_c12\", ascending=False)\n df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]\n df_c12_arranged.to_csv(\n \"c12_arranged_2d.txt\", header=True, index=None, sep=\" \", mode=\"w\"\n )\n with open(\"c12_arranged_2d.txt\", \"r\") as f1, open(\n \"pA_c12_arranged_2d.txt\", \"w\"\n ) as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c12_arranged_2d.txt\")\n ####c123_arranged\n df_c123_arranged = df_c123.sort_values(by=\"pA_c123\", ascending=False)\n df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]\n df_c123_arranged.to_csv(\n \"c123_arranged_2d.txt\", header=True, index=None, sep=\" \", mode=\"w\"\n )\n with open(\"c123_arranged_2d.txt\", \"r\") as f1, open(\n \"pA_c123_arranged_2d.txt\", \"w\"\n ) as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c123_arranged_2d.txt\")\n\n ####c1_arranged\n df_c1_arranged[\"index\"] = df_c1_arranged.index\n index_list_c1 = df_c1_arranged[\"index\"].tolist()\n df[\"frame_index\"] = df.index\n df_frame_index = df[[\"frame_index\", \"index\"]]\n frame_indices_c1 = []\n index_indces_c1 = []\n 
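# For every bin index that survived the min_prob cutoff, the loop below\n # gathers all trajectory frame indices that fell into that bin; the two\n # parallel lists (frame index, bin index) are then flattened and combined\n # into df_c1_frame_index.\n 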
for i in index_list_c1:\n df_index_list_c1 = df_frame_index.loc[df_frame_index[\"index\"] == i]\n frame_c1 = df_index_list_c1[\"frame_index\"].tolist()\n frame_indices_c1.append(frame_c1)\n index_c1 = [i] * len(frame_c1)\n index_indces_c1.append(index_c1)\n frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]\n index_indces_c1 = [item for elem in index_indces_c1 for item in elem]\n df_c1_frame = pd.DataFrame(frame_indices_c1, columns=[\"frame_index\"])\n df_c1_index = pd.DataFrame(index_indces_c1, columns=[\"index\"])\n df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)\n df_c1_frame_index = df_c1_frame_index.groupby(\"index\").filter(\n lambda x: len(x) >= 10\n )\n df_c1_frame_index.to_csv(\n \"c1_frame_index_2d.txt\", header=True, index=None, sep=\" \", mode=\"w\"\n )\n with open(\"c1_frame_index_2d.txt\", \"r\") as f1, open(\n \"c1_frame_2d.txt\", \"w\"\n ) as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c1_frame_index_2d.txt\")\n ####c12_arranged\n df_c12_arranged[\"index\"] = df_c12_arranged.index\n index_list_c12 = df_c12_arranged[\"index\"].tolist()\n df[\"frame_index\"] = df.index\n df_frame_index = df[[\"frame_index\", \"index\"]]\n frame_indices_c12 = []\n index_indces_c12 = []\n for i in index_list_c12:\n df_index_list_c12 = df_frame_index.loc[df_frame_index[\"index\"] == i]\n frame_c12 = df_index_list_c12[\"frame_index\"].tolist()\n frame_indices_c12.append(frame_c12)\n index_c12 = [i] * len(frame_c12)\n index_indces_c12.append(index_c12)\n frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]\n index_indces_c12 = [item for elem in index_indces_c12 for item in elem]\n df_c12_frame = pd.DataFrame(frame_indices_c12, columns=[\"frame_index\"])\n df_c12_index = pd.DataFrame(index_indces_c12, columns=[\"index\"])\n df_c12_frame_index = pd.concat([df_c12_frame, df_c12_index], axis=1)\n df_c12_frame_index = df_c12_frame_index.groupby(\"index\").filter(\n lambda x: len(x) >= 10\n )\n df_c12_frame_index.to_csv(\n \"c12_frame_index_2d.txt\", header=True, index=None, sep=\" \", mode=\"w\"\n )\n with open(\"c12_frame_index_2d.txt\", \"r\") as f1, open(\n \"c12_frame_2d.txt\", \"w\"\n ) as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c12_frame_index_2d.txt\")\n ####c123_arranged\n df_c123_arranged[\"index\"] = df_c123_arranged.index\n df_c123_arranged[\"index\"] = df_c123_arranged.index\n index_list_c123 = df_c123_arranged[\"index\"].tolist()\n df[\"frame_index\"] = df.index\n df_frame_index = df[[\"frame_index\", \"index\"]]\n frame_indices_c123 = []\n index_indces_c123 = []\n for i in index_list_c123:\n df_index_list_c123 = df_frame_index.loc[df_frame_index[\"index\"] == i]\n frame_c123 = df_index_list_c123[\"frame_index\"].tolist()\n frame_indices_c123.append(frame_c123)\n index_c123 = [i] * len(frame_c123)\n index_indces_c123.append(index_c123)\n frame_indices_c123 = [item for elem in frame_indices_c123 for item in elem]\n index_indces_c123 = [item for elem in index_indces_c123 for item in elem]\n df_c123_frame = pd.DataFrame(frame_indices_c123, columns=[\"frame_index\"])\n df_c123_index = pd.DataFrame(index_indces_c123, columns=[\"index\"])\n df_c123_frame_index = pd.concat([df_c123_frame, df_c123_index], axis=1)\n df_c123_frame_index = df_c123_frame_index.groupby(\"index\").filter(\n lambda x: len(x) >= 10\n )\n df_c123_frame_index.to_csv(\n \"c123_frame_index_2d.txt\", header=True, index=None, sep=\" \", mode=\"w\"\n )\n 
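# The rewrite below strips any quote characters that pandas may have written\n # into the space-separated table, producing the plain c123_frame_2d.txt\n # file, after which the intermediate c123_frame_index_2d.txt is deleted.\n 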
with open(\"c123_frame_index_2d.txt\", \"r\") as f1, open(\n \"c123_frame_2d.txt\", \"w\"\n ) as f2:\n for line in f1:\n f2.write(line.replace('\"', \"\").replace(\"'\", \"\"))\n os.system(\"rm -rf c123_frame_index_2d.txt\")\n\n ####c1\n indices_c1_2d = df_c1_frame_index[\"index\"].unique()\n frames_c1 = []\n for i in indices_c1_2d:\n x = df_c1_frame_index.loc[df_c1_frame_index[\"index\"] == i]\n y = x[\"frame_index\"].values.tolist()\n z = random.sample(y, n_structures)\n frames_c1.append(z)\n frames_c1_2d = [item for elem in frames_c1 for item in elem]\n with open(\"frames_c1_2d.pickle\", \"wb\") as f:\n pk.dump(frames_c1_2d, f)\n with open(\"indices_c1_2d.pickle\", \"wb\") as f:\n pk.dump(indices_c1_2d, f)\n ####c12\n indices_c12_2d = df_c12_frame_index[\"index\"].unique()\n frames_c12 = []\n for i in indices_c12_2d:\n x = df_c12_frame_index.loc[df_c12_frame_index[\"index\"] == i]\n y = x[\"frame_index\"].values.tolist()\n z = random.sample(y, n_structures)\n frames_c12.append(z)\n frames_c12_2d = [item for elem in frames_c12 for item in elem]\n with open(\"frames_c12_2d.pickle\", \"wb\") as f:\n pk.dump(frames_c12_2d, f)\n with open(\"indices_c12_2d.pickle\", \"wb\") as f:\n pk.dump(indices_c12_2d, f)\n ####c123\n indices_c123_2d = df_c123_frame_index[\"index\"].unique()\n frames_c123 = []\n for i in indices_c123_2d:\n x = df_c123_frame_index.loc[df_c123_frame_index[\"index\"] == i]\n y = x[\"frame_index\"].values.tolist()\n z = random.sample(y, n_structures)\n frames_c123.append(z)\n frames_c123_2d = [item for elem in frames_c123 for item in elem]\n with open(\"frames_c123_2d.pickle\", \"wb\") as f:\n pk.dump(frames_c123_2d, f)\n with open(\"indices_c123_2d.pickle\", \"wb\") as f:\n pk.dump(indices_c123_2d, f)\n ##saving probabilities for each selected frame\n ####c1\n prob_c1_2d_list = []\n for i in indices_c1_2d:\n prob_c1_2d_list.append(df_c1[\"pA_c1\"][i])\n prob_c1_2d_list = list(\n itertools.chain.from_iterable(\n itertools.repeat(x, n_structures) for x in prob_c1_2d_list\n )\n )\n prob_c1_2d_list = [x / n_structures for x in prob_c1_2d_list]\n with open(\"prob_c1_2d_list.pickle\", \"wb\") as f:\n pk.dump(prob_c1_2d_list, f)\n ####c12\n prob_c12_2d_list = []\n for i in indices_c12_2d:\n prob_c12_2d_list.append(df_c12[\"pA_c12\"][i])\n prob_c12_2d_list = list(\n itertools.chain.from_iterable(\n itertools.repeat(x, n_structures) for x in prob_c12_2d_list\n )\n )\n prob_c12_2d_list = [x / n_structures for x in prob_c12_2d_list]\n with open(\"prob_c12_2d_list.pickle\", \"wb\") as f:\n pk.dump(prob_c12_2d_list, f)\n ####c123\n prob_c123_2d_list = []\n for i in indices_c123_2d:\n prob_c123_2d_list.append(df_c123[\"pA_c123\"][i])\n prob_c123_2d_list = list(\n itertools.chain.from_iterable(\n itertools.repeat(x, n_structures) for x in prob_c123_2d_list\n )\n )\n prob_c123_2d_list = [x / n_structures for x in prob_c123_2d_list]\n with open(\"prob_c123_2d_list.pickle\", \"wb\") as f:\n pk.dump(prob_c123_2d_list, f)\n\n ref_df_2d = pd.DataFrame(bins, columns=[\"binsX\", \"binsY\"])\n ref_df_2d[\"XY\"] = ref_df_2d.agg(\n lambda x: f\"{x['binsX']} , {x['binsX']}\", axis=1\n )\n ref_df_2d = ref_df_2d[[\"XY\"]]\n index_ref_2d = []\n for i in range(len(bins_tuple_X) * len(bins_tuple_Y)):\n index_ref_2d.append(i)\n index_ref_df_2d = pd.DataFrame(index_ref_2d, columns=[\"index\"])\n df_ref_2d = pd.concat([ref_df_2d, index_ref_df_2d], axis=1)\n df_ref_2d.to_csv(\"ref_2d.txt\", header=True, index=None, sep=\" \", mode=\"w\")\n\n df.to_csv(\"df_2d.csv\", index=False)\n os.system(\"rm -rf 
__pycache__\")\n print(\"Successfully Completed Reweighing\")\n\n\ndef save_frames():\n\n \"\"\"\n\n Creates a directory named we_structures. Inside this\n directory, there are six subdirectories (three for\n one-dimension reweighing and other three for\n two-dimensional reweighted frames). All frames\n for one, two and three-degree Maclaurin series\n expanded reweighted frames are present in their\n respective folders.\n\n \"\"\"\n\n cwd = os.getcwd()\n os.system(\"rm -rf we_structures\")\n os.system(\"mkdir we_structures\")\n os.chdir(cwd + \"/\" + \"we_structures\")\n os.system(\"mkdir 1d_c1\")\n os.system(\"mkdir 1d_c12\")\n os.system(\"mkdir 1d_c123\")\n os.system(\"mkdir 2d_c1\")\n os.system(\"mkdir 2d_c12\")\n os.system(\"mkdir 2d_c123\")\n os.chdir(cwd)\n df1 = pd.read_csv(\"df_1d.csv\")\n index = df1[\"index\"].tolist()\n frame = df1[\"frame_index\"].tolist()\n index_frame = dict(zip(frame, index))\n df2 = pd.read_csv(\"ref_1d.txt\", sep=\" \", delimiter=None, header=\"infer\")\n index_ = df2[\"index\"].tolist()\n bins = df2[\"bins\"].tolist()\n index_bins = dict(zip(index_, bins))\n #### 1d\n with open(\"frames_c1_1d.pickle\", \"rb\") as input_file:\n frames_c1_1d = pk.load(input_file)\n for i in frames_c1_1d:\n j = index_frame[i]\n frame_index = frames_c1_1d.index(i)\n k = index_bins[j]\n k = k.strip(\"[]\")\n k = k.replace(\" , \", \"_\")\n # traj = pt.load(\"system_final.nc\", top=\"system_final.prmtop\", frame_indices=[i])\n traj = md.load_frame(\n \"system_final.nc\", top=\"system_final.prmtop\", index=i\n )\n frame_pdb = str(frame_index) + \"_\" + k + \"_1d_c1_\" + str(i) + \".pdb\"\n # pt.save(frame_pdb, traj, overwrite=True)\n traj.save_pdb(frame_pdb, force_overwrite=True)\n target_dir = cwd + \"/\" + \"we_structures\" + \"/\" + \"1d_c1\"\n shutil.move(cwd + \"/\" + frame_pdb, target_dir + \"/\" + frame_pdb)\n with open(\"frames_c12_1d.pickle\", \"rb\") as input_file:\n frames_c12_1d = pk.load(input_file)\n for i in frames_c12_1d:\n j = index_frame[i]\n frame_index = frames_c12_1d.index(i)\n k = index_bins[j]\n k = k.strip(\"[]\")\n k = k.replace(\" , \", \"_\")\n # traj = pt.load(\"system_final.nc\", top=\"system_final.prmtop\", frame_indices=[i])\n traj = md.load_frame(\n \"system_final.nc\", top=\"system_final.prmtop\", index=i\n )\n frame_pdb = str(frame_index) + \"_\" + k + \"_1d_c12_\" + str(i) + \".pdb\"\n # pt.save(frame_pdb, traj, overwrite=True)\n traj.save_pdb(frame_pdb, force_overwrite=True)\n target_dir = cwd + \"/\" + \"we_structures\" + \"/\" + \"1d_c12\"\n shutil.move(cwd + \"/\" + frame_pdb, target_dir + \"/\" + frame_pdb)\n with open(\"frames_c123_1d.pickle\", \"rb\") as input_file:\n frames_c123_1d = pk.load(input_file)\n for i in frames_c123_1d:\n j = index_frame[i]\n frame_index = frames_c123_1d.index(i)\n k = index_bins[j]\n k = k.strip(\"[]\")\n k = k.replace(\" , \", \"_\")\n # traj = pt.load(\"system_final.nc\", top=\"system_final.prmtop\", frame_indices=[i])\n traj = md.load_frame(\n \"system_final.nc\", top=\"system_final.prmtop\", index=i\n )\n frame_pdb = str(frame_index) + \"_\" + k + \"_1d_c123_\" + str(i) + \".pdb\"\n # pt.save(frame_pdb, traj, overwrite=True)\n traj.save_pdb(frame_pdb, force_overwrite=True)\n target_dir = cwd + \"/\" + \"we_structures\" + \"/\" + \"1d_c123\"\n shutil.move(cwd + \"/\" + frame_pdb, target_dir + \"/\" + frame_pdb)\n df1 = pd.read_csv(\"df_2d.csv\")\n index = df1[\"index\"].tolist()\n frame = df1[\"frame_index\"].tolist()\n index_frame = dict(zip(frame, index))\n df2 = pd.read_csv(\"ref_2d.txt\", sep=\" \", 
delimiter=None, header=\"infer\")\n index_ = df2[\"index\"].tolist()\n bins = df2[\"XY\"].tolist()\n index_bins = dict(zip(index_, bins))\n #### 2d\n with open(\"frames_c1_2d.pickle\", \"rb\") as input_file:\n frames_c1_2d = pk.load(input_file)\n for i in frames_c1_2d:\n j = index_frame[i]\n frame_index = frames_c1_2d.index(i)\n k = index_bins[j]\n k = k.strip(\"[]\")\n k = k.replace(\"] , [\", \"_\")\n k = k.replace(\", \", \"_\")\n # traj = pt.load(\"system_final.nc\", top=\"system_final.prmtop\", frame_indices=[i])\n traj = md.load_frame(\n \"system_final.nc\", top=\"system_final.prmtop\", index=i\n )\n frame_pdb = str(frame_index) + \"_\" + k + \"_2d_c1_\" + str(i) + \".pdb\"\n # pt.save(frame_pdb, traj, overwrite=True)\n traj.save_pdb(frame_pdb, force_overwrite=True)\n target_dir = cwd + \"/\" + \"we_structures\" + \"/\" + \"2d_c1\"\n shutil.move(cwd + \"/\" + frame_pdb, target_dir + \"/\" + frame_pdb)\n with open(\"frames_c12_2d.pickle\", \"rb\") as input_file:\n frames_c12_2d = pk.load(input_file)\n for i in frames_c12_2d:\n j = index_frame[i]\n frame_index = frames_c12_2d.index(i)\n k = index_bins[j]\n k = k.strip(\"[]\")\n k = k.replace(\"] , [\", \"_\")\n k = k.replace(\", \", \"_\")\n # traj = pt.load(\"system_final.nc\", top=\"system_final.prmtop\", frame_indices=[i])\n traj = md.load_frame(\n \"system_final.nc\", top=\"system_final.prmtop\", index=i\n )\n frame_pdb = str(frame_index) + \"_\" + k + \"_2d_c12_\" + str(i) + \".pdb\"\n # pt.save(frame_pdb, traj, overwrite=True)\n traj.save_pdb(frame_pdb, force_overwrite=True)\n target_dir = cwd + \"/\" + \"we_structures\" + \"/\" + \"2d_c12\"\n shutil.move(cwd + \"/\" + frame_pdb, target_dir + \"/\" + frame_pdb)\n with open(\"frames_c123_2d.pickle\", \"rb\") as input_file:\n frames_c123_2d = pk.load(input_file)\n for i in frames_c123_2d:\n j = index_frame[i]\n frame_index = frames_c123_2d.index(i)\n k = index_bins[j]\n k = k.strip(\"[]\")\n k = k.replace(\"] , [\", \"_\")\n k = k.replace(\", \", \"_\")\n # traj = pt.load(\"system_final.nc\", top=\"system_final.prmtop\", frame_indices=[i])\n traj = md.load_frame(\n \"system_final.nc\", top=\"system_final.prmtop\", index=i\n )\n frame_pdb = str(frame_index) + \"_\" + k + \"_2d_c123_\" + str(i) + \".pdb\"\n # pt.save(frame_pdb, traj, overwrite=True)\n traj.save_pdb(frame_pdb, force_overwrite=True)\n target_dir = cwd + \"/\" + \"we_structures\" + \"/\" + \"2d_c123\"\n shutil.move(cwd + \"/\" + frame_pdb, target_dir + \"/\" + frame_pdb)\n\n\ndef save_we_inputs():\n\n \"\"\"\n Writes an input file in each of the simulation folder.\n Input file contains one column each for the name of\n the PDB file and its respective probability.\n\n \"\"\"\n\n cwd = os.getcwd()\n target_dir = cwd + \"/\" + \"we_structures\"\n dir_list = [\"1d_c1\", \"1d_c12\", \"1d_c123\", \"2d_c1\", \"2d_c12\", \"2d_c123\"]\n for i in dir_list:\n os.chdir(target_dir + \"/\" + i)\n pdbs = os.listdir(\".\")\n pickle_file = \"pdb_\" + i + \".pickle\"\n with open(pickle_file, \"wb\") as f:\n pk.dump(pdbs, f)\n shutil.move(\n target_dir + \"/\" + i + \"/\" + pickle_file, cwd + \"/\" + pickle_file\n )\n os.chdir(cwd)\n # c1_1d\n with open(\"prob_c1_1d_list.pickle\", \"rb\") as input_file:\n prob_c1_1d_list = pk.load(input_file)\n prob_c1_1d_list = [i / min(prob_c1_1d_list) for i in prob_c1_1d_list]\n prob_c1_1d_list = [i / sum(prob_c1_1d_list) for i in prob_c1_1d_list]\n with open(\"pdb_1d_c1.pickle\", \"rb\") as input_file:\n pdb_1d_c1 = pk.load(input_file)\n pdb_1d_c1_index = []\n for i in range(len(pdb_1d_c1)):\n 
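# Structures were written by save_frames with names of the form\n # <selection order>_<bin label>_1d_c1_<trajectory frame>.pdb, so the first\n # integer recovered by the regex below is the order in which the frame was\n # selected; it is parsed here so the table can be sorted by that index\n # before the probability and file-name columns are written out.\n 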
pdb_1d_c1_index.append(int(re.findall(r\"\\d+\", pdb_1d_c1[i])[0]))\n df = pd.DataFrame(\n list(zip(pdb_1d_c1, prob_c1_1d_list, pdb_1d_c1_index)),\n columns=[\"pdb_name\", \"probability\", \"pdb_index\"],\n )\n df = df.sort_values(by=[\"pdb_index\"])\n df = df[[\"probability\", \"pdb_name\"]]\n index_row = []\n for i in range(df.shape[0]):\n index_row.append(i)\n df_index = pd.DataFrame(index_row, columns=[\"index_\"])\n df_merged = pd.concat([df_index, df], axis=1)\n df_merged.to_csv(\n \"we_input_c1_1d.txt\", header=False, index=None, sep=\" \", mode=\"w\"\n )\n # c12_1d\n with open(\"prob_c12_1d_list.pickle\", \"rb\") as input_file:\n prob_c12_1d_list = pk.load(input_file)\n prob_c12_1d_list = [i / min(prob_c12_1d_list) for i in prob_c12_1d_list]\n prob_c12_1d_list = [i / sum(prob_c12_1d_list) for i in prob_c12_1d_list]\n with open(\"pdb_1d_c12.pickle\", \"rb\") as input_file:\n pdb_1d_c12 = pk.load(input_file)\n pdb_1d_c12_index = []\n for i in range(len(pdb_1d_c12)):\n pdb_1d_c12_index.append(int(re.findall(r\"\\d+\", pdb_1d_c12[i])[0]))\n df = pd.DataFrame(\n list(zip(pdb_1d_c12, prob_c12_1d_list, pdb_1d_c12_index)),\n columns=[\"pdb_name\", \"probability\", \"pdb_index\"],\n )\n df = df.sort_values(by=[\"pdb_index\"])\n df = df[[\"probability\", \"pdb_name\"]]\n index_row = []\n for i in range(df.shape[0]):\n index_row.append(i)\n df_index = pd.DataFrame(index_row, columns=[\"index_\"])\n df_merged = pd.concat([df_index, df], axis=1)\n df_merged.to_csv(\n \"we_input_c12_1d.txt\", header=False, index=None, sep=\" \", mode=\"w\"\n )\n # c123_1d\n with open(\"prob_c123_1d_list.pickle\", \"rb\") as input_file:\n prob_c123_1d_list = pk.load(input_file)\n prob_c123_1d_list = [i / min(prob_c123_1d_list) for i in prob_c123_1d_list]\n prob_c123_1d_list = [i / sum(prob_c123_1d_list) for i in prob_c123_1d_list]\n with open(\"pdb_1d_c123.pickle\", \"rb\") as input_file:\n pdb_1d_c123 = pk.load(input_file)\n pdb_1d_c123_index = []\n for i in range(len(pdb_1d_c123)):\n pdb_1d_c123_index.append(int(re.findall(r\"\\d+\", pdb_1d_c123[i])[0]))\n df = pd.DataFrame(\n list(zip(pdb_1d_c123, prob_c123_1d_list, pdb_1d_c123_index)),\n columns=[\"pdb_name\", \"probability\", \"pdb_index\"],\n )\n df = df.sort_values(by=[\"pdb_index\"])\n df = df[[\"probability\", \"pdb_name\"]]\n index_row = []\n for i in range(df.shape[0]):\n index_row.append(i)\n df_index = pd.DataFrame(index_row, columns=[\"index_\"])\n df_merged = pd.concat([df_index, df], axis=1)\n df_merged.to_csv(\n \"we_input_c123_1d.txt\", header=False, index=None, sep=\" \", mode=\"w\"\n )\n # c1_2d\n with open(\"prob_c1_2d_list.pickle\", \"rb\") as input_file:\n prob_c1_2d_list = pk.load(input_file)\n prob_c1_2d_list = [i / min(prob_c1_2d_list) for i in prob_c1_2d_list]\n prob_c1_2d_list = [i / sum(prob_c1_2d_list) for i in prob_c1_2d_list]\n with open(\"pdb_2d_c1.pickle\", \"rb\") as input_file:\n pdb_2d_c1 = pk.load(input_file)\n pdb_2d_c1_index = []\n for i in range(len(pdb_2d_c1)):\n pdb_2d_c1_index.append(int(re.findall(r\"\\d+\", pdb_2d_c1[i])[0]))\n df = pd.DataFrame(\n list(zip(pdb_2d_c1, prob_c1_2d_list, pdb_2d_c1_index)),\n columns=[\"pdb_name\", \"probability\", \"pdb_index\"],\n )\n df = df.sort_values(by=[\"pdb_index\"])\n df = df[[\"probability\", \"pdb_name\"]]\n index_row = []\n for i in range(df.shape[0]):\n index_row.append(i)\n df_index = pd.DataFrame(index_row, columns=[\"index_\"])\n df_merged = pd.concat([df_index, df], axis=1)\n df_merged.to_csv(\n \"we_input_c1_2d.txt\", header=False, index=None, sep=\" \", mode=\"w\"\n 
)\n # c12_2d\n with open(\"prob_c12_2d_list.pickle\", \"rb\") as input_file:\n prob_c12_2d_list = pk.load(input_file)\n prob_c12_2d_list = [i / min(prob_c12_2d_list) for i in prob_c12_2d_list]\n prob_c12_2d_list = [i / sum(prob_c12_2d_list) for i in prob_c12_2d_list]\n with open(\"pdb_2d_c12.pickle\", \"rb\") as input_file:\n pdb_2d_c12 = pk.load(input_file)\n pdb_2d_c12_index = []\n for i in range(len(pdb_2d_c12)):\n pdb_2d_c12_index.append(int(re.findall(r\"\\d+\", pdb_2d_c12[i])[0]))\n df = pd.DataFrame(\n list(zip(pdb_2d_c12, prob_c12_2d_list, pdb_2d_c12_index)),\n columns=[\"pdb_name\", \"probability\", \"pdb_index\"],\n )\n df = df.sort_values(by=[\"pdb_index\"])\n df = df[[\"probability\", \"pdb_name\"]]\n index_row = []\n for i in range(df.shape[0]):\n index_row.append(i)\n df_index = pd.DataFrame(index_row, columns=[\"index_\"])\n df_merged = pd.concat([df_index, df], axis=1)\n df_merged.to_csv(\n \"we_input_c12_2d.txt\", header=False, index=None, sep=\" \", mode=\"w\"\n )\n # c123_2d\n with open(\"prob_c123_2d_list.pickle\", \"rb\") as input_file:\n prob_c123_2d_list = pk.load(input_file)\n prob_c123_2d_list = [i / min(prob_c123_2d_list) for i in prob_c123_2d_list]\n prob_c123_2d_list = [i / sum(prob_c123_2d_list) for i in prob_c123_2d_list]\n with open(\"pdb_2d_c123.pickle\", \"rb\") as input_file:\n pdb_2d_c123 = pk.load(input_file)\n pdb_2d_c123_index = []\n for i in range(len(pdb_2d_c123)):\n pdb_2d_c123_index.append(int(re.findall(r\"\\d+\", pdb_2d_c123[i])[0]))\n df = pd.DataFrame(\n list(zip(pdb_2d_c123, prob_c123_2d_list, pdb_2d_c123_index)),\n columns=[\"pdb_name\", \"probability\", \"pdb_index\"],\n )\n df = df.sort_values(by=[\"pdb_index\"])\n df = df[[\"probability\", \"pdb_name\"]]\n index_row = []\n for i in range(df.shape[0]):\n index_row.append(i)\n df_index = pd.DataFrame(index_row, columns=[\"index_\"])\n df_merged = pd.concat([df_index, df], axis=1)\n df_merged.to_csv(\n \"we_input_c123_2d.txt\", header=False, index=None, sep=\" \", mode=\"w\"\n )\n\n\ndef arrange_files():\n\n \"\"\"\n\n Creates directories and move files to appropriate folders.\n\n \"\"\"\n\n cwd = os.getcwd()\n os.system(\"rm -rf txt_csv_files\")\n os.system(\"rm -rf we_inputs\")\n os.system(\"rm -rf dat_files\")\n os.system(\"rm -rf pickle_files\")\n os.system(\"rm -rf system_files\")\n os.system(\"mkdir txt_csv_files\")\n os.system(\"mkdir we_inputs\")\n os.system(\"mkdir dat_files\")\n os.system(\"mkdir pickle_files\")\n os.system(\"mkdir system_files\")\n shutil.move(\n cwd + \"/\" + \"c1_frame_1d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"c1_frame_1d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"c12_frame_1d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"c12_frame_1d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"c123_frame_1d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"c123_frame_1d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"c1_frame_2d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"c1_frame_2d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"c12_frame_2d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"c12_frame_2d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"c123_frame_2d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"c123_frame_2d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"pA_c1_1d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"pA_c1_1d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"pA_c12_1d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"pA_c12_1d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"pA_c123_1d.txt\",\n cwd + \"/\" + \"txt_csv_files\" 
+ \"/\" + \"pA_c123_1d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"pA_c1_2d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"pA_c1_2d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"pA_c12_2d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"pA_c12_2d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"pA_c123_2d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"pA_c123_2d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"pA_c1_arranged_1d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"pA_c1_arranged_1d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"pA_c12_arranged_1d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"pA_c12_arranged_1d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"pA_c123_arranged_1d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"pA_c123_arranged_1d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"pA_c1_arranged_2d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"pA_c1_arranged_2d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"pA_c12_arranged_2d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"pA_c12_arranged_2d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"pA_c123_arranged_2d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"pA_c123_arranged_2d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"ref_1d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"ref_1d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"ref_2d.txt\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"ref_2d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"df_1d.csv\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"df_1d.csv\",\n )\n shutil.move(\n cwd + \"/\" + \"df_2d.csv\",\n cwd + \"/\" + \"txt_csv_files\" + \"/\" + \"df_2d.csv\",\n )\n shutil.move(\n cwd + \"/\" + \"we_input_c1_1d.txt\",\n cwd + \"/\" + \"we_inputs\" + \"/\" + \"we_input_c1_1d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"we_input_c12_1d.txt\",\n cwd + \"/\" + \"we_inputs\" + \"/\" + \"we_input_c12_1d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"we_input_c123_1d.txt\",\n cwd + \"/\" + \"we_inputs\" + \"/\" + \"we_input_c123_1d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"we_input_c1_2d.txt\",\n cwd + \"/\" + \"we_inputs\" + \"/\" + \"we_input_c1_2d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"we_input_c12_2d.txt\",\n cwd + \"/\" + \"we_inputs\" + \"/\" + \"we_input_c12_2d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"we_input_c123_2d.txt\",\n cwd + \"/\" + \"we_inputs\" + \"/\" + \"we_input_c123_2d.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"weights.dat\",\n cwd + \"/\" + \"dat_files\" + \"/\" + \"weights.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"Psi.dat\", cwd + \"/\" + \"dat_files\" + \"/\" + \"Psi.txt\"\n )\n shutil.move(\n cwd + \"/\" + \"Phi_Psi.dat\",\n cwd + \"/\" + \"dat_files\" + \"/\" + \"Phi_Psi.txt\",\n )\n shutil.move(\n cwd + \"/\" + \"prob_c1_1d_list.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"prob_c1_1d_list.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"prob_c12_1d_list.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"prob_c12_1d_list.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"prob_c123_1d_list.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"prob_c123_1d_list.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"prob_c1_2d_list.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"prob_c1_2d_list.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"prob_c12_2d_list.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"prob_c12_2d_list.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"prob_c123_2d_list.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"prob_c123_2d_list.pickle\",\n )\n shutil.move(\n cwd + \"/\" + 
\"pdb_1d_c1.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"pdb_1d_c1.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"pdb_1d_c12.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"pdb_1d_c12.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"pdb_1d_c123.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"pdb_1d_c123.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"pdb_2d_c1.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"pdb_2d_c1.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"pdb_2d_c12.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"pdb_2d_c12.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"pdb_2d_c123.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"pdb_2d_c123.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"frames_c1_1d.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"frames_c1_1d.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"frames_c12_1d.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"frames_c12_1d.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"frames_c123_1d.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"frames_c123_1d.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"frames_c1_2d.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"frames_c1_2d.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"frames_c12_2d.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"frames_c12_2d.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"frames_c123_2d.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"frames_c123_2d.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"indices_c1_1d.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"indices_c1_1d.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"indices_c12_1d.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"indices_c12_1d.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"indices_c123_1d.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"indices_c123_1d.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"indices_c1_2d.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"indices_c1_2d.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"indices_c12_2d.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"indices_c12_2d.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"indices_c123_2d.pickle\",\n cwd + \"/\" + \"pickle_files\" + \"/\" + \"indices_c123_2d.pickle\",\n )\n shutil.move(\n cwd + \"/\" + \"system_final.inpcrd\",\n cwd + \"/\" + \"system_files\" + \"/\" + \"system_final.inpcrd\",\n )\n shutil.move(\n cwd + \"/\" + \"system_final.nc\",\n cwd + \"/\" + \"system_files\" + \"/\" + \"system_final.nc\",\n )\n shutil.move(\n cwd + \"/\" + \"system_final.out\",\n cwd + \"/\" + \"system_files\" + \"/\" + \"system_final.out\",\n )\n shutil.move(\n cwd + \"/\" + \"system_final.prmtop\",\n cwd + \"/\" + \"system_files\" + \"/\" + \"system_final.prmtop\",\n )\n shutil.move(\n cwd + \"/\" + \"system_final.rst\",\n cwd + \"/\" + \"system_files\" + \"/\" + \"system_final.rst\",\n )\n shutil.move(\n cwd + \"/\" + \"gamd.log\", cwd + \"/\" + \"system_files\" + \"/\" + \"gamd.log\"\n )\n shutil.move(\n cwd + \"/\" + \"md.in\", cwd + \"/\" + \"system_files\" + \"/\" + \"md.in\"\n )\n shutil.move(\n cwd + \"/\" + \"mdinfo\", cwd + \"/\" + \"system_files\" + \"/\" + \"mdinfo\"\n )\n shutil.move(\n cwd + \"/\" + \"gamd-restart.dat\",\n cwd + \"/\" + \"system_files\" + \"/\" + \"gamd-restart.dat\",\n )\n\n\ndef run_reweigh():\n\n \"\"\"\n Runs reweighing calculations systematically\n in the simulation folder.\n\n \"\"\"\n\n dir_list = [\n \"dihedral_threshold_lower\",\n \"dihedral_threshold_upper\",\n 
\"dual_threshold_lower\",\n \"dual_threshold_upper\",\n \"total_threshold_lower\",\n \"total_threshold_upper\",\n ]\n cwd = os.getcwd()\n target_dir = cwd + \"/\" + \"gamd_simulations\" + \"/\"\n # run reweighting and analysis in each of the simulation folder\n for i in dir_list:\n os.chdir(target_dir + i)\n create_data_files()\n reweight_1d()\n reweight_2d()\n save_frames()\n save_we_inputs()\n arrange_files()\n os.chdir(cwd)\n\n\ndef save_westpa_inputs():\n\n \"\"\"\n Creates separate folders to initiate WE simulations.\n\n \"\"\"\n\n cwd = os.getcwd()\n list_dir = [\"1d_c1\", \"1d_c12\", \"1d_c123\", \"2d_c1\", \"2d_c12\", \"2d_c123\"]\n for i in list_dir:\n os.chdir(cwd + \"/\" + \"we_structures\" + \"/\" + i)\n files = os.listdir(\".\")\n file_to_find = \"*.pdb\"\n pdb_list = []\n for x in files:\n if fnmatch.fnmatch(x, file_to_find):\n pdb_list.append(x)\n for j in pdb_list:\n fix_cap_remove_nme(j)\n fix_cap_replace_nme(j)\n inpcrd_file = j[:-4] + \".inpcrd\"\n filename = \"input_\" + j[:-4] + \".leap\"\n file = open(filename, \"w\")\n file.write(\"source leaprc.protein.ff14SB\" + \"\\n\")\n file.write(\"source leaprc.water.tip3p\" + \"\\n\")\n file.write(\"set default FlexibleWater on\" + \"\\n\")\n file.write(\"set default PBRadii mbondi2\" + \"\\n\")\n file.write(\"pdb = loadpdb \" + j + \"\\n\")\n file.write(\n \"saveamberparm pdb \"\n + j[:-4]\n + \".prmtop \"\n + j[:-4]\n + \".inpcrd\"\n + \"\\n\"\n )\n file.write(\"quit\" + \"\\n\")\n file.close()\n files = os.listdir(\".\")\n file_to_find = \"*.leap\"\n leap_list = []\n for y in files:\n if fnmatch.fnmatch(y, file_to_find):\n leap_list.append(y)\n for k in leap_list:\n command = \"tleap -f {}\".format(k)\n os.system(command)\n os.system(\"rm -rf leap.log\")\n os.system(\"rm -rf *prmtop*\")\n os.system(\"rm -rf *leap*\")\n os.system(\"rm -rf bstates\")\n os.system(\"mkdir bstates\")\n for j in pdb_list:\n shutil.move(\n cwd\n + \"/\"\n + \"we_structures\"\n + \"/\"\n + i\n + \"/\"\n + j[:-4]\n + \".inpcrd\",\n cwd\n + \"/\"\n + \"we_structures\"\n + \"/\"\n + i\n + \"/\"\n + \"bstates\"\n + \"/\"\n + j[:-4]\n + \".inpcrd\",\n )\n os.chdir(cwd)\n\n os.system(\"rm -rf westpa_inputs\")\n os.system(\"mkdir westpa_inputs\")\n for l in list_dir:\n os.chdir(cwd + \"/\" + \"westpa_inputs\")\n command = \"rm -rf {}\".format(l)\n os.system(command)\n command = \"mkdir {}\".format(l)\n os.system(command)\n shutil.move(\n cwd + \"/\" + \"we_structures\" + \"/\" + l + \"/\" + \"bstates\",\n cwd + \"/\" + \"westpa_inputs\" + \"/\" + l + \"/\" + \"bstates\",\n )\n os.chdir(cwd)\n shutil.copy(\n cwd + \"/\" + \"we_inputs\" + \"/\" + \"we_input_c1_1d.txt\",\n cwd\n + \"/\"\n + \"westpa_inputs\"\n + \"/\"\n + list_dir[0]\n + \"/\"\n + \"we_input_c1_1d.txt\",\n )\n shutil.copy(\n cwd + \"/\" + \"we_inputs\" + \"/\" + \"we_input_c12_1d.txt\",\n cwd\n + \"/\"\n + \"westpa_inputs\"\n + \"/\"\n + list_dir[1]\n + \"/\"\n + \"we_input_c12_1d.txt\",\n )\n shutil.copy(\n cwd + \"/\" + \"we_inputs\" + \"/\" + \"we_input_c123_1d.txt\",\n cwd\n + \"/\"\n + \"westpa_inputs\"\n + \"/\"\n + list_dir[2]\n + \"/\"\n + \"we_input_c123_1d.txt\",\n )\n shutil.copy(\n cwd + \"/\" + \"we_inputs\" + \"/\" + \"we_input_c1_2d.txt\",\n cwd\n + \"/\"\n + \"westpa_inputs\"\n + \"/\"\n + list_dir[3]\n + \"/\"\n + \"we_input_c1_2d.txt\",\n )\n shutil.copy(\n cwd + \"/\" + \"we_inputs\" + \"/\" + \"we_input_c12_2d.txt\",\n cwd\n + \"/\"\n + \"westpa_inputs\"\n + \"/\"\n + list_dir[4]\n + \"/\"\n + \"we_input_c12_2d.txt\",\n )\n shutil.copy(\n cwd + \"/\" + \"we_inputs\" + 
\"/\" + \"we_input_c123_2d.txt\",\n cwd\n + \"/\"\n + \"westpa_inputs\"\n + \"/\"\n + list_dir[5]\n + \"/\"\n + \"we_input_c123_2d.txt\",\n )\n\n for i in list_dir:\n os.chdir(cwd + \"/\" + \"westpa_inputs\" + \"/\" + i)\n for file in os.listdir(\".\"):\n if fnmatch.fnmatch(file, \"*.txt\"):\n file_to_rename = file\n f = open(file_to_rename, \"rt\")\n data = f.read()\n data = data.replace(\"pdb\", \"inpcrd\")\n f.close()\n f = open(file_to_rename, \"wt\")\n f.write(data)\n f.close()\n os.rename(file_to_rename, \"BASIS_STATES\")\n os.chdir(cwd)\n\n for i in list_dir:\n os.chdir(cwd + \"/\" + \"westpa_inputs\" + \"/\" + i)\n os.mkdir(\"CONFIG\")\n shutil.copy(\n cwd + \"/\" + \"system_files\" + \"/\" + \"system_final.prmtop\",\n cwd\n + \"/\"\n + \"westpa_inputs\"\n + \"/\"\n + i\n + \"/\"\n + \"CONFIG\"\n + \"/\"\n + \"system_final.prmtop\",\n )\n os.chdir(cwd)\n\n\ndef run_westpa_inputs():\n\n \"\"\"\n\n Systematically runs save_westpa_inputs function in\n the simulation directory.\n\n \"\"\"\n\n dir_list = [\n \"dihedral_threshold_lower\",\n \"dihedral_threshold_upper\",\n \"dual_threshold_lower\",\n \"dual_threshold_upper\",\n \"total_threshold_lower\",\n \"total_threshold_upper\",\n ]\n cwd = os.getcwd()\n source_dir = cwd + \"/\"\n target_dir = cwd + \"/\" + \"gamd_simulations\" + \"/\"\n\n for i in dir_list:\n os.chdir(target_dir + i)\n save_westpa_inputs()\n os.chdir(cwd)\n\n\ndef transfer_files():\n\n \"\"\"\n\n Deletes unnecessary files in the simulation\n directory and creates a new WE simulation folder\n\n \"\"\"\n\n os.system(\"rm -rf westpa_dir\")\n os.system(\"mkdir westpa_dir\")\n dir_list = [\n \"dihedral_threshold_lower\",\n \"dihedral_threshold_upper\",\n \"dual_threshold_lower\",\n \"dual_threshold_upper\",\n \"total_threshold_lower\",\n \"total_threshold_upper\",\n ]\n cwd = os.getcwd()\n source_dir = cwd + \"/\"\n target_dir = cwd + \"/\" + \"gamd_simulations\" + \"/\"\n for i in dir_list:\n os.chdir(source_dir + \"westpa_dir\")\n command = \"mkdir {}\".format(i)\n os.system(command)\n os.chdir(cwd)\n for i in dir_list:\n shutil.copytree(\n target_dir + i + \"/\" + \"westpa_inputs\",\n source_dir + \"westpa_dir\" + \"/\" + i + \"/\" \"westpa_inputs\",\n )\n we_list = [\"1d_c1\", \"1d_c12\", \"1d_c123\", \"2d_c1\", \"2d_c12\", \"2d_c123\"]\n for j in we_list:\n shutil.copytree(\n source_dir\n + \"westpa_dir\"\n + \"/\"\n + i\n + \"/\"\n + \"westpa_inputs\"\n + \"/\"\n + j,\n source_dir + \"westpa_dir\" + \"/\" + i + \"/\" + j,\n )\n dest_dir = source_dir + \"westpa_dir\" + \"/\" + i\n os.chdir(dest_dir)\n os.system(\"rm -rf westpa_inputs\")\n os.chdir(cwd)\n os.chdir(cwd)\n\n\ndef add_vectors_westpa_files():\n\n \"\"\"\n\n Adds box vector dimensions to the inpcrd file.\n To be used only when the box vector dimensions\n are not available at the last line of inpcrd file.\n\n \"\"\"\n\n cwd = os.getcwd()\n source_dir = cwd\n westpa_dir = cwd + \"/\" + \"westpa_dir\"\n os.chdir(source_dir + \"/\" + \"starting_structures\")\n with open(\"system_final.inpcrd\") as f:\n for line in f:\n pass\n vector_information = line\n print(vector_information)\n os.chdir(source_dir)\n dir_list = [\n \"dihedral_threshold_lower\",\n \"dihedral_threshold_upper\",\n \"dual_threshold_lower\",\n \"dual_threshold_upper\",\n \"total_threshold_lower\",\n \"total_threshold_upper\",\n ]\n we_list = [\"1d_c1\", \"1d_c12\", \"1d_c123\", \"2d_c1\", \"2d_c12\", \"2d_c123\"]\n for i in dir_list:\n os.chdir(westpa_dir + \"/\" + str(i))\n for j in we_list:\n os.chdir(\n westpa_dir + \"/\" + str(i) + \"/\" + 
str(j) + \"/\" + \"bstates\"\n )\n files = os.listdir(\".\")\n file_to_find = \"*.inpcrd\"\n inpcrd_list = []\n for k in files:\n if fnmatch.fnmatch(k, file_to_find):\n inpcrd_list.append(k)\n for l in inpcrd_list:\n with open(l, \"a+\") as f:\n f.write(vector_information)\n os.chdir(cwd)\n\n\ndef we_analysis():\n\n \"\"\"\n\n Runs short MD simulation for saved inpcrd files.\n\n \"\"\"\n\n cwd = os.getcwd()\n os.chdir(cwd + \"/\" + \"westpa_dir\")\n dir_list = [\n \"dihedral_threshold_lower\",\n \"dihedral_threshold_upper\",\n \"dual_threshold_lower\",\n \"dual_threshold_upper\",\n \"total_threshold_lower\",\n \"total_threshold_upper\",\n ]\n we_list = [\"1d_c1\", \"1d_c12\", \"1d_c123\", \"2d_c1\", \"2d_c12\", \"2d_c123\"]\n for i in dir_list:\n for j in we_list:\n os.chdir(cwd + \"/\" + \"westpa_dir\" + \"/\" + str(i))\n os.chdir(cwd + \"/\" + \"westpa_dir\" + \"/\" + str(i) + \"/\" + str(j))\n if len(open(\"BASIS_STATES\").readlines()) > 0:\n df = pd.read_csv(\"BASIS_STATES\", delimiter=\" \", header=None)\n df.columns = [[\"descriptor\", \"probability\", \"file_name\"]]\n df1 = df[[\"file_name\"]]\n inpcrd_list = df1.values.tolist()\n inpcrd_list = list(itertools.chain(*inpcrd_list))\n os.system(\"rm -rf md_sims\")\n os.system(\"mkdir md_sims\")\n os.chdir(\n cwd\n + \"/\"\n + \"westpa_dir\"\n + \"/\"\n + str(i)\n + \"/\"\n + str(j)\n + \"/\"\n + \"md_sims\"\n )\n with open(\"md.in\", \"w\") as f:\n f.write(\n \"Run minimization followed by saving rst file\" + \"\\n\"\n )\n f.write(\"&cntrl\" + \"\\n\")\n f.write(\n \" imin = 1, maxcyc = 10000, ntpr = 5, iwrap = 1, ntxo = 1\"\n + \"\\n\"\n )\n f.write(\"&end\" + \"\\n\")\n for k in inpcrd_list:\n source_dir = (\n cwd\n + \"/\"\n + \"westpa_dir\"\n + \"/\"\n + str(i)\n + \"/\"\n + str(j)\n + \"/\"\n + \"bstates\"\n )\n target_dir = (\n cwd\n + \"/\"\n + \"westpa_dir\"\n + \"/\"\n + str(i)\n + \"/\"\n + str(j)\n + \"/\"\n + \"md_sims\"\n )\n shutil.copy(\n source_dir + \"/\" + str(k), target_dir + \"/\" + str(k)\n )\n source_dir = cwd + \"/\" + \"starting_structures\"\n target_dir = (\n cwd\n + \"/\"\n + \"westpa_dir\"\n + \"/\"\n + str(i)\n + \"/\"\n + str(j)\n + \"/\"\n + \"md_sims\"\n )\n shutil.copy(\n source_dir + \"/\" + \"system_final.prmtop\",\n target_dir + \"/\" + \"system_final.prmtop\",\n )\n for l in range(len(inpcrd_list)):\n command = (\n \"pmemd.cuda -O -i md.in -o \"\n + inpcrd_list[l][:-6]\n + \"out\"\n + \" -p system_final.prmtop -c \"\n + inpcrd_list[l]\n + \" -r \"\n + inpcrd_list[l][:-6]\n + \"rst\"\n )\n print(command)\n os.system(command)\n os.chdir(cwd)\n\n\ndef correction_westpa():\n\n \"\"\"\n\n Eliminates all inpcrd files crashed during the short MD simulation\n run. 
Also create folders for .rst files in case it is needed for\n WE simulations\n\n \"\"\"\n\n cwd = os.getcwd()\n os.chdir(cwd + \"/\" + \"westpa_dir\")\n dir_list = [\n \"dihedral_threshold_lower\",\n \"dihedral_threshold_upper\",\n \"dual_threshold_lower\",\n \"dual_threshold_upper\",\n \"total_threshold_lower\",\n \"total_threshold_upper\",\n ]\n we_list = [\"1d_c1\", \"1d_c12\", \"1d_c123\", \"2d_c1\", \"2d_c12\", \"2d_c123\"]\n for i in dir_list:\n for j in we_list:\n os.chdir(cwd + \"/\" + \"westpa_dir\" + \"/\" + str(i))\n os.chdir(cwd + \"/\" + \"westpa_dir\" + \"/\" + str(i) + \"/\" + str(j))\n if len(open(\"BASIS_STATES\").readlines()) > 0:\n os.chdir(\n cwd\n + \"/\"\n + \"westpa_dir\"\n + \"/\"\n + str(i)\n + \"/\"\n + str(j)\n + \"/\"\n + \"md_sims\"\n )\n files = os.listdir(\".\")\n file_to_find = \"*.out\"\n out_list = []\n for y in files:\n if fnmatch.fnmatch(y, file_to_find):\n out_list.append(y)\n list_failed_jobs = []\n for out_file in out_list:\n with open(out_file, \"r\") as f:\n last_line = f.readlines()[-2]\n if last_line.startswith(\"|\") == False:\n list_failed_jobs.append(out_file)\n for c in range(len(list_failed_jobs)):\n command = \"rm -rf \" + list_failed_jobs[c]\n os.system(command)\n for d in range(len(list_failed_jobs)):\n command = \"rm -rf \" + list_failed_jobs[d][:-3] + \"rst\"\n os.system(command)\n for e in range(len(list_failed_jobs)):\n command = \"rm -rf \" + list_failed_jobs[e][:-3] + \"inpcrd\"\n os.system(command)\n for f in range(len(list_failed_jobs)):\n command = \"rm -rf \" + list_failed_jobs[f][:-3] + \"nc\"\n os.system(command)\n\n files = os.listdir(\".\")\n file_to_find = \"*.rst\"\n rst_list = []\n for y in files:\n if fnmatch.fnmatch(y, file_to_find):\n rst_list.append(y)\n rst_failed_jobs = []\n for rst_file in rst_list:\n with open(rst_file, \"r\") as f:\n req_line = f.readlines()[2]\n if \"NaN\" in req_line:\n rst_failed_jobs.append(rst_file)\n for g in range(len(rst_failed_jobs)):\n command = \"rm -rf \" + rst_failed_jobs[g]\n os.system(command)\n for h in range(len(rst_failed_jobs)):\n command = \"rm -rf \" + rst_failed_jobs[h][:-3] + \"out\"\n os.system(command)\n for u in range(len(rst_failed_jobs)):\n command = \"rm -rf \" + rst_failed_jobs[u][:-3] + \"inpcrd\"\n os.system(command)\n for v in range(len(rst_failed_jobs)):\n command = \"rm -rf \" + rst_failed_jobs[v][:-3] + \"nc\"\n os.system(command)\n\n files_2 = os.listdir(\".\")\n file_to_find_2 = \"*.rst\"\n rst_list_2 = []\n for y in files_2:\n if fnmatch.fnmatch(y, file_to_find_2):\n rst_list_2.append(y)\n rst_failed_jobs_2 = []\n for rst_file_2 in rst_list_2:\n with open(rst_file_2, \"r\") as f:\n lines_file = f.readlines()\n for req_line in lines_file:\n if \"*\" in req_line:\n rst_failed_jobs_2.append(rst_file_2)\n for g in range(len(rst_failed_jobs_2)):\n command = \"rm -rf \" + rst_failed_jobs_2[g]\n os.system(command)\n for h in range(len(rst_failed_jobs_2)):\n command = \"rm -rf \" + rst_failed_jobs_2[h][:-3] + \"out\"\n os.system(command)\n for u in range(len(rst_failed_jobs_2)):\n command = \"rm -rf \" + rst_failed_jobs_2[u][:-3] + \"inpcrd\"\n os.system(command)\n for v in range(len(rst_failed_jobs_2)):\n command = \"rm -rf \" + rst_failed_jobs_2[v][:-3] + \"nc\"\n os.system(command)\n\n os.system(\"rm -rf md.in\")\n os.system(\"rm -rf system_final.prmtop\")\n os.system(\"rm -rf mdinfo\")\n files = os.listdir(\".\")\n inpcrd_file_to_find = \"*.inpcrd\"\n rst_file_to_find = \"*.rst\"\n inpcrd_file_list = []\n for y in files:\n if fnmatch.fnmatch(y, 
inpcrd_file_to_find):\n inpcrd_file_list.append(y)\n rst_file_list = []\n for z in files:\n if fnmatch.fnmatch(z, rst_file_to_find):\n rst_file_list.append(z)\n os.chdir(\n cwd + \"/\" + \"westpa_dir\" + \"/\" + str(i) + \"/\" + str(j)\n )\n os.system(\"rm -rf bstates_corrected_rst\")\n os.system(\"mkdir bstates_corrected_rst\")\n os.system(\"rm -rf bstates_corrected_inpcrd\")\n os.system(\"mkdir bstates_corrected_inpcrd\")\n for x in inpcrd_file_list:\n shutil.copy(\n cwd\n + \"/\"\n + \"westpa_dir\"\n + \"/\"\n + str(i)\n + \"/\"\n + str(j)\n + \"/\"\n + \"md_sims\"\n + \"/\"\n + str(x),\n cwd\n + \"/\"\n + \"westpa_dir\"\n + \"/\"\n + str(i)\n + \"/\"\n + str(j)\n + \"/\"\n + \"bstates_corrected_inpcrd\"\n + \"/\"\n + str(x),\n )\n for y in rst_file_list:\n shutil.copy(\n cwd\n + \"/\"\n + \"westpa_dir\"\n + \"/\"\n + str(i)\n + \"/\"\n + str(j)\n + \"/\"\n + \"md_sims\"\n + \"/\"\n + str(y),\n cwd\n + \"/\"\n + \"westpa_dir\"\n + \"/\"\n + str(i)\n + \"/\"\n + str(j)\n + \"/\"\n + \"bstates_corrected_rst\"\n + \"/\"\n + str(y),\n )\n df = pd.read_csv(\"BASIS_STATES\", sep=\" \", header=None)\n df.columns = [\"index_df\", \"probability\", \"inpcrd\"]\n df = df[[\"probability\", \"inpcrd\"]]\n df = df[df.inpcrd.str.contains(\"|\".join(inpcrd_file_list))]\n index_row_list = []\n for n in range(df.shape[0]):\n index_row_list.append(n)\n df = df.assign(index_=index_row_list)\n df = df[[\"index_\", \"probability\", \"inpcrd\"]]\n df.to_csv(\n \"BASIS_STATES_CORRECTED_INPCRD\",\n header=False,\n index=None,\n sep=\" \",\n mode=\"w\",\n )\n fin = open(\"BASIS_STATES_CORRECTED_INPCRD\", \"rt\")\n fout = open(\"BASIS_STATES_CORRECTED_RST\", \"wt\")\n for line in fin:\n fout.write(line.replace(\"inpcrd\", \"rst\"))\n fin.close()\n fout.close()\n os.chdir(cwd)\n\n\ndef plot_contrib():\n\n \"\"\"\n\n Plots to review the analysis done. 
Plot bar\n graphs for the number of structures obtained\n for WE simulation for each of the potential\n boosts during GaMD simulation.\n\n \"\"\"\n\n cwd = os.getcwd()\n os.chdir(cwd + \"/\" + \"westpa_dir\")\n dir_list = [\n \"dihedral_threshold_lower\",\n \"dihedral_threshold_upper\",\n \"dual_threshold_lower\",\n \"dual_threshold_upper\",\n \"total_threshold_lower\",\n \"total_threshold_upper\",\n ]\n we_list = [\"1d_c1\", \"1d_c12\", \"1d_c123\", \"2d_c1\", \"2d_c12\", \"2d_c123\"]\n confs = []\n for i in dir_list:\n conf_within = []\n for j in we_list:\n os.chdir(cwd + \"/\" + \"westpa_dir\" + \"/\" + str(i))\n os.chdir(cwd + \"/\" + \"westpa_dir\" + \"/\" + str(i) + \"/\" + str(j))\n if len(open(\"BASIS_STATES\").readlines()) > 0:\n count1 = len(open(\"BASIS_STATES\").readlines())\n count2 = len(open(\"BASIS_STATES_CORRECTED_RST\").readlines())\n conf = str(i), str(j), count1, count2\n conf_within.append(conf)\n confs.append(conf_within)\n print(confs)\n os.chdir(cwd)\n\n corrected_list = []\n for i in range(len(confs)):\n corrected_list_1 = []\n for j in range(len(confs[i])):\n corrected_list_1.append(confs[i][j][3])\n corrected_list.append(corrected_list_1)\n print(corrected_list)\n\n expanse_list = []\n for i in range(len(confs)):\n expanse_list_1 = []\n for j in range(len(confs[i])):\n expanse_list_1.append(confs[i][j][1])\n expanse_list.append(expanse_list_1)\n print(expanse_list)\n\n x0 = expanse_list[0]\n y0 = corrected_list[0]\n x1 = expanse_list[1]\n y1 = corrected_list[1]\n x2 = expanse_list[2]\n y2 = corrected_list[2]\n x3 = expanse_list[3]\n y3 = corrected_list[3]\n x4 = expanse_list[4]\n y4 = corrected_list[4]\n x5 = expanse_list[5]\n y5 = corrected_list[5]\n\n y = y0\n x = x0\n title = \"Configurations vs Different Expansions\" + \" for \" + dir_list[0]\n print(title)\n sns.set(font_scale=1)\n plt.rcParams[\"figure.figsize\"] = (8, 4)\n plt.rcParams[\"font.family\"] = \"serif\"\n style.use(\"fivethirtyeight\")\n g = sns.barplot(y, x, palette=(\"binary\"))\n g.grid(False)\n g.set_title(title)\n g.set(xlabel=\"Configurations\", ylabel=\"Expansion\")\n ax = g\n for i, v in enumerate(y):\n ax.text(v + 1, i + 0.25, str(v), color=\"black\", fontweight=\"bold\")\n fig_name = dir_list[0]\n plt.savefig(fig_name, bbox_inches=\"tight\")\n plt.show(block=False)\n plt.pause(1)\n plt.close()\n\n y = y1\n x = x1\n title = \"Configurations vs Different Expansions\" + \" for \" + dir_list[1]\n print(title)\n sns.set(font_scale=1)\n plt.rcParams[\"figure.figsize\"] = (8, 4)\n plt.rcParams[\"font.family\"] = \"serif\"\n style.use(\"fivethirtyeight\")\n g = sns.barplot(y, x, palette=(\"binary\"))\n g.grid(False)\n g.set_title(title)\n g.set(xlabel=\"Configurations\", ylabel=\"Expansion\")\n ax = g\n for i, v in enumerate(y):\n ax.text(v + 1, i + 0.25, str(v), color=\"black\", fontweight=\"bold\")\n fig_name = dir_list[1]\n plt.savefig(fig_name, bbox_inches=\"tight\")\n plt.show(block=False)\n plt.pause(1)\n plt.close()\n\n y = y2\n x = x2\n title = \"Configurations vs Different Expansions\" + \" for \" + dir_list[2]\n print(title)\n sns.set(font_scale=1)\n plt.rcParams[\"figure.figsize\"] = (8, 4)\n plt.rcParams[\"font.family\"] = \"serif\"\n style.use(\"fivethirtyeight\")\n g = sns.barplot(y, x, palette=(\"binary\"))\n g.grid(False)\n g.set_title(title)\n g.set(xlabel=\"Configurations\", ylabel=\"Expansion\")\n ax = g\n for i, v in enumerate(y):\n ax.text(v + 1, i + 0.25, str(v), color=\"black\", fontweight=\"bold\")\n fig_name = dir_list[2]\n plt.savefig(fig_name, 
bbox_inches=\"tight\")\n plt.show(block=False)\n plt.pause(1)\n plt.close()\n\n y = y3\n x = x3\n title = \"Configurations vs Different Expansions\" + \" for \" + dir_list[3]\n print(title)\n sns.set(font_scale=1)\n plt.rcParams[\"figure.figsize\"] = (8, 4)\n plt.rcParams[\"font.family\"] = \"serif\"\n style.use(\"fivethirtyeight\")\n g = sns.barplot(y, x, palette=(\"binary\"))\n g.grid(False)\n g.set_title(title)\n g.set(xlabel=\"Configurations\", ylabel=\"Expansion\")\n ax = g\n for i, v in enumerate(y):\n ax.text(v + 1, i + 0.25, str(v), color=\"black\", fontweight=\"bold\")\n fig_name = dir_list[3]\n plt.savefig(fig_name, bbox_inches=\"tight\")\n plt.show(block=False)\n plt.pause(1)\n plt.close()\n\n y = y4\n x = x4\n title = \"Configurations vs Different Expansions\" + \" for \" + dir_list[4]\n print(title)\n sns.set(font_scale=1)\n plt.rcParams[\"figure.figsize\"] = (8, 4)\n plt.rcParams[\"font.family\"] = \"serif\"\n style.use(\"fivethirtyeight\")\n g = sns.barplot(y, x, palette=(\"binary\"))\n g.grid(False)\n g.set_title(title)\n g.set(xlabel=\"Configurations\", ylabel=\"Expansion\")\n ax = g\n for i, v in enumerate(y):\n ax.text(v + 1, i + 0.25, str(v), color=\"black\", fontweight=\"bold\")\n fig_name = dir_list[4]\n plt.savefig(fig_name, bbox_inches=\"tight\")\n plt.show(block=False)\n plt.pause(1)\n plt.close()\n\n y = y5\n x = x5\n title = \"Configurations vs Different Expansions\" + \" for \" + dir_list[5]\n print(title)\n sns.set(font_scale=1)\n plt.rcParams[\"figure.figsize\"] = (8, 4)\n plt.rcParams[\"font.family\"] = \"serif\"\n style.use(\"fivethirtyeight\")\n g = sns.barplot(y, x, palette=(\"binary\"))\n g.grid(False)\n g.set_title(title)\n g.set(xlabel=\"Configurations\", ylabel=\"Expansion\")\n ax = g\n for i, v in enumerate(y):\n ax.text(v + 1, i + 0.25, str(v), color=\"black\", fontweight=\"bold\")\n fig_name = dir_list[5]\n plt.savefig(fig_name, bbox_inches=\"tight\")\n plt.show(block=False)\n plt.pause(1)\n plt.close()\n\n rcParams[\"figure.figsize\"] = 30, 20\n plt.rcParams[\"axes.grid\"] = False\n img_1 = mpimg.imread(\"dihedral_threshold_lower.png\")\n img_2 = mpimg.imread(\"dihedral_threshold_upper.png\")\n img_3 = mpimg.imread(\"dual_threshold_lower.png\")\n img_4 = mpimg.imread(\"dual_threshold_upper.png\")\n img_5 = mpimg.imread(\"total_threshold_lower.png\")\n img_6 = mpimg.imread(\"total_threshold_upper.png\")\n fig, ax = plt.subplots(3, 2)\n fig.suptitle(\"\")\n ax[0, 1].imshow(img_1)\n ax[1, 1].imshow(img_2)\n ax[0, 0].imshow(img_3)\n ax[1, 0].imshow(img_4)\n ax[2, 0].imshow(img_5)\n ax[2, 1].imshow(img_6)\n plt.savefig(\"analysis.png\")\n plt.show(block=False)\n plt.pause(3)\n plt.close()\n\n cwd = os.getcwd()\n os.system(\"rm -rf analysis\")\n os.system(\"mkdir analysis\")\n target_dir = cwd + \"/\" + \"analysis\"\n command = \"mv analysis.png \" + target_dir\n os.system(command)\n os.system(\"rm -rf *.png*\")\n\n\ndef clean_for_analysis():\n\n \"\"\"\n\n Rstructures the entire filetree to start reweighing\n analysis again. 
Used only when we want to run the analysis\n again.\n\n \"\"\"\n\n os.system(\"rm -rf westpa_dir\")\n dir_list = [\n \"dihedral_threshold_lower\",\n \"dihedral_threshold_upper\",\n \"dual_threshold_lower\",\n \"dual_threshold_upper\",\n \"total_threshold_lower\",\n \"total_threshold_upper\",\n ]\n cwd = os.getcwd()\n source_dir = cwd + \"/\"\n target_dir = cwd + \"/\" + \"gamd_simulations\" + \"/\"\n\n for i in dir_list:\n os.chdir(target_dir + i)\n os.system(\n \"rm -rf pickle_files dat_files txt_csv_files we_inputs westpa_inputs we_structures\"\n )\n os.chdir(cwd)\n\n for i in dir_list:\n shutil.move(\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"system_files\"\n + \"/\"\n + \"gamd.log\",\n cwd + \"/\" + \"gamd_simulations\" + \"/\" + i + \"/\" + \"gamd.log\",\n )\n shutil.move(\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"system_files\"\n + \"/\"\n + \"gamd-restart.dat\",\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"gamd-restart.dat\",\n )\n shutil.move(\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"system_files\"\n + \"/\"\n + \"md.in\",\n cwd + \"/\" + \"gamd_simulations\" + \"/\" + i + \"/\" + \"md.in\",\n )\n shutil.move(\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"system_files\"\n + \"/\"\n + \"mdinfo\",\n cwd + \"/\" + \"gamd_simulations\" + \"/\" + i + \"/\" + \"mdinfo\",\n )\n shutil.move(\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"system_files\"\n + \"/\"\n + \"system_final.inpcrd\",\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"system_final.inpcrd\",\n )\n shutil.move(\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"system_files\"\n + \"/\"\n + \"system_final.nc\",\n cwd + \"/\" + \"gamd_simulations\" + \"/\" + i + \"/\" + \"system_final.nc\",\n )\n shutil.move(\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"system_files\"\n + \"/\"\n + \"system_final.out\",\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"system_final.out\",\n )\n shutil.move(\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"system_files\"\n + \"/\"\n + \"system_final.prmtop\",\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"system_final.prmtop\",\n )\n shutil.move(\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"system_files\"\n + \"/\"\n + \"system_final.rst\",\n cwd\n + \"/\"\n + \"gamd_simulations\"\n + \"/\"\n + i\n + \"/\"\n + \"system_final.rst\",\n )\n\n for i in dir_list:\n os.chdir(target_dir + i)\n os.system(\"rm -rf system_files\")\n os.chdir(cwd)\n\n\n\"\"\"\nprepare_alanine_dipeptide()\nrun_equilibration()\ncreate_starting_structures()\nadd_vec_inpcrd()\nadd_vec_prmtop()\ncreate_filetree()\nrun_simulations()\nrun_reweigh()\nrun_westpa_inputs()\ntransfer_files()\nadd_vectors_westpa_files()\nwe_analysis()\ncorrection_westpa()\nplot_contrib()\nclean_for_analysis()\n\"\"\"\n"
]
| [
[
"numpy.histogram",
"matplotlib.style.use",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.image.imread",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.pause",
"pandas.concat",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
]
|
srikanth-kilaru/fsdl-text-recognizer-project | [
"f1043d484f083c1fec09b4abd9fd569f42503d4c"
]
| [
"lab2/text_recognizer/networks/line_cnn_sliding_window.py"
]
| [
"import pathlib\nfrom typing import Tuple\n\nfrom boltons.cacheutils import cachedproperty\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, Permute, Reshape, TimeDistributed, Lambda, ZeroPadding2D\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.models import Model as KerasModel\n\nfrom text_recognizer.models.line_model import LineModel\nfrom text_recognizer.networks.lenet import lenet\nfrom text_recognizer.networks.misc import slide_window\n\n\ndef line_cnn_sliding_window(\n input_shape: Tuple[int, ...],\n output_shape: Tuple[int, ...],\n window_width: float=16,\n window_stride: float=10) -> KerasModel:\n \"\"\"\n Input is an image with shape (image_height, image_width)\n Output is of shape (output_length, num_classes)\n \"\"\"\n image_height, image_width = input_shape\n output_length, num_classes = output_shape\n\n image_input = Input(shape=input_shape)\n # (image_height, image_width)\n\n image_reshaped = Reshape((image_height, image_width, 1))(image_input)\n # (image_height, image_width, 1)\n\n image_patches = Lambda(\n slide_window,\n arguments={'window_width': window_width, 'window_stride': window_stride}\n )(image_reshaped)\n # (num_windows, image_height, window_width, 1)\n\n # Make a LeNet and get rid of the last two layers (softmax and dropout)\n convnet = lenet((image_height, window_width, 1), (num_classes,))\n convnet = KerasModel(inputs=convnet.inputs, outputs=convnet.layers[-2].output)\n\n convnet_outputs = TimeDistributed(convnet)(image_patches)\n # (num_windows, 128)\n\n # Now we have to get to (output_length, num_classes) shape. One way to do it is to do another sliding window with\n # width = floor(num_windows / output_length)\n # Note that this will likely produce too many items in the output sequence, so take only output_length,\n # and watch out that width is at least 2 (else we will only be able to predict on the first half of the line)\n\n ##### Your code below (Lab 2)\n convnet_outputs_extra_dim = Lambda(lambda x: tf.expand_dims(x, -1))(convnet_outputs)\n num_windows = int((image_width - window_width) / window_stride) + 1\n width = int(num_windows / output_length)\n conved_convnet_outputs = Conv2D(num_classes, (width, 128), (width, 1), activation='softmax')(convnet_outputs_extra_dim)\n squeezed_conved_convnet_outputs = Lambda(lambda x: tf.squeeze(x, 2))(conved_convnet_outputs)\n\n # Since we floor'd the calculation of width, we might have too many items in the sequence. Take only output_length.\n softmax_output = Lambda(lambda x: x[:, :output_length, :])(squeezed_conved_convnet_outputs)\n ##### Your code above (Lab 2)\n\n model = KerasModel(inputs=image_input, outputs=softmax_output)\n model.summary()\n return model\n\n"
]
| [
[
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.TimeDistributed",
"tensorflow.expand_dims",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Conv2D",
"tensorflow.squeeze"
]
]
|
zen-juen/RealityBending.github.io | [
"c79c33eced03cba732b684698c81ba64e4bf887c"
]
| [
"content/post/2021-11-12-complexity_neurokit/make_figs.py"
]
| [
"import matplotlib.pyplot as plt\nimport neurokit2 as nk\n\nsignal = nk.signal_simulate(duration=2, sampling_rate=200, frequency=[5, 6], noise=0.5)\nnk.complexity_mfdfa(signal, show=True)\n\nfig = plt.gcf()\nfig.set_size_inches(10, 8, forward=True)\nfig.savefig(\"featured.png\", dpi=300, facecolor='w')\n"
]
| [
[
"matplotlib.pyplot.gcf"
]
]
|
zbr17/GeDML | [
"83f6f835e7c3319e32432b3013820f98476cc96c"
]
| [
"src/gedml/core/datasets/online_products.py"
]
| [
"import torch\nimport numpy as np \nfrom pandas import read_csv\nimport os\nfrom PIL import Image\nfrom .base_dataset import BaseDataset\n\nclass OnlineProducts(BaseDataset):\n \"\"\"\n `Stanford Online Products <https://cvgl.stanford.edu/projects/lifted_struct/>`_\n \"\"\"\n def _set_dataset_info(self):\n if self.assert_num_classes is None:\n if self.phase == \"train\":\n self.assert_num_classes = 11318\n else:\n self.assert_num_classes = 11316\n if self.assert_num_samples is None:\n if self.phase == \"train\":\n self.assert_num_samples = 59551\n else:\n self.assert_num_samples = 60502\n\n def init_dataset(self):\n self._set_dataset_info()\n self.root = os.path.join(self.root, \"online_products\")\n info_folder = os.path.join(self.root, 'Info_Files')\n img_folder = os.path.join(self.root, 'images')\n self.img_paths, self.labels = [], []\n self.init_info_file_name()\n\n curr_file = read_csv(\n os.path.join(info_folder, self.info_file_name),\n delim_whitespace=True, \n header=0\n ).values\n self.img_paths.extend(\n [os.path.join(img_folder, name) \n for name in list(curr_file[:, 3])]\n )\n self.labels.extend(list(curr_file[:, 1] - 1))\n \n self.labels = np.array(self.labels)\n assert len(np.unique(self.labels)) == self.assert_num_classes\n assert self.__len__() == self.assert_num_samples\n \n def init_info_file_name(self):\n if self.phase == \"train\":\n self.info_file_name = \"Ebay_train.txt\"\n elif self.phase == \"test\":\n self.info_file_name = \"Ebay_test.txt\"\n else:\n raise KeyError(\n \"Invalid dataset phase: {} / {}\".format(\n self.phase,\n self.__class__.__name__\n )\n )\n\n def get_labels(self):\n return self.labels\n \n def __len__(self):\n return len(self.labels)\n\n def get_image_label(self, idx):\n path = self.img_paths[idx]\n img = Image.open(path).convert(\"RGB\")\n label = self.labels[idx]\n return img, label"
]
| [
[
"numpy.array",
"numpy.unique"
]
]
|
JakobWegmann/election_calculator | [
"16c9fabc045a75259da9413bce9f4ba027e9f63f"
]
| [
"src/data_management/load_data.py"
]
| [
"import os\nimport pickle\n\nimport numpy as np\nimport pandas as pd\n\n\nuser = \"Dominik\"\n\nif user == \"Dominik\":\n os.chdir(\"/home/dominik/Dokumente/election_calculator/src/data_management/\")\nelse:\n pass\n\nif user == \"Jakob\":\n os.chdir(\n \"C:/Users/jakob/sciebo/Bonn/6th_semester/election_calculator/src/data_management\"\n )\nelse:\n pass\n\n# if user == \"Jakob\":\n# path = \"C:/Users/jakob/sciebo/Bonn/6th_semester/election_calculator\"\n# elif user == \"Dominik\":\n# path = \"/home/dominik/Dokumente/election_calcuator\"\n# else:\n# print(\"No such user exists!\")\n\n# data = pd.read_csv(\n# f\"{path}/src/original_data/election_results/btw2017_kerg.csv\",\n# sep=\";\",\n# skiprows=5,\n# header=None,\n# error_bad_lines=False,\n# )\n\ndata = pd.read_csv(\n \"../original_data/election_results/btw2017_kerg.csv\",\n sep=\";\",\n skiprows=5,\n header=None,\n error_bad_lines=False,\n # encoding=\"latin1\",\n)\ndata\n\n# * Delete unnecessary columns in two steps.\ndelete = [\"Nr\", \"gehört zu\", \"Vorperiode\"]\nfor item in delete:\n data = data.loc[:, ~(data == item).any()] # Keep if no cell contains `item`\n data.columns = range(data.shape[1]) # Resets column indices to 0,1,2,...\n\n\ndelete = [\"Wahlberechtigte\", \"Wähler\", \"Ungültige\", \"Gültige\"]\nfor item in delete:\n erststimmen = data.loc[:, (data == item).any()].columns[0]\n zweitstimmen = erststimmen # After drop the columns shifted.\n\n data.drop(data.columns[erststimmen], axis=1, inplace=True)\n data.drop(data.columns[zweitstimmen], axis=1, inplace=True)\n data.columns = range(data.shape[1])\ndata.loc[:, :7]\n\nfor i in range(1, data.shape[1], 2):\n data.loc[0, i + 1] = data.loc[0, i]\ndata.loc[:, :3]\n\ndata.drop(index=2, inplace=True)\ndata.reset_index(inplace=True, drop=True)\ndata = data.T\n\n# * Drop all empty columns and rows.\nzero_cols = []\nfor column in range(0, data.shape[1], 1):\n if data[column].isnull().all():\n zero_cols.append(column)\n else:\n pass\ndata.drop(columns=zero_cols, inplace=True)\ndata.columns = range(data.shape[1])\n\nzero_rows = []\nfor row in range(0, len(data.index), 1):\n if data.loc[row, :].isnull().all():\n zero_rows.append(row)\n else:\n pass\ndata.drop(index=zero_rows, inplace=True)\ndata.columns = data.loc[0, :]\ndata.drop(index=0, inplace=True)\ndata.reset_index(drop=True, inplace=True)\n\ndata.rename(columns={\"Gebiet\": \"Partei\", np.nan: \"Stimme\"}, inplace=True)\ndata.fillna(0, inplace=True)\n\nparteien = {\n \"Christlich Demokratische Union Deutschlands\": \"CDU\",\n \"Sozialdemokratische Partei Deutschlands\": \"SPD\",\n \"Christlich-Soziale Union in Bayern e.V.\": \"CSU\",\n \"BÜNDNIS 90/DIE GRÜNEN\": \"Grüne\",\n \"Freie Demokratische Partei\": \"FDP\",\n \"Alternative für Deutschland\": \"AfD\",\n}\n\nfor partei in parteien.keys():\n data = data.replace(partei, parteien[partei])\n\ndata.to_json(\"../../bld/data/raw_data.json\")\n\n# * Get a list of all parties.\nparteien = data.loc[:, \"Partei\"].to_list()\n\n# * All federal states.\nbundesländer = [\n \"Bayern\",\n \"Baden-Württemberg\",\n \"Saarland\",\n \"Nordrhein-Westfalen\",\n \"Berlin\",\n \"Hamburg\",\n \"Niedersachsen\",\n \"Thüringen\",\n \"Bremen\",\n \"Sachsen\",\n \"Sachsen-Anhalt\",\n \"Brandenburg\",\n \"Rheinland-Pfalz\",\n \"Schleswig-Holstein\",\n \"Hessen\",\n \"Mecklenburg-Vorpommern\",\n]\n\n# * Get Wahlkreise of each Bundesland in a dictionary columns.\nbundesländer_col = {}\nfor bundesland in bundesländer:\n bundesländer_col[bundesland] = 
data.columns.get_loc(bundesland)\n\nbundesländer_col[\"Stimme\"] = 1\nbundesländer_col = dict(sorted(bundesländer_col.items(), key=lambda item: item[1]))\nbundesländer_wahlkreise = {}\n\nprevious_key = \"Stimme\"\nfor key in bundesländer_col:\n bundesländer_wahlkreise[key] = data.columns[\n (bundesländer_col[previous_key] + 1) : (bundesländer_col[key])\n ].tolist()\n previous_key = key\nbundesländer_wahlkreise.pop(\"Stimme\", None) # Remove the key 'Stimme'\n\nwith open(\"../../bld/data/wahlkreis_bundeslaender.pickle\", \"wb\") as handle:\n pickle.dump(bundesländer_wahlkreise, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n# * Get population data.\ndata = pd.read_csv(\n \"../original_data/population/bevoelkerung_2016.csv\",\n sep=\";\",\n skiprows=5,\n header=None,\n error_bad_lines=False,\n encoding=\"cp1252\",\n)\n\nto_drop = [\n 0,\n 3,\n 4,\n 6,\n 7,\n 8,\n 9,\n 10,\n 11,\n] # by-hand selection; but no problem as format will always be the same\ndata.drop(columns=to_drop, inplace=True)\ndata.columns = range(data.shape[1])\n\ndata.loc[0, 0] = \"Bundesland\"\ndata.loc[0, 1] = \"Altersgruppe\"\n\nzero_rows = []\nfor row in range(0, len(data.index), 1):\n if data.loc[row, :].isnull().all():\n zero_rows.append(row)\n else:\n pass\ndata.drop(index=zero_rows, inplace=True)\ndata.columns = data.loc[0, :]\ndata.drop(index=[0, 1], inplace=True)\ndata.reset_index(drop=True, inplace=True)\n\ndata = data[(data == \"Insgesamt\").any(axis=1)]\ndata.drop(columns=[\"Altersgruppe\"], inplace=True)\ndata.reset_index(drop=True, inplace=True)\n\ndata.to_json(\"../../bld/data/population_data.json\")\n\n# * Get Bewerber data.\ndata = pd.read_csv(\n \"../original_data/candidates/btw2017bewerb_gewaehlt.csv\",\n sep=\";\",\n skiprows=7,\n header=0,\n error_bad_lines=False,\n encoding=\"cp1252\",\n)\n\ndata[\"Bundesland\"] = data[\"Wahlkreis_Land\"]\ndata[\"Bundesland\"].fillna(data[\"Liste_Land\"], inplace=True)\n\nrename_dict = {\n \"BY\" : \"Bayern\",\n \"BW\" : \"Baden-Württemberg\",\n \"SL\" : \"Saarland\",\n \"NW\" : \"Nordrhein-Westfalen\",\n \"BE\" : \"Berlin\",\n \"HH\" : \"Hamburg\",\n \"NI\" : \"Niedersachsen\",\n \"TH\" : \"Thüringen\",\n \"HB\" : \"Bremen\",\n \"SN\" : \"Sachsen\",\n \"ST\" : \"Sachsen-Anhalt\",\n \"BB\" : \"Brandenburg\",\n \"RP\" : \"Rheinland-Pfalz\",\n \"SH\" : \"Schleswig-Holstein\",\n \"HE\" : \"Hessen\",\n \"MV\" : \"Mecklenburg-Vorpommern\",\n}\n\ndata[\"Bundesland\"].replace(rename_dict, inplace=True)\ndata[\"Partei\"] = data[\"Wahlkreis_ParteiBez\"]\ndata[\"Partei\"].fillna(data[\"Liste_ParteiBez\"], inplace=True)\ndict = {}\n\n\nfor bundesland in list(set(data[\"Bundesland\"].tolist())):\n bundesland_df = data[data[\"Bundesland\"] == bundesland].copy()\n key_value = {}\n for partei in list(set(bundesland_df[\"Partei\"].tolist())):\n key_value[partei] = bundesland_df[bundesland_df[\"Partei\"] == partei].copy()\n key_value[partei].drop(\n columns=[\n \"Wahlkreis_Land\",\n \"Wahlkreis_ParteiBez\",\n \"Wahlkreis_ParteiKurzBez\", \n \"Liste_Land\",\n \"Liste_ParteiBez\", \n \"Liste_ParteiKurzBez\",\n \"Bundesland\", \n \"Partei\",\n ], \n inplace=True,\n )\n key_value[partei].sort_values(by=[\"Liste_Platz\"], inplace=True)\n key_value[partei].reset_index(inplace=True, drop=True)\n dict[bundesland] = key_value\n\nwith open(\"../../bld/data/bundesland_partei_listen.pickle\", \"wb\") as handle:\n pickle.dump(dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n# * Get Wahlkreise data.\ndata = pd.read_csv(\n \"../original_data/wahlkreise_info/20170228_BTW17_WKr_Gemeinden_ASCII.csv\",\n 
sep=\";\",\n skiprows=7,\n header=0,\n error_bad_lines=False,\n encoding=\"cp1252\",\n)\n\ndata.drop(columns=[\"Wahlkreis-von\", \"Wahlkreis-bis\", \"PLZ-mehrere\"], inplace=True)\ndata[\"Gemeindename\"] = [s.split(\", \", 1)[0] for s in data[\"Gemeindename\"].tolist()]\n\ndict = {}\nfor index in range(len(data[\"Gemeindename\"].tolist())):\n dict[data[\"Gemeindename\"].iloc[index]] = data[\"Wahlkreis-Bez\"].iloc[index]\n\nwith open(\"../../bld/data/gemeinde_wahlkreis_listen.pickle\", \"wb\") as handle:\n pickle.dump(dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n"
]
| [
[
"pandas.read_csv"
]
]
|
jianpengz/MB-DCNN | [
"17777be932a1435d22eab655f1c33fcd0a4b41e1"
]
| [
"generate_Coarse_mask.py"
]
| [
"import torch\r\nimport cv2\r\nimport numpy as np\r\nimport torch.backends.cudnn as cudnn\r\nimport os\r\nfrom tqdm import tqdm\r\nfrom skimage import io\r\nfrom net.models import deeplabv3plus\r\nfrom dataset.my_datasets import MyGenDataSet\r\nfrom torch.utils import data\r\n\r\n\r\ndef generate_mode_seg0(dataloader, model, path):\r\n\r\n for index, batch in tqdm(enumerate(dataloader)):\r\n image, name = batch\r\n image = image.cuda()\r\n # print(name)\r\n\r\n rot_90 = torch.rot90(image, 1, [2, 3])\r\n rot_180 = torch.rot90(image, 2, [2, 3])\r\n rot_270 = torch.rot90(image, 3, [2, 3])\r\n hor_flip = torch.flip(image, [-1])\r\n ver_flip = torch.flip(image, [-2])\r\n image = torch.cat([image, rot_90, rot_180, rot_270, hor_flip, ver_flip], dim=0)\r\n\r\n model.eval()\r\n with torch.no_grad():\r\n pred = model(image)\r\n\r\n pred = pred[0:1] + torch.rot90(pred[1:2], 3, [2, 3]) + torch.rot90(pred[2:3], 2, [2, 3]) + torch.rot90(pred[3:4], 1, [2, 3]) + torch.flip(pred[4:5], [-1]) + torch.flip(pred[5:6], [-2])\r\n\r\n pred = torch.softmax(pred, dim=1).cpu().data.numpy()\r\n pred_arg = np.int16(np.argmax(pred[0], axis=0))\r\n\r\n io.imsave(os.path.join(path, name[0]), np.int64(pred_arg) * 255)\r\n\r\n return True\r\n\r\ndef generate_mode_seg1(dataloader, model, path):\r\n\r\n for index, batch in tqdm(enumerate(dataloader)):\r\n image_ori, image, name = batch\r\n image = image.cuda()\r\n # print(name)\r\n\r\n rot_90 = torch.rot90(image, 1, [2, 3])\r\n rot_180 = torch.rot90(image, 2, [2, 3])\r\n rot_270 = torch.rot90(image, 3, [2, 3])\r\n hor_flip = torch.flip(image, [-1])\r\n ver_flip = torch.flip(image, [-2])\r\n image = torch.cat([image, rot_90, rot_180, rot_270, hor_flip, ver_flip], dim=0)\r\n\r\n model.eval()\r\n with torch.no_grad():\r\n pred = model(image)\r\n\r\n pred = pred[0:1] + torch.rot90(pred[1:2], 3, [2, 3]) + torch.rot90(pred[2:3], 2, [2, 3]) + torch.rot90(pred[3:4], 1, [2, 3]) + torch.flip(pred[4:5], [-1]) + torch.flip(pred[5:6], [-2])\r\n\r\n pred = torch.softmax(pred, dim=1).cpu().data.numpy()\r\n pred_arg = np.int16(np.argmax(pred[0], axis=0))\r\n pred_arg = cv2.resize(pred_arg, (image_ori.shape[2], image_ori.shape[1]), interpolation=cv2.INTER_NEAREST)\r\n\r\n io.imsave(os.path.join(path, name[0]), np.int64(pred_arg) * 255)\r\n\r\n return True\r\n\r\n\r\n########################### Load coarse segmentation network.\r\ncudnn.enabled = True\r\nmodel = deeplabv3plus(num_classes=2)\r\nmodel.cuda()\r\nmodel = torch.nn.DataParallel(model)\r\npretrained_dict = torch.load('models/DR_CoarseSN/CoarseSN.pth')\r\nmodel.load_state_dict(pretrained_dict)\r\nmodel.eval()\r\nmodel.float()\r\n\r\n\r\n########################### Coarse_masks for MaskCN\r\n\r\n#### Training\r\nclass_p = 'Training'\r\ndata_root = 'dataset/cls_data/'+class_p+'_Add_resize_crop_cls/'\r\ndata_list = 'dataset/ISIC/'+class_p+'_Add_cls.txt'\r\ndataloader = data.DataLoader(MyGenDataSet(data_root, data_list, mode=0), batch_size=1, shuffle=False, num_workers=8,\r\n pin_memory=True)\r\n\r\npath = 'Coarse_masks/'+class_p+'_MaskCN/'\r\nif not os.path.isdir(path):\r\n os.makedirs(path)\r\n\r\ngenerate_mode_seg0(dataloader, model, path)\r\n\r\n\r\n#### Validation\r\nclass_p = 'Validation' ### 'Testing'\r\ndata_root = 'dataset/cls_data/'+class_p+'_resize_crop9_cls/'\r\ndata_list = 'dataset/ISIC/'+class_p+'_crop9_cls.txt'\r\ndataloader = data.DataLoader(MyGenDataSet(data_root, data_list, mode=0), batch_size=1, shuffle=False, num_workers=8,\r\n pin_memory=True)\r\n\r\npath = 'Coarse_masks/'+class_p+'_MaskCN/'\r\nif not 
os.path.isdir(path):\r\n os.makedirs(path)\r\n\r\ngenerate_mode_seg0(dataloader, model, path)\r\n\r\n\r\n\r\n########################### Coarse_masks for EnhancedSN\r\n\r\n#### Training\r\nclass_p = 'Training'\r\ndata_root = 'dataset/seg_data/'+class_p+'_resize_seg/'\r\ndata_list = 'dataset/ISIC/'+class_p+'_seg.txt'\r\ndataloader = data.DataLoader(MyGenDataSet(data_root, data_list, mode=1), batch_size=1, shuffle=False, num_workers=8,\r\n pin_memory=True)\r\n\r\npath = 'Coarse_masks/'+class_p+'_EnhancedSN/'\r\nif not os.path.isdir(path):\r\n os.makedirs(path)\r\n\r\ngenerate_mode_seg1(dataloader, model, path)\r\n\r\n\r\n#### Validation\r\nclass_p = 'Validation' ### 'Testing'\r\ndata_root = 'dataset/seg_data/ISIC-2017_'+class_p+'_Data/'\r\ndata_list = 'dataset/ISIC/'+class_p+'_seg.txt'\r\ndataloader = data.DataLoader(MyGenDataSet(data_root, data_list, mode=1), batch_size=1, shuffle=False, num_workers=8,\r\n pin_memory=True)\r\n\r\npath = 'Coarse_masks/'+class_p+'_EnhancedSN/'\r\nif not os.path.isdir(path):\r\n os.makedirs(path)\r\n\r\ngenerate_mode_seg1(dataloader, model, path)\r\n\r\n\r\n"
]
| [
[
"torch.cat",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.rot90",
"torch.softmax",
"numpy.argmax",
"torch.load",
"numpy.int64",
"torch.flip"
]
]
|
METASPACE2020/sm-engine | [
"01d214223b36d44ba63dd5b35a162b55094b4d27"
]
| [
"sm/engine/tests/test_png_generator.py"
]
| [
"import numpy as np\nfrom numpy.testing import assert_almost_equal, assert_equal\nfrom png import Reader\n\nfrom sm.engine.png_generator import PngGenerator\n\n\ndef test_png_gen_greyscale_works():\n alpha_ch = np.array([[1, 1, 1]])\n gen = PngGenerator(alpha_ch, greyscale=True)\n\n img_data = np.array([[0., 5., 10.]])\n norm_img_data = (img_data - img_data.min()) / (img_data.max() - img_data.min())\n fp = gen.generate_png(img_data)\n\n reader = Reader(file=fp)\n width, height, pixels, _ = reader.asFloat()\n assert_equal(width, 3)\n assert_equal(height, 1)\n\n grey_shape = img_data.shape + (2,)\n assert_almost_equal(np.array(list(pixels)).reshape(grey_shape)[:,:,0], norm_img_data, decimal=4)\n"
]
| [
[
"numpy.array",
"numpy.testing.assert_equal"
]
]
|
matteodeggi/SC3-CVRP | [
"41c8f86bdcf5c1ceb684d3e267330c03446e5509"
]
| [
"Utils/Simulation.py"
]
| [
"import time\nimport pandas as pd\nfrom datetime import timedelta\nfrom haversine import haversine\nfrom Shift import Shift\n\n'''\nClasse che astrae la simulazione di un intero periodo.\n'''\nclass Simulation:\n def __init__(self, depot, config, window_size=6, max_size=175, filter_function = None, filter_kwargs={}):\n self.depot = depot\n self.cluster_class = config['cluster_class']\n self.cluster_kwargs = config['cluster_kwargs']\n self.graph_class = config['graph_class']\n self.graph_kwargs = config['graph_kwargs']\n self.filter_function = filter_function\n self.window_size = window_size\n self.max_size = max_size\n self.filter_kwargs = filter_kwargs\n self.distances = []\n self.routes = []\n self.timetables = []\n\n def get_score(self):\n return sum(self.distances)\n\n def get_distances(self):\n return self.distances\n \n def get_results(self):\n return self.routes\n\n def get_timetables(self, speed=30, emp_time=60):\n return self.timetables\n\n def to_csv(self, file):\n with open(file, 'w') as f:\n csv_output = []\n for w in range(len(self.routes)):\n for v in range(len(self.routes[w])):\n for o in range(len(self.routes[w][v])):\n if(self.timetables):\n csv_output.append(f'{self.routes[w][v][o][0]},{self.routes[w][v][o][1]},{o+1},{v+1},{w+1}, {self.timetables[w][v][o]}\\n')\n else:\n csv_output.append(f'{self.routes[w][v][o][0]},{self.routes[w][v][o][1]},{o+1},{v+1},{w+1}\\n')\n if(self.timetables):\n f.write('lat,long,order,vehicle,window,timetable\\n')\n else:\n f.write('lat,long,order,vehicle,window\\n')\n f.writelines(csv_output)\n\n '''\n Metodo per la computazione della simulazione.\n La simulazione tiene conto del vincolo per il quale ogni cestino deve essere svuotato almeno una volta al giorno.\n Tutti i cestini non svuotati durante la giornata vengono aggiunti all'ultimo turno.\n Restituisce i percorsi trovati per ogni turno.\n '''\n def compute_simulation(self, data, start_date=None, end_date=None, speed=None, emp_time=None, debug=False):\n i = 0\n start_date = data.index[0] if start_date is None else pd.to_datetime(start_date)\n end_date = data.index[-1] if end_date is None else pd.to_datetime(end_date)\n\n current_start_date = start_date\n current_end_date = start_date + timedelta(hours=self.window_size)-timedelta(seconds=1)\n\n # seriali di tutti i cestini\n bin_serials = data.bin_serial.unique()\n all_bins = data.drop_duplicates(subset='bin_serial')\n all_bins.loc[:, 'bin_level'] = 4\n \n # inizializzazione cestini non svuotati\n not_full = all_bins.copy()\n\n while(True):\n if(debug): print(str(current_start_date) + \" - \" + str(current_end_date))\n current_window = data[current_start_date : current_end_date]\n\n # verifica se il turno corrente è l'ultimo della giornata\n current_start_date += timedelta(hours=self.window_size)\n current_end_date += timedelta(hours=self.window_size)\n last_window = True if(current_end_date.day != (current_start_date-timedelta(seconds=1)).day) else False\n # aggiunta dei cestini non svuotati a quelli da svuotare, se ultimo turno della giornata\n current_window = pd.concat([current_window, not_full]) if last_window else current_window\n # cestini da svuotare\n current_window = current_window if self.filter_function is None else self.filter_function(current_window, **self.filter_kwargs)\n # aggiornamento lista dei cestini non svuotati\n not_full = all_bins.copy() if last_window else not_full[~not_full.bin_serial.isin(current_window['bin_serial'].unique())] \n \n current_shift = Shift(current_window, self.depot, self.cluster_class, 
self.graph_class, start_date=current_start_date)\n \n # clustering\n start_time = time.time()\n clusters = current_shift.clusterize(columns=['latitude', 'longitude'], max_size=self.max_size, kwargs=self.cluster_kwargs)\n current_shift.build_graphs(columns=['latitude', 'longitude'], kwargs=self.graph_kwargs)\n \n # routing\n results = current_shift.get_routes()\n #results = [[self.depot, *result[1:-1], self.depot] for result in results]\n self.routes.append(results)\n self.distances.append(current_shift.get_distance())\n if(speed is not None or emp_time is not None): self.timetables.append(current_shift.get_timetables(haversine, speed, emp_time))\n if(debug): print(f\"Distanza totale turno: {str(self.distances[-1])} km. Veicoli usati: {str(len(results))}.\")\n if(debug): print(f\"Tempo richiesto: {time.time() - start_time}s.\")\n del current_shift\n\n if(current_end_date >= end_date): # Stop condition\n break\n\n return self.routes"
]
| [
[
"pandas.to_datetime",
"pandas.concat"
]
]
|
z-bingo/FastDVDNet | [
"11da30bb98705560c763ea2eea5045bc7c5562b2"
]
| [
"utils/file_utils.py"
]
| [
"import os, sys, shutil\nimport torch\nimport glob\n\ndef rm_sub_files(path):\n shutil.rmtree(path)\n os.mkdir(path)\n\ndef load_checkpoint(path='./models', is_best=True):\n if is_best:\n ckpt_file = os.path.join(path, 'model_best.pth.tar')\n else:\n files = glob.glob(os.path.join(path, '{:06d}.pth.tar'))\n files.sort()\n ckpt_file = files[-1]\n return torch.load(ckpt_file)\n\ndef save_checkpoint(state, globel_iter, path='./models', is_best=True, max_keep=10):\n filename = os.path.join(path, '{:06d}.pth.tar'.format(globel_iter))\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, os.path.join(path, 'model_best.pth.tar'))\n\n files = sorted(os.listdir(path))\n rm_files = files[0: max(0, len(files)-max_keep)]\n for f in rm_files:\n os.remove(os.path.join(path, f))"
]
| [
[
"torch.save",
"torch.load"
]
]
|
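A usage sketch for the checkpoint helpers above, assuming the repository's utils/ directory is on the import path; the Linear model, iteration number, and ./models directory are illustrative only.

import os
import torch
import torch.nn as nn
from file_utils import save_checkpoint, load_checkpoint  # assumes utils/ is importable

model = nn.Linear(4, 2)
os.makedirs("./models", exist_ok=True)  # save_checkpoint expects the directory to exist

state = {"iter": 1000, "state_dict": model.state_dict()}
save_checkpoint(state, 1000, path="./models", is_best=True)

ckpt = load_checkpoint(path="./models", is_best=True)
model.load_state_dict(ckpt["state_dict"])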
ZJU-RL/zjurl | [
"5c85eb11babb69a09604a04f0eb7bdc5a96f08f0"
]
| [
"rllite/RainbowDQN/RainbowDQN.py"
]
| [
"import math\nimport random\nimport gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.autograd as autograd\nimport torch.nn.functional as F\n\nfrom rllite.common import NoisyLinear\nfrom rllite.common import ReplayBuffer\nfrom rllite.common import make_atari, wrap_deepmind, wrap_pytorch\n\nUSE_CUDA = torch.cuda.is_available()\n\n\nclass TinyRainbowDQN(nn.Module):\n def __init__(self, num_inputs, num_actions, num_atoms, Vmin, Vmax):\n super(TinyRainbowDQN, self).__init__()\n\n self.num_inputs = num_inputs\n self.num_actions = num_actions\n self.num_atoms = num_atoms\n self.Vmin = Vmin\n self.Vmax = Vmax\n\n self.linear1 = nn.Linear(num_inputs, 32)\n self.linear2 = nn.Linear(32, 64)\n\n self.noisy_value1 = NoisyLinear(64, 64, use_cuda=USE_CUDA)\n self.noisy_value2 = NoisyLinear(64, self.num_atoms, use_cuda=USE_CUDA)\n\n self.noisy_advantage1 = NoisyLinear(64, 64, use_cuda=USE_CUDA)\n self.noisy_advantage2 = NoisyLinear(64, self.num_atoms * self.num_actions, use_cuda=USE_CUDA)\n\n def forward(self, x):\n batch_size = x.size(0)\n\n x = F.relu(self.linear1(x))\n x = F.relu(self.linear2(x))\n\n value = F.relu(self.noisy_value1(x))\n value = self.noisy_value2(value)\n\n advantage = F.relu(self.noisy_advantage1(x))\n advantage = self.noisy_advantage2(advantage)\n\n value = value.view(batch_size, 1, self.num_atoms)\n advantage = advantage.view(batch_size, self.num_actions, self.num_atoms)\n\n x = value + advantage - advantage.mean(1, keepdim=True)\n x = F.softmax(x.view(-1, self.num_atoms)).view(-1, self.num_actions, self.num_atoms)\n\n return x\n\n def reset_noise(self):\n self.noisy_value1.reset_noise()\n self.noisy_value2.reset_noise()\n self.noisy_advantage1.reset_noise()\n self.noisy_advantage2.reset_noise()\n\n def act(self, state):\n state = torch.FloatTensor(state).unsqueeze(0)\n dist = self.forward(state).data.cpu()\n dist = dist * torch.linspace(self.Vmin, self.Vmax, self.num_atoms)\n action = dist.sum(2).max(1)[1].numpy()[0]\n return action\n\n\nclass RainbowDQN(object):\n def __init__(self, env_id=\"CartPole-v0\", Vmin=-10, Vmax=10, num_atoms=51):\n self.Vmin = Vmin\n self.Vmax = Vmax\n self.num_atoms = num_atoms\n\n self.env_id = env_id\n self.env = gym.make(self.env_id)\n self.current_model = TinyRainbowDQN(self.env.observation_space.shape[0], self.env.action_space.n, self.num_atoms, self.Vmin, self.Vmax)\n self.target_model = TinyRainbowDQN(self.env.observation_space.shape[0], self.env.action_space.n, self.num_atoms, self.Vmin, self.Vmax)\n if torch.cuda.is_available():\n self.current_model = self.current_model.cuda()\n self.target_model = self.target_model.cuda()\n\n self.optimizer = optim.Adam(self.current_model.parameters(), 0.001)\n self.replay_buffer = ReplayBuffer(10000)\n\n self.update_target(self.current_model, self.target_model)\n self.losses = []\n\n def update_target(self, current_model, target_model):\n target_model.load_state_dict(current_model.state_dict())\n\n def projection_distribution(self, next_state, rewards, dones):\n batch_size = next_state.size(0)\n\n delta_z = float(self.Vmax - self.Vmin) / (self.num_atoms - 1)\n support = torch.linspace(self.Vmin, self.Vmax, self.num_atoms)\n\n next_dist = self.target_model(next_state).data.cpu() * support\n next_action = next_dist.sum(2).max(1)[1]\n next_action = next_action.unsqueeze(1).unsqueeze(1).expand(next_dist.size(0), 1, next_dist.size(2))\n next_dist = next_dist.gather(1, next_action).squeeze(1)\n\n rewards = 
rewards.unsqueeze(1).expand_as(next_dist)\n dones = dones.unsqueeze(1).expand_as(next_dist)\n support = support.unsqueeze(0).expand_as(next_dist)\n\n Tz = rewards + (1 - dones) * 0.99 * support\n Tz = Tz.clamp(min=self.Vmin, max=self.Vmax)\n b = (Tz - self.Vmin) / delta_z\n l = b.floor().long()\n u = b.ceil().long()\n\n offset = torch.linspace(0, (batch_size - 1) * self.num_atoms, batch_size).long() \\\n .unsqueeze(1).expand(batch_size, self.num_atoms)\n\n proj_dist = torch.zeros(next_dist.size())\n proj_dist.view(-1).index_add_(0, (l + offset).view(-1), (next_dist * (u.float() - b)).view(-1))\n proj_dist.view(-1).index_add_(0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1))\n\n return proj_dist\n\n def train_step(self, gamma=0.99, batch_size=32):\n state, action, reward, next_state, done = self.replay_buffer.sample(batch_size)\n\n state = torch.FloatTensor(np.float32(state))\n next_state = torch.FloatTensor(np.float32(next_state))\n action = torch.LongTensor(action)\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(np.float32(done))\n\n proj_dist = self.projection_distribution(next_state, reward, done)\n\n dist = self.current_model(state)\n action = action.unsqueeze(1).unsqueeze(1).expand(batch_size, 1, self.num_atoms)\n dist = dist.gather(1, action).squeeze(1)\n dist.data.clamp_(0.01, 0.99)\n loss = -(proj_dist * dist.log()).sum(1)\n loss = loss.mean()\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n self.current_model.reset_noise()\n self.target_model.reset_noise()\n\n self.losses.append(loss.item())\n\n def learn(self, num_frames=15000, batch_size=32):\n all_rewards = []\n episode_reward = 0\n\n state = self.env.reset()\n for frame_idx in range(1, num_frames + 1):\n action = self.current_model.act(state)\n\n next_state, reward, done, _ = self.env.step(action)\n self.replay_buffer.push(state, action, reward, next_state, done)\n\n state = next_state\n episode_reward += reward\n\n if done:\n state = self.env.reset()\n all_rewards.append(episode_reward)\n episode_reward = 0\n\n if len(self.replay_buffer) > batch_size:\n self.train_step()\n\n # if frame_idx % 200 == 0:\n if frame_idx == num_frames:\n plt.figure(figsize=(20, 5))\n plt.subplot(121)\n plt.title('frame %s. 
reward: %s' % (frame_idx, np.mean(all_rewards[-10:])))\n plt.plot(all_rewards)\n plt.subplot(122)\n plt.title('loss')\n plt.plot(self.losses)\n plt.show()\n\n if frame_idx % 1000 == 0:\n self.update_target(self.current_model, self.target_model)\n\n print(frame_idx)\n\n\nclass TinyRainbowCnnDQN(nn.Module):\n def __init__(self, input_shape, num_actions, num_atoms, Vmin, Vmax):\n super(TinyRainbowCnnDQN, self).__init__()\n\n self.input_shape = input_shape\n self.num_actions = num_actions\n self.num_atoms = num_atoms\n self.Vmin = Vmin\n self.Vmax = Vmax\n\n self.features = nn.Sequential(\n nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),\n nn.ReLU(),\n nn.Conv2d(32, 64, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=1),\n nn.ReLU()\n )\n\n self.noisy_value1 = NoisyLinear(self.feature_size(), 512, use_cuda=USE_CUDA)\n self.noisy_value2 = NoisyLinear(512, self.num_atoms, use_cuda=USE_CUDA)\n\n self.noisy_advantage1 = NoisyLinear(self.feature_size(), 512, use_cuda=USE_CUDA)\n self.noisy_advantage2 = NoisyLinear(512, self.num_atoms * self.num_actions, use_cuda=USE_CUDA)\n\n def forward(self, x):\n batch_size = x.size(0)\n\n x = x / 255.\n x = self.features(x)\n x = x.view(batch_size, -1)\n\n value = F.relu(self.noisy_value1(x))\n value = self.noisy_value2(value)\n\n advantage = F.relu(self.noisy_advantage1(x))\n advantage = self.noisy_advantage2(advantage)\n\n value = value.view(batch_size, 1, self.num_atoms)\n advantage = advantage.view(batch_size, self.num_actions, self.num_atoms)\n\n x = value + advantage - advantage.mean(1, keepdim=True)\n x = F.softmax(x.view(-1, self.num_atoms)).view(-1, self.num_actions, self.num_atoms)\n\n return x\n\n def reset_noise(self):\n self.noisy_value1.reset_noise()\n self.noisy_value2.reset_noise()\n self.noisy_advantage1.reset_noise()\n self.noisy_advantage2.reset_noise()\n\n def feature_size(self):\n return self.features(autograd.Variable(torch.zeros(1, *self.input_shape))).view(1, -1).size(1)\n\n def act(self, state):\n state = torch.FloatTensor(np.float32(state)).unsqueeze(0)\n dist = self.forward(state).data.cpu()\n dist = dist * torch.linspace(self.Vmin, self.Vmax, self.num_atoms)\n action = dist.sum(2).max(1)[1].numpy()[0]\n return action\n\n\nclass RainbowCnnDQN(object):\n def __init__(self, env_id=\"PongNoFrameskip-v4\", num_atoms=51, Vmin=-10, Vmax=10):\n self.num_atoms = num_atoms\n self.Vmin = Vmin\n self.Vmax = Vmax\n self.env_id = env_id\n self.env = make_atari(self.env_id)\n self.env = wrap_deepmind(self.env)\n self.env = wrap_pytorch(self.env)\n\n self.current_model = TinyRainbowCnnDQN(self.env.observation_space.shape, self.env.action_space.n, num_atoms, Vmin, Vmax)\n self.target_model = TinyRainbowCnnDQN(self.env.observation_space.shape, self.env.action_space.n, num_atoms, Vmin, Vmax)\n if USE_CUDA:\n self.current_model = self.current_model.cuda()\n self.target_model = self.target_model.cuda()\n\n self.optimizer = optim.Adam(self.current_model.parameters(), lr=0.0001)\n self.update_target(self.current_model, self.target_model)\n\n self.replay_buffer = ReplayBuffer(100000)\n self.losses = []\n\n def update_target(self, current_model, target_model):\n target_model.load_state_dict(current_model.state_dict())\n\n def projection_distribution(self, next_state, rewards, dones):\n batch_size = next_state.size(0)\n\n delta_z = float(self.Vmax - self.Vmin) / (self.num_atoms - 1)\n support = torch.linspace(self.Vmin, self.Vmax, self.num_atoms)\n\n next_dist = self.target_model(next_state).data.cpu() * support\n 
next_action = next_dist.sum(2).max(1)[1]\n next_action = next_action.unsqueeze(1).unsqueeze(1).expand(next_dist.size(0), 1, next_dist.size(2))\n next_dist = next_dist.gather(1, next_action).squeeze(1)\n\n rewards = rewards.unsqueeze(1).expand_as(next_dist)\n dones = dones.unsqueeze(1).expand_as(next_dist)\n support = support.unsqueeze(0).expand_as(next_dist)\n\n Tz = rewards + (1 - dones) * 0.99 * support\n Tz = Tz.clamp(min=self.Vmin, max=self.Vmax)\n b = (Tz - self.Vmin) / delta_z\n l = b.floor().long()\n u = b.ceil().long()\n\n offset = torch.linspace(0, (batch_size - 1) * self.num_atoms, batch_size).long() \\\n .unsqueeze(1).expand(batch_size, self.num_atoms)\n\n proj_dist = torch.zeros(next_dist.size())\n proj_dist.view(-1).index_add_(0, (l + offset).view(-1), (next_dist * (u.float() - b)).view(-1))\n proj_dist.view(-1).index_add_(0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1))\n\n return proj_dist\n\n def train_step(self, gamma=0.99, batch_size=32):\n state, action, reward, next_state, done = self.replay_buffer.sample(batch_size)\n\n state = torch.FloatTensor(np.float32(state))\n next_state = torch.FloatTensor(np.float32(next_state))\n action = torch.LongTensor(action)\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(np.float32(done))\n\n proj_dist = self.projection_distribution(next_state, reward, done)\n\n dist = self.current_model(state)\n action = action.unsqueeze(1).unsqueeze(1).expand(batch_size, 1, self.num_atoms)\n dist = dist.gather(1, action).squeeze(1)\n dist.data.clamp_(0.01, 0.99)\n loss = -(proj_dist * dist.log()).sum(1)\n loss = loss.mean()\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n self.current_model.reset_noise()\n self.target_model.reset_noise()\n\n self.losses.append(loss.item())\n\n def learn(self, num_frames=1000000, replay_initial=10000, batch_size=32):\n all_rewards = []\n episode_reward = 0\n\n state = self.env.reset()\n for frame_idx in range(1, num_frames + 1):\n action = self.current_model.act(state)\n\n next_state, reward, done, _ = self.env.step(action)\n self.replay_buffer.push(state, action, reward, next_state, done)\n\n state = next_state\n episode_reward += reward\n\n if done:\n state = self.env.reset()\n all_rewards.append(episode_reward)\n episode_reward = 0\n\n if len(self.replay_buffer) > replay_initial:\n self.train_step()\n\n if frame_idx % 10000 == 0:\n plt.figure(figsize=(20, 5))\n plt.subplot(121)\n plt.title('frame %s. reward: %s' % (frame_idx, np.mean(all_rewards[-10:])))\n plt.plot(all_rewards)\n plt.subplot(122)\n plt.title('loss')\n plt.plot(self.losses)\n plt.show()\n\n if frame_idx % 1000 == 0:\n self.update_target(self.current_model, self.target_model)\n\n print(frame_idx)\n\n\nif __name__ == '__main__':\n model1 = RainbowDQN()\n model1.learn()\n model2 = RainbowCnnDQN()\n model2.learn()\n"
]
| [
[
"torch.nn.Linear",
"torch.zeros",
"torch.FloatTensor",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"torch.linspace",
"numpy.mean",
"matplotlib.pyplot.figure",
"torch.nn.ReLU",
"numpy.float32",
"torch.cuda.is_available",
"torch.LongTensor",
"torch.nn.Conv2d",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot"
]
]
|
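A hedged usage sketch for the row above, mirroring its __main__ block; it assumes the rllite package (and gym with a CartPole-v0 environment) is installed and that the module path matches the file path shown.

from rllite.RainbowDQN.RainbowDQN import RainbowDQN  # module path assumed from the file path above

agent = RainbowDQN(env_id="CartPole-v0", Vmin=-10, Vmax=10, num_atoms=51)
agent.learn(num_frames=5000, batch_size=32)  # shorter run than the 15000-frame default
recent = agent.losses[-100:]
print("mean loss over recent updates:", sum(recent) / max(1, len(recent)))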
hookSSi/metrics | [
"a1116cb0edbe95db606912c9c05ae9c35fc983e2"
]
| [
"torchmetrics/classification/accuracy.py"
]
| [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional\n\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.classification.accuracy import (\n _accuracy_compute,\n _accuracy_update,\n _check_subset_validity,\n _mode,\n _subset_accuracy_compute,\n _subset_accuracy_update,\n)\nfrom torchmetrics.utilities.enums import DataType\n\nfrom torchmetrics.classification.stat_scores import StatScores # isort:skip\n\n\nclass Accuracy(StatScores):\n r\"\"\"\n Computes Accuracy_:\n\n .. math::\n \\text{Accuracy} = \\frac{1}{N}\\sum_i^N 1(y_i = \\hat{y}_i)\n\n Where :math:`y` is a tensor of target values, and :math:`\\hat{y}` is a\n tensor of predictions.\n\n For multi-class and multi-dimensional multi-class data with probability or logits predictions, the\n parameter ``top_k`` generalizes this metric to a Top-K accuracy metric: for each sample the\n top-K highest probability or logit score items are considered to find the correct label.\n\n For multi-label and multi-dimensional multi-class inputs, this metric computes the \"global\"\n accuracy by default, which counts all labels or sub-samples separately. This can be\n changed to subset accuracy (which requires all labels or sub-samples in the sample to\n be correctly predicted) by setting ``subset_accuracy=True``.\n\n Accepts all input types listed in :ref:`references/modules:input types`.\n\n Args:\n num_classes:\n Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.\n threshold:\n Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case\n of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.\n average:\n Defines the reduction that is applied. Should be one of the following:\n\n - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.\n - ``'macro'``: Calculate the metric for each class separately, and average the\n metrics across classes (with equal weights for each class).\n - ``'weighted'``: Calculate the metric for each class separately, and average the\n metrics across classes, weighting each class by its support (``tp + fn``).\n - ``'none'`` or ``None``: Calculate the metric for each class separately, and return\n the metric for every class.\n - ``'samples'``: Calculate the metric for each sample, and average the metrics\n across samples (with equal weights for each sample).\n\n .. note:: What is considered a sample in the multi-dimensional multi-class case\n depends on the value of ``mdmc_average``.\n\n .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`,\n the value for the class will be ``nan``.\n\n mdmc_average:\n Defines how averaging is done for multi-dimensional multi-class inputs (on top of the\n ``average`` parameter). 
Should be one of the following:\n\n - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional\n multi-class.\n\n - ``'samplewise'``: In this case, the statistics are computed separately for each\n sample on the ``N`` axis, and then averaged over samples.\n The computation for each sample is done by treating the flattened extra axes ``...``\n (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,\n and computing the metric for the sample based on that.\n\n - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs\n (see :ref:`references/modules:input types`)\n are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they\n were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.\n\n ignore_index:\n Integer specifying a target class to ignore. If given, this class index does not contribute\n to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``\n or ``'none'``, the score for the ignored class will be returned as ``nan``.\n\n top_k:\n Number of highest probability or logit score predictions considered to find the correct label,\n relevant only for (multi-dimensional) multi-class inputs. The\n default value (``None``) will be interpreted as 1 for these inputs.\n\n Should be left at default (``None``) for all other types of inputs.\n\n multiclass:\n Used only in certain special cases, where you want to treat inputs as a different type\n than what they appear to be. See the parameter's\n :ref:`documentation section <references/modules:using the multiclass parameter>`\n for a more detailed explanation and examples.\n\n subset_accuracy:\n Whether to compute subset accuracy for multi-label and multi-dimensional\n multi-class inputs (has no effect for other input types).\n\n - For multi-label inputs, if the parameter is set to ``True``, then all labels for\n each sample must be correctly predicted for the sample to count as correct. If it\n is set to ``False``, then all labels are counted separately - this is equivalent to\n flattening inputs beforehand (i.e. ``preds = preds.flatten()`` and same for ``target``).\n\n - For multi-dimensional multi-class inputs, if the parameter is set to ``True``, then all\n sub-sample (on the extra axis) must be correct for the sample to be counted as correct.\n If it is set to ``False``, then all sub-samples are counter separately - this is equivalent,\n in the case of label predictions, to flattening the inputs beforehand (i.e.\n ``preds = preds.flatten()`` and same for ``target``). Note that the ``top_k`` parameter\n still applies in both cases, if set.\n\n compute_on_step:\n Forward only calls ``update()`` and returns None if this is set to False.\n\n .. deprecated:: v0.8\n Argument has no use anymore and will be removed v0.9.\n\n kwargs:\n Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ValueError:\n If ``top_k`` is not an ``integer`` larger than ``0``.\n ValueError:\n If ``average`` is none of ``\"micro\"``, ``\"macro\"``, ``\"weighted\"``, ``\"samples\"``, ``\"none\"``, ``None``.\n ValueError:\n If two different input modes are provided, eg. 
using ``multi-label`` with ``multi-class``.\n ValueError:\n If ``top_k`` parameter is set for ``multi-label`` inputs.\n\n Example:\n >>> import torch\n >>> from torchmetrics import Accuracy\n >>> target = torch.tensor([0, 1, 2, 3])\n >>> preds = torch.tensor([0, 2, 1, 3])\n >>> accuracy = Accuracy()\n >>> accuracy(preds, target)\n tensor(0.5000)\n\n >>> target = torch.tensor([0, 1, 2])\n >>> preds = torch.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])\n >>> accuracy = Accuracy(top_k=2)\n >>> accuracy(preds, target)\n tensor(0.6667)\n\n \"\"\"\n is_differentiable = False\n higher_is_better = True\n correct: Tensor\n total: Tensor\n\n def __init__(\n self,\n threshold: float = 0.5,\n num_classes: Optional[int] = None,\n average: str = \"micro\",\n mdmc_average: Optional[str] = \"global\",\n ignore_index: Optional[int] = None,\n top_k: Optional[int] = None,\n multiclass: Optional[bool] = None,\n subset_accuracy: bool = False,\n compute_on_step: Optional[bool] = None,\n **kwargs: Dict[str, Any],\n ) -> None:\n allowed_average = [\"micro\", \"macro\", \"weighted\", \"samples\", \"none\", None]\n if average not in allowed_average:\n raise ValueError(f\"The `average` has to be one of {allowed_average}, got {average}.\")\n\n super().__init__(\n reduce=\"macro\" if average in [\"weighted\", \"none\", None] else average,\n mdmc_reduce=mdmc_average,\n threshold=threshold,\n top_k=top_k,\n num_classes=num_classes,\n multiclass=multiclass,\n ignore_index=ignore_index,\n compute_on_step=compute_on_step,\n **kwargs,\n )\n\n if top_k is not None and (not isinstance(top_k, int) or top_k <= 0):\n raise ValueError(f\"The `top_k` should be an integer larger than 0, got {top_k}\")\n\n self.average = average\n self.threshold = threshold\n self.top_k = top_k\n self.subset_accuracy = subset_accuracy\n self.mode: DataType = None # type: ignore\n self.multiclass = multiclass\n\n if self.subset_accuracy:\n self.add_state(\"correct\", default=tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Update state with predictions and targets. 
See\n :ref:`references/modules:input types` for more information on input\n types.\n\n Args:\n preds: Predictions from model (logits, probabilities, or labels)\n target: Ground truth labels\n \"\"\"\n \"\"\" returns the mode of the data (binary, multi label, multi class, multi-dim multi class) \"\"\"\n mode = _mode(preds, target, self.threshold, self.top_k, self.num_classes, self.multiclass)\n\n if not self.mode:\n self.mode = mode\n elif self.mode != mode:\n raise ValueError(f\"You can not use {mode} inputs with {self.mode} inputs.\")\n\n if self.subset_accuracy and not _check_subset_validity(self.mode):\n self.subset_accuracy = False\n\n if self.subset_accuracy:\n correct, total = _subset_accuracy_update(preds, target, threshold=self.threshold, top_k=self.top_k)\n self.correct += correct\n self.total += total\n else:\n if not self.mode:\n raise RuntimeError(\"You have to have determined mode.\")\n tp, fp, tn, fn = _accuracy_update(\n preds,\n target,\n reduce=self.reduce,\n mdmc_reduce=self.mdmc_reduce,\n threshold=self.threshold,\n num_classes=self.num_classes,\n top_k=self.top_k,\n multiclass=self.multiclass,\n ignore_index=self.ignore_index,\n mode=self.mode,\n )\n\n # Update states\n if self.reduce != \"samples\" and self.mdmc_reduce != \"samplewise\":\n self.tp += tp\n self.fp += fp\n self.tn += tn\n self.fn += fn\n else:\n self.tp.append(tp)\n self.fp.append(fp)\n self.tn.append(tn)\n self.fn.append(fn)\n\n def compute(self) -> Tensor:\n \"\"\"Computes accuracy based on inputs passed in to ``update`` previously.\"\"\"\n if not self.mode:\n raise RuntimeError(\"You have to have determined mode.\")\n if self.subset_accuracy:\n return _subset_accuracy_compute(self.correct, self.total)\n tp, fp, tn, fn = self._get_final_stats()\n return _accuracy_compute(tp, fp, tn, fn, self.average, self.mdmc_reduce, self.mode)\n"
]
| [
[
"torch.tensor"
]
]
|
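The Accuracy docstring above already shows single-call usage; the sketch below shows the update()/compute() accumulation loop across batches, assuming the v0.8-era torchmetrics API documented in this row (newer releases require a task argument).

import torch
from torchmetrics import Accuracy  # v0.8-era API, as in the row above

accuracy = Accuracy()
for _ in range(3):  # accumulate statistics over several batches
    preds = torch.randint(0, 4, (8,))
    target = torch.randint(0, 4, (8,))
    accuracy.update(preds, target)
print(accuracy.compute())  # micro-averaged accuracy over all 24 samples
accuracy.reset()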
solveforj/pandemic-central | [
"e295e52c16c35a7db069cc10fc0ebcc0eb7db18c"
]
| [
"publication/misc_stats.py"
]
| [
"import pandas as pd\nimport numpy as np\nimport glob\n\n__author__ = 'Duy Cao, Joseph Galasso'\n__copyright__ = '© Pandemic Central, 2021'\n__license__ = 'MIT'\n__status__ = 'release'\n__url__ = 'https://github.com/solveforj/pandemic-central'\n__version__ = '3.0.0'\n\nweeks = [1, 2, 3, 4]\ndates = [\"2020-11-01\", \"2020-11-08\", \"2020-11-15\", \"2020-11-22\", \"2020-11-29\",\"2020-12-06\", \"2020-12-13\", \"2020-12-20\", \"2020-12-27\", \"2021-01-03\", \"2021-01-10\"]\n\ndef rt_stats():\n # Compute total number of counties for which forecasts were generated by the random forest model\n z = []\n for i in dates:\n q = list(pd.read_csv(\"output/ReichLabFormat/publication/{i}-PandemicCentral-COVIDForest.csv\".format(i=i), dtype={'location':'str'})['location'].unique())\n z.append(q)\n elements_in_all = list(set.intersection(*map(set, z)))\n elements_in_all.sort()\n\n print(\"• Total forecasted counties by random forest\")\n print(len(elements_in_all))\n\n # Compute Rt-related statistics\n higher_corrs = pd.read_csv(\"publication/data/higher_corrs.csv\", dtype={'FIPS':'str'})\n higher_corrs = higher_corrs[higher_corrs['FIPS'].isin(elements_in_all)]\n population = pd.read_csv(\"data/census/census.csv\", dtype={'FIPS':'str'}, usecols=['FIPS', 'POP_DENSITY'])\n higher_corrs = pd.merge(left=higher_corrs, right=population, how='left', on=['FIPS'], copy=False)\n\n print(\"• Average shift\")\n print(higher_corrs['shift'].mean())\n state_higher_corrs = higher_corrs[higher_corrs['region'] == 'state']\n print(\"• Number of counties with higher state max correlation + their mean shift + their mean population density\")\n print(len(state_higher_corrs), state_higher_corrs['shift'].mean(), state_higher_corrs['POP_DENSITY'].mean())\n county_higher_corrs = higher_corrs[higher_corrs['region'] == 'county']\n print(\"• Number of counties with higher county max correlation + their mean shift\")\n print(len(county_higher_corrs), county_higher_corrs['shift'].mean())\n print(\"• Number of counties with max Pearson correlation < 0.50 (between cases and aligned Rt)\")\n print(len(higher_corrs[higher_corrs['correlation'] < 0.5]))\n print(\" • Average population density of the counties above\")\n print(higher_corrs[higher_corrs['correlation'] < 0.5]['POP_DENSITY'].mean())\n print(\"• Average population density of all counties\")\n print(higher_corrs['POP_DENSITY'].mean())\n print(\"\\n\")\n\n import matplotlib.pyplot as plt\n\n higher_corrs['POP_DENSITY'].plot.hist(bins=12)\n plt.show()\n\n\n\ndef model_R2_MAE_stats():\n\n files = glob.glob(\"output/model_stats/publication/*.csv\")\n\n final_df = pd.DataFrame()\n\n data = pd.read_csv(\"data/JHU/jhu_data.csv\")\n data = data[['FIPS', 'date', 'confirmed_cases']]\n data = data.rename({'confirmed_cases':'weekly_sum'}, axis=1)\n\n data['shift'] = [-7] * len(data)\n data['shift'] = pd.to_timedelta(data['shift'], unit='D')\n data['shift_date'] = pd.to_datetime(data['date']) + data['shift']\n data['shift_date'] = data['shift_date'].astype(str)\n data = data.drop(['shift'], axis=1)[['FIPS', 'shift_date', 'weekly_sum']]\n data = data.rename({'shift_date': 'date'}, axis=1).reset_index(drop=True)\n\n populations = pd.read_csv(\"data/census/census.csv\", usecols=['FIPS', 'TOT_POP'])\n jhu = pd.merge(left=data, right=populations, how='left', on=['FIPS'], copy=False)\n\n for i in files:\n df = pd.read_csv(i)\n df = df[df['model_type'] == \"mobility\"]\n final_df = pd.concat([final_df, df[['week', 'MAE_testing', 'MAE_training','R2_testing', 'R2_training']]], axis=0)\n\n graph_df = 
df[['week', 'MAE_testing']].reset_index(drop=True)\n graph_df['data_type'] = graph_df['week'].apply(lambda x : \"week_\" + str(x) + \"_mae\").reset_index(drop=True)\n graph_df = graph_df.drop(['week'], axis=1)\n graph_df = graph_df.rename(columns={'MAE_testing': 'cases_per_100k'})\n week_name = i.split(\"_\")[-1].split(\".\")[0]\n\n jhu_week = jhu[jhu['date'] == week_name]\n tot_cases = jhu_week['weekly_sum'].sum()\n tot_pop = jhu_week['TOT_POP'].sum()\n\n final_df_ = final_df.groupby(\"week\").agg('mean').round(2)\n final_df_ = final_df_.astype(str)\n final_std = final_df.groupby(\"week\").agg('std').round(2)\n final_std = final_std.astype(str)\n\n final_df_['MAE_testing'] += \"±\" + final_std['MAE_testing']\n final_df_['MAE_training'] += \"±\" + final_std['MAE_training']\n final_df_['R2_testing'] += \"±\" + final_std['R2_testing']\n final_df_['R2_training'] += \"±\" + final_std['R2_training']\n\n print(\"• Average MAE + R2 for training and validation datasets over 11 weeks\")\n print(final_df_)\n print(\"\\n\")\n\ndef case_increase_nov_dec():\n jhu_data = pd.read_csv(\"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv\").dropna()\n jhu_data['FIPS'] = jhu_data['FIPS'].astype(int).astype(str)\n\n jhu_data = jhu_data[jhu_data['FIPS'].notnull()]\n\n def process_FIPS(fips):\n missing_zeroes = \"0\" * (5-len(fips))\n return missing_zeroes + fips\n\n jhu_data['FIPS'] = jhu_data['FIPS'].apply(lambda x : process_FIPS(x))\n\n # Filter for non-errant counties\n fips_to_use = pd.read_csv(\"data/geodata/FIPS_used.csv\", dtype={'FIPS': 'str'})\n jhu_data = jhu_data[jhu_data['FIPS'].isin(fips_to_use['FIPS'].to_list())]\n\n jhu_data = jhu_data.drop([\"Admin2\",\"Province_State\",\"Country_Region\",\"Lat\",\"Long_\",\"Combined_Key\",\"UID\",\"iso2\",\"iso3\",\"code3\"], axis=1)\n jhu_data = jhu_data.melt(id_vars=['FIPS'], var_name = 'date', value_name = 'confirmed_cases')\n jhu_data['date'] = pd.to_datetime(jhu_data['date'])\n jhu_data = jhu_data.sort_values(['FIPS', 'date'])\n\n # Case counts are cumulative and will be converted into daily change\n jhu_data['confirmed_cases'] = jhu_data.groupby('FIPS')['confirmed_cases'].diff().dropna()\n cases = jhu_data\n cases = cases.sort_values(['FIPS','date'], axis=0)\n cases = cases.reset_index(drop=True)\n cases['date'] = cases['date'].astype(str)\n\n cases_nov_wk1 = cases[(cases['date'] >= \"2020-11-01\") & (cases['date'] <= \"2020-11-07\")]\n cases_nov_wk4 = cases[(cases['date'] >= \"2020-11-24\") & (cases['date'] <= \"2020-11-30\")]\n cases_nov_wk1 = cases_nov_wk1['confirmed_cases'].sum()\n cases_nov_wk4 = cases_nov_wk4['confirmed_cases'].sum()\n print(\"• Cases %Change from first to last week of November 2020\")\n print((cases_nov_wk4 - cases_nov_wk1) / cases_nov_wk1)\n print(\"\\n\")\n\n cases_dec_wk1 = cases[(cases['date'] >= \"2020-12-01\") & (cases['date'] <= \"2020-12-07\")]\n cases_dec_wk4 = cases[(cases['date'] >= \"2020-12-25\") & (cases['date'] <= \"2020-12-31\")]\n cases_dec_wk1 = cases_dec_wk1['confirmed_cases'].sum()\n cases_dec_wk4 = cases_dec_wk4['confirmed_cases'].sum()\n print(\"• Cases %Change from first to last week of December 2020\")\n print((cases_dec_wk4 - cases_dec_wk1) / cases_dec_wk1)\n print(\"\\n\")\n\ndef top_feature_importance_share():\n unfiltered_percents = []\n len_unfiltered = 0\n filter_list = ['prediction_aligned_int_7', 'prediction_aligned_int_14','prediction_aligned_int_21', 'prediction_aligned_int_28', \\\n 
'rt_aligned_int_7', 'rt_aligned_int_14', 'rt_aligned_int_21', 'rt_aligned_int_28', \\\n 'test_positivity', 'totalTestResultsIncrease_norm']\n\n for i in weeks:\n ranking_dfs = []\n for j in dates:\n df_path = \"output/feature_ranking/publication/franking_\" + str(i) + \"_\" + j + \".csv\"\n read_df = pd.read_csv(df_path, index_col=0)\n ranking_dfs.append(read_df)\n ranking_df = pd.concat(ranking_dfs, axis=1)\n ranking_df.columns = range(ranking_df.shape[1])\n ranking_df['sum'] = ranking_df.sum(axis=1)\n total = ranking_df['sum'].sum()\n filtered_df = ranking_df[ranking_df.index.isin(filter_list)]\n len_unfiltered = len(ranking_df) - len(filtered_df)\n filtered_total = filtered_df['sum'].sum()\n unfiltered_percents.append((total-filtered_total)/total)\n\n print(\"• % of total feature permutation importance not for top 4 features + the # of these features\")\n print(np.mean(unfiltered_percents), len_unfiltered)\n print(\"\\n\")\n\ndef misc_stats():\n print(\"DISPLAYING MISCELLANEOUS STATISTICS IN MANUSCRIPT\\n\")\n rt_stats()\n model_R2_MAE_stats()\n case_increase_nov_dec()\n top_feature_importance_share()\n"
]
| [
[
"pandas.to_datetime",
"pandas.merge",
"pandas.DataFrame",
"pandas.to_timedelta",
"numpy.mean",
"pandas.concat",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
]
|
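case_increase_nov_dec above pads FIPS codes and converts cumulative JHU counts into daily new cases; here is a self-contained sketch of that transformation on a small synthetic frame (the FIPS codes and counts are made up).

import pandas as pd

# Synthetic cumulative case counts for two counties (stand-in for the JHU time series).
df = pd.DataFrame({
    "FIPS": ["1001", "1001", "1001", "45001", "45001", "45001"],
    "date": ["2020-11-01", "2020-11-02", "2020-11-03"] * 2,
    "confirmed_cases": [10, 14, 21, 3, 3, 9],
})

df["FIPS"] = df["FIPS"].str.zfill(5)  # same effect as process_FIPS above
df["date"] = pd.to_datetime(df["date"])
df = df.sort_values(["FIPS", "date"])

# Cumulative counts -> daily new cases per county.
df["new_cases"] = df.groupby("FIPS")["confirmed_cases"].diff()
print(df)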
Maria-philna/unilm | [
"5550a335c6d2ae5838b1a90e50cb46f81edcd50f"
]
| [
"infoxlm/fairseq/fairseq/modules/lightweight_convolution.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom fairseq import utils\nfrom fairseq.modules.unfold import unfold1d\n\n\ndef LightweightConv(input_size, kernel_size=1, padding_l=None, num_heads=1,\n weight_dropout=0., weight_softmax=False, bias=False):\n if torch.cuda.is_available():\n try:\n from fairseq.modules.lightconv_layer import LightconvLayer\n return LightconvLayer(input_size, kernel_size=kernel_size,\n padding_l=padding_l, num_heads=num_heads,\n weight_dropout=weight_dropout,\n weight_softmax=weight_softmax, bias=bias)\n except ImportError as e:\n print(e)\n return LightweightConv1dTBC(input_size, kernel_size=kernel_size,\n padding_l=padding_l, num_heads=num_heads,\n weight_dropout=weight_dropout,\n weight_softmax=weight_softmax, bias=bias)\n\n\nclass LightweightConv1d(nn.Module):\n '''Lightweight Convolution assuming the input is BxCxT\n This is just an example that explains LightConv clearer than the TBC version.\n We don't use this module in the model.\n\n Args:\n input_size: # of channels of the input and output\n kernel_size: convolution channels\n padding: padding\n num_heads: number of heads used. The weight is of shape\n `(num_heads, 1, kernel_size)`\n weight_softmax: normalize the weight with softmax before the convolution\n\n Shape:\n Input: BxCxT, i.e. (batch_size, input_size, timesteps)\n Output: BxCxT, i.e. (batch_size, input_size, timesteps)\n\n Attributes:\n weight: the learnable weights of the module of shape\n `(num_heads, 1, kernel_size)`\n bias: the learnable bias of the module of shape `(input_size)`\n '''\n\n def __init__(self, input_size, kernel_size=1, padding=0, num_heads=1,\n weight_softmax=False, bias=False, weight_dropout=0.):\n super().__init__()\n self.input_size = input_size\n self.kernel_size = kernel_size\n self.num_heads = num_heads\n self.padding = padding\n self.weight_softmax = weight_softmax\n self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))\n\n if bias:\n self.bias = nn.Parameter(torch.Tensor(input_size))\n else:\n self.bias = None\n self.weight_dropout = weight_dropout\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.xavier_uniform_(self.weight)\n if self.bias is not None:\n nn.init.constant_(self.bias, 0.)\n\n def forward(self, input):\n '''\n input size: B x C x T\n output size: B x C x T\n '''\n B, C, T = input.size()\n H = self.num_heads\n\n weight = self.weight\n if self.weight_softmax:\n weight = F.softmax(weight, dim=-1)\n\n weight = F.dropout(weight, self.weight_dropout, training=self.training)\n # Merge every C/H entries into the batch dimension (C = self.input_size)\n # B x C x T -> (B * C/H) x H x T\n # One can also expand the weight to C x 1 x K by a factor of C/H\n # and do not reshape the input instead, which is slow though\n input = input.view(-1, H, T)\n output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads)\n output = output.view(B, C, T)\n if self.bias is not None:\n output = output + self.bias.view(1, -1, 1)\n\n return output\n\n\nclass LightweightConv1dTBC(nn.Module):\n '''Lightweight Convolution assuming the input is TxBxC\n Args:\n input_size: # of channels of the input\n kernel_size: convolution channels\n padding_l: padding to the left when using \"same\" padding\n num_heads: number of heads used. 
The weight is of shape (num_heads, 1, kernel_size)\n weight_dropout: the drop rate of the DropConnect to drop the weight\n weight_softmax: normalize the weight with softmax before the convolution\n bias: use bias\n\n Shape:\n Input: TxBxC, i.e. (timesteps, batch_size, input_size)\n Output: TxBxC, i.e. (timesteps, batch_size, input_size)\n\n Attributes:\n weight: the learnable weights of the module of shape\n `(num_heads, 1, kernel_size)`\n bias: the learnable bias of the module of shape `(input_size)`\n '''\n def __init__(self, input_size, kernel_size=1, padding_l=None, num_heads=1,\n weight_dropout=0., weight_softmax=False, bias=False):\n super().__init__()\n self.input_size = input_size\n self.kernel_size = kernel_size\n self.padding_l = padding_l\n self.num_heads = num_heads\n self.weight_dropout = weight_dropout\n self.weight_softmax = weight_softmax\n\n self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))\n if bias:\n self.bias = nn.Parameter(torch.Tensor(input_size))\n else:\n self.bias = None\n\n self.reset_parameters()\n\n self.onnx_trace = False\n\n def reset_parameters(self):\n nn.init.xavier_uniform_(self.weight)\n if self.bias is not None:\n nn.init.constant_(self.bias, 0.)\n\n def forward(self, x, incremental_state=None, unfold=False):\n '''Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C\n args:\n x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)\n incremental_state: A dict to keep the state\n unfold: unfold the input or not. If not, we use the matrix trick instead\n '''\n unfold = unfold or (incremental_state is not None)\n\n if unfold:\n output = self._forward_unfolded(x, incremental_state)\n else:\n output = self._forward_expanded(x, incremental_state)\n\n if self.bias is not None:\n output = output + self.bias.view(1, 1, -1)\n return output\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def _forward_unfolded(self, x, incremental_state):\n '''The conventional implementation of convolutions.\n Unfolding the input by having a window shifting to the right.'''\n T, B, C = x.size()\n K, H = self.kernel_size, self.num_heads\n R = C // H\n assert R * H == C == self.input_size\n\n weight = self.weight.view(H, K)\n if incremental_state is not None:\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is None:\n input_buffer = x.new()\n x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)\n if self.kernel_size > 1:\n self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:])\n x_unfold = x_unfold.view(T*B*H, R, -1)\n else:\n # unfold the input: T x B x C --> T' x B x C x K\n x_unfold = unfold1d(x, self.kernel_size, self.padding_l, 0)\n x_unfold = x_unfold.view(T*B*H, R, K)\n\n if self.weight_softmax:\n weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(weight)\n\n if incremental_state is not None:\n weight = weight[:, -x_unfold.size(2):]\n K = weight.size(1)\n\n weight = weight.view(1, H, K).expand(T*B, H, K).contiguous().view(T*B*H, K, 1)\n\n weight = F.dropout(weight, self.weight_dropout, training=self.training)\n output = torch.bmm(x_unfold, weight) # T*B*H x R x 1\n output = output.view(T, B, C)\n return output\n\n def _forward_expanded(self, x, incremental_state):\n '''Turn the convolution filters into band matrices and do matrix multiplication.\n This is faster when the sequence is short, but less memory efficient.\n This is not used in the decoder during inference.\n '''\n T, B, C = x.size()\n K, H = 
self.kernel_size, self.num_heads\n R = C // H\n assert R * H == C == self.input_size\n\n weight = self.weight.view(H, K)\n if self.weight_softmax:\n weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(weight)\n weight = weight.view(1, H, K).expand(T*B, H, K).contiguous()\n weight = weight.view(T, B*H, K).transpose(0, 1)\n\n x = x.view(T, B*H, R).transpose(0, 1)\n P = self.padding_l\n if K > T and P == K-1:\n weight = weight.narrow(2, K-T, T)\n K, P = T, T-1\n # turn the convolution filters into band matrices\n weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False)\n weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)\n weight_expanded = weight_expanded.narrow(2, P, T)\n weight_expanded = F.dropout(weight_expanded, self.weight_dropout, training=self.training)\n\n output = torch.bmm(weight_expanded, x)\n output = output.transpose(0, 1).contiguous().view(T, B, C)\n return output\n\n def reorder_incremental_state(self, incremental_state, new_order):\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n input_buffer = input_buffer.index_select(1, new_order)\n self._set_input_buffer(incremental_state, input_buffer)\n\n def _get_input_buffer(self, incremental_state):\n return utils.get_incremental_state(self, incremental_state, 'input_buffer')\n\n def _set_input_buffer(self, incremental_state, new_buffer):\n return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)\n\n def extra_repr(self):\n s = '{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, bias={}'.format(\n self.input_size, self.kernel_size, self.padding_l,\n self.num_heads, self.weight_softmax, self.bias is not None\n )\n if self.weight_dropout > 0.:\n s += ', weight_dropout={}'.format(self.weight_dropout)\n return s\n"
]
| [
[
"torch.nn.init.constant_",
"torch.nn.functional.dropout",
"torch.nn.init.xavier_uniform_",
"torch.bmm",
"torch.nn.functional.conv1d",
"torch.cuda.is_available",
"torch.nn.functional.softmax",
"torch.Tensor"
]
]
|
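A usage sketch for LightweightConv1dTBC above, assuming the fairseq checkout from this row is importable; the tensor sizes and hyper-parameters are illustrative.

import torch
from fairseq.modules.lightweight_convolution import LightweightConv1dTBC  # path as in the row above

T, B, C = 10, 2, 16  # timesteps, batch, channels
conv = LightweightConv1dTBC(C, kernel_size=3, padding_l=2, num_heads=4,
                            weight_softmax=True, bias=True)
x = torch.randn(T, B, C)  # T x B x C layout, as documented in the class
y = conv(x)
print(y.shape)  # torch.Size([10, 2, 16])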
Abhi-Tiw1/ML-pipeline | [
"a77e4ae5edc2b84564cae721338432c2a7251bcc"
]
| [
"ml_pipeline_example.py"
]
| [
"\n\"\"\"\nExample code\nCode implements the ml pipeline on the iris dataset to do binary calssification\n\"\"\"\n\nfrom sklearn import datasets\nfrom ml_pipeline import *\n\ndef run_ml_pipeline(X, y, fs_alg, nof, feats, clfrs, samp_type, no_out_vec, no_iters=50):\n\t\"\"\"Calls get_cv_out for X number of iternation and saves the results \"\"\"\n\t\n\t#code gets the average cv values and other things for all the different\n\ttot_metric=np.empty((0,no_out_vec))\n\tf_names_tot=np.empty((0))\n\tno_iters=50\n\t#output for a single fold --> repeated 50 times\n\tfor rseed in range(0,no_iters,1):\n\t\tf_names,out_metric=get_cv_out(X,y,fs_alg,nof,np.array(feats),clfrs,samp_type,rseed)\n\t\ttot_metric=np.vstack((tot_metric,out_metric))\n\t\tf_names_tot=np.hstack((f_names_tot,f_names))\n\t\t\n\treturn f_names_tot, tot_metric\n\t\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n#making the problem binary\nX=X[:100,:]\ny=y[:100]\ntarget_names = iris.target_names\n\n#feature names\nfeats=np.array(['1','2','3','4'])\n#sampling type\nsamp_type='none'\n#number of features given as 2 out of 4\nnof=2\n#classifiers to use\nclfrs=['svm_rbf', 'svm_lnr','rf20', 'knn10','lr'] \n# feature selection algorithm - recurcive feature elimination \nfs_alg='rfe'\n#metric names for performance measurement\nmetrics_= ['bacc','f1','mcc','pre','rec','coh']\nno_metrics=len(metrics_)\n#output column names --> classifier + metric\nout_col_perfold=get_out_col_names(clfrs,metrics_)\n\n#final colum which also stores informaiton about balance of dataset\ncols = out_col_perfold+['balance']\n\nno_fin_cols=len(cols)\n#open the design matrix\nfin_arr_out=np.empty((0,no_fin_cols))\n\nprint('Balance is',np.round(np.mean(y),3))\nprint('Shape of arrays is ',X.shape,y.shape,'\\n--------------')\n\nno_out_vec=len(out_col_perfold)\n\nf_names_tot, tot_metric= run_ml_pipeline(X,y,fs_alg,nof,feats,clfrs, samp_type, no_out_vec)\n#saves the feature analysis for this feature selection aglo\noutpath_fin='./ml_pipeline_out/'\nif not os.path.exists(outpath_fin):\n\tos.makedirs(outpath_fin)\nout_vec=save_perfold_res(tot_metric,out_col_perfold,f_names_tot,outpath_fin)\nout_vec=np.round(out_vec,3)\nout_vec=np.hstack((out_vec,np.round(np.mean(y),3)))\n#results for a given pwl level and feat selection method\nfin_arr_out=np.vstack((fin_arr_out,out_vec))\n\t\n#saving for all pwls and given lobe\nfnm_all='output_fin.csv'\nsave_df_results(fin_arr_out,cols,outpath_fin,fnm_all)\t\n\n\n\n\n"
]
| [
[
"sklearn.datasets.load_iris"
]
]
|
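The example above relies on the repository's ml_pipeline helpers (get_cv_out, save_perfold_res, and others), which are not shown in this row. As a standalone illustration of the same idea, the sketch below runs recursive feature elimination (fs_alg='rfe', nof=2) on the binary iris subset with plain scikit-learn rather than the repo's wrapper.

import numpy as np
from sklearn import datasets
from sklearn.feature_selection import RFE
from sklearn.svm import SVC

iris = datasets.load_iris()
X, y = iris.data[:100, :], iris.target[:100]  # first 100 rows -> binary problem, as above

# Recursive feature elimination down to 2 of the 4 iris features.
selector = RFE(SVC(kernel="linear"), n_features_to_select=2).fit(X, y)
feats = np.array(["1", "2", "3", "4"])
print("selected features:", feats[selector.support_])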
guoanjie/CS224n | [
"ef80661aa7dc1fb5540a824b2f0571d7e357bb83"
]
| [
"a5/highway.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCS224N 2018-19: Homework 5\n\"\"\"\n\n### YOUR CODE HERE for part 1h\nimport torch\nimport torch.nn as nn\n\n\nclass Highway(nn.Module):\n def __init__(self, features):\n super(Highway, self).__init__()\n self.proj = nn.Linear(in_features=features, out_features=features)\n self.gate = nn.Linear(in_features=features, out_features=features)\n\n def forward(self, x_conv_out: torch.Tensor) -> torch.Tensor:\n x_proj = torch.relu(self.proj(x_conv_out))\n x_gate = torch.sigmoid(self.proj(x_conv_out))\n return x_gate * x_proj + (1 - x_gate) * x_conv_out\n### END YOUR CODE \n\n"
]
| [
[
"torch.nn.Linear"
]
]
|
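A usage sketch for the Highway block above, assuming a5/highway.py is on the import path; the batch size and feature width are arbitrary.

import torch
from highway import Highway  # assumes a5/highway.py is importable

batch_size, e_word = 4, 8
highway = Highway(features=e_word)
x_conv_out = torch.randn(batch_size, e_word)
x_highway = highway(x_conv_out)
print(x_highway.shape)  # torch.Size([4, 8])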
erum-omdena/ai-challenge-mars | [
"906bf82a7b647a0a430fc0caca18cb19806056e7"
]
| [
"Mask_RCNN/mrcnn/visualize.py"
]
| [
"\"\"\"\nMask R-CNN\nDisplay and Visualization Functions.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport sys\nimport random\nimport itertools\nimport colorsys\n\nimport numpy as np\nfrom skimage.measure import find_contours\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches, lines\nfrom matplotlib.patches import Polygon\nimport IPython.display\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn import utils\n\n\n############################################################\n# Visualization\n############################################################\n\ndef display_images(images, titles=None, cols=4, cmap=None, norm=None,\n interpolation=None):\n \"\"\"Display the given set of images, optionally with titles.\n images: list or array of image tensors in HWC format.\n titles: optional. A list of titles to display with each image.\n cols: number of images per row\n cmap: Optional. Color map to use. For example, \"Blues\".\n norm: Optional. A Normalize instance to map values to colors.\n interpolation: Optional. Image interpolation to use for display.\n \"\"\"\n titles = titles if titles is not None else [\"\"] * len(images)\n rows = len(images) // cols + 1\n plt.figure(figsize=(14, 14 * rows // cols))\n i = 1\n for image, title in zip(images, titles):\n plt.subplot(rows, cols, i)\n plt.title(title, fontsize=9)\n plt.axis('off')\n plt.imshow(image.astype(np.uint8), cmap=cmap,\n norm=norm, interpolation=interpolation)\n i += 1\n plt.show()\n\n\ndef random_colors(N, bright=True):\n \"\"\"\n Generate random colors.\n To get visually distinct colors, generate them in HSV space then\n convert to RGB.\n \"\"\"\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors\n\n\ndef apply_mask(image, mask, color, alpha=0.5):\n \"\"\"Apply the given mask to the image.\n \"\"\"\n for c in range(3):\n image[:, :, c] = np.where(mask == 1,\n image[:, :, c] *\n (1 - alpha) + alpha * color[c] * 255,\n image[:, :, c])\n return image\n\n\ndef display_instances(image, boxes, masks, class_ids, class_names,\n scores=None, title=\"\",\n figsize=(16, 16), ax=None,\n show_mask=True, show_bbox=True,\n colors=None, captions=None):\n \"\"\"\n boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.\n masks: [height, width, num_instances]\n class_ids: [num_instances]\n class_names: list of class names of the dataset\n scores: (optional) confidence scores for each box\n title: (optional) Figure title\n show_mask, show_bbox: To show masks and bounding boxes or not\n figsize: (optional) the size of the image\n colors: (optional) An array or colors to use with each object\n captions: (optional) A list of strings to use as captions for each object\n \"\"\"\n # Number of instances\n N = boxes.shape[0]\n if not N:\n print(\"\\n*** No instances to display *** \\n\")\n else:\n assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\n\n # If no axis is passed, create one and automatically call show()\n auto_show = False\n if not ax:\n _, ax = plt.subplots(1, figsize=figsize)\n auto_show = True\n\n # Generate random colors\n colors = colors or random_colors(N)\n\n # Show area outside image boundaries.\n height, width = image.shape[:2]\n 
ax.set_ylim(height + 10, -10)\n ax.set_xlim(-10, width + 10)\n ax.axis('off')\n ax.set_title(title)\n\n masked_image = image.astype(np.uint32).copy()\n for i in range(N):\n color = colors[i]\n\n # Bounding box\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in image cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n if show_bbox:\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n alpha=0.7, linestyle=\"dashed\",\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n\n # Label\n if not captions:\n class_id = class_ids[i]\n print(\"i\", i)\n print(\"class_ids\", class_ids)\n print(class_names)\n score = scores[i] if scores is not None else None\n label = class_names[class_id]\n caption = \"{} {:.3f}\".format(label, score) if score else label\n else:\n caption = captions[i]\n ax.text(x1, y1 + 8, caption,\n color='w', size=11, backgroundcolor=\"none\")\n\n # Mask\n mask = masks[:, :, i]\n if show_mask:\n masked_image = apply_mask(masked_image, mask, color)\n\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n for verts in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n verts = np.fliplr(verts) - 1\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\n ax.add_patch(p)\n ax.imshow(masked_image.astype(np.uint8))\n if auto_show:\n plt.show()\n\n\ndef display_differences(image,\n gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n class_names, title=\"\", ax=None,\n show_mask=True, show_box=True,\n iou_threshold=0.5, score_threshold=0.5):\n \"\"\"Display ground truth and prediction instances on the same image.\"\"\"\n # Match predictions to ground truth\n gt_match, pred_match, overlaps = utils.compute_matches(\n gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n iou_threshold=iou_threshold, score_threshold=score_threshold)\n # Ground truth = green. 
Predictions = red\n colors = [(0, 1, 0, .8)] * len(gt_match)\\\n + [(1, 0, 0, 1)] * len(pred_match)\n # Concatenate GT and predictions\n class_ids = np.concatenate([gt_class_id, pred_class_id])\n scores = np.concatenate([np.zeros([len(gt_match)]), pred_score])\n boxes = np.concatenate([gt_box, pred_box])\n masks = np.concatenate([gt_mask, pred_mask], axis=-1)\n # Captions per instance show score/IoU\n captions = [\"\" for m in gt_match] + [\"{:.2f} / {:.2f}\".format(\n pred_score[i],\n (overlaps[i, int(pred_match[i])]\n if pred_match[i] > -1 else overlaps[i].max()))\n for i in range(len(pred_match))]\n # Set title if not provided\n title = title or \"Ground Truth and Detections\\n GT=green, pred=red, captions: score/IoU\"\n # Display\n display_instances(\n image,\n boxes, masks, class_ids,\n class_names, scores, ax=ax,\n show_bbox=show_box, show_mask=show_mask,\n colors=colors, captions=captions,\n title=title)\n\n\ndef draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):\n \"\"\"\n anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.\n proposals: [n, 4] the same anchors but refined to fit objects better.\n \"\"\"\n masked_image = image.copy()\n\n # Pick random anchors in case there are too many.\n ids = np.arange(rois.shape[0], dtype=np.int32)\n ids = np.random.choice(\n ids, limit, replace=False) if ids.shape[0] > limit else ids\n\n fig, ax = plt.subplots(1, figsize=(12, 12))\n if rois.shape[0] > limit:\n plt.title(\"Showing {} random ROIs out of {}\".format(\n len(ids), rois.shape[0]))\n else:\n plt.title(\"{} ROIs\".format(len(ids)))\n\n # Show area outside image boundaries.\n ax.set_ylim(image.shape[0] + 20, -20)\n ax.set_xlim(-50, image.shape[1] + 20)\n ax.axis('off')\n\n for i, id in enumerate(ids):\n color = np.random.rand(3)\n class_id = class_ids[id]\n # ROI\n y1, x1, y2, x2 = rois[id]\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n edgecolor=color if class_id else \"gray\",\n facecolor='none', linestyle=\"dashed\")\n ax.add_patch(p)\n # Refined ROI\n if class_id:\n ry1, rx1, ry2, rx2 = refined_rois[id]\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n # Connect the top-left corners of the anchor and proposal for easy visualization\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\n\n # Label\n label = class_names[class_id]\n ax.text(rx1, ry1 + 8, \"{}\".format(label),\n color='w', size=11, backgroundcolor=\"none\")\n\n # Mask\n m = utils.unmold_mask(mask[id], rois[id]\n [:4].astype(np.int32), image.shape)\n masked_image = apply_mask(masked_image, m, color)\n\n ax.imshow(masked_image)\n\n # Print stats\n print(\"Positive ROIs: \", class_ids[class_ids > 0].shape[0])\n print(\"Negative ROIs: \", class_ids[class_ids == 0].shape[0])\n print(\"Positive Ratio: {:.2f}\".format(\n class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))\n\n\n# TODO: Replace with matplotlib equivalent?\ndef draw_box(image, box, color):\n \"\"\"Draw 3-pixel width bounding boxes on the given image array.\n color: list of 3 int values for RGB.\n \"\"\"\n y1, x1, y2, x2 = box\n image[y1:y1 + 2, x1:x2] = color\n image[y2:y2 + 2, x1:x2] = color\n image[y1:y2, x1:x1 + 2] = color\n image[y1:y2, x2:x2 + 2] = color\n return image\n\n\ndef display_top_masks(image, mask, class_ids, class_names, limit=4):\n \"\"\"Display the given image and the top few class masks.\"\"\"\n to_display = []\n titles = []\n to_display.append(image)\n titles.append(\"H x 
W={}x{}\".format(image.shape[0], image.shape[1]))\n # Pick top prominent classes in this image\n unique_class_ids = np.unique(class_ids)\n mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])\n for i in unique_class_ids]\n top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),\n key=lambda r: r[1], reverse=True) if v[1] > 0]\n # Generate images and titles\n for i in range(limit):\n class_id = top_ids[i] if i < len(top_ids) else -1\n # Pull masks of instances belonging to the same class.\n m = mask[:, :, np.where(class_ids == class_id)[0]]\n m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1)\n to_display.append(m)\n titles.append(class_names[class_id] if class_id != -1 else \"-\")\n display_images(to_display, titles=titles, cols=limit + 1, cmap=\"Blues_r\")\n\n\ndef plot_precision_recall(AP, precisions, recalls):\n \"\"\"Draw the precision-recall curve.\n\n AP: Average precision at IoU >= 0.5\n precisions: list of precision values\n recalls: list of recall values\n \"\"\"\n # Plot the Precision-Recall curve\n _, ax = plt.subplots(1)\n ax.set_title(\"Precision-Recall Curve. AP@50 = {:.3f}\".format(AP))\n ax.set_ylim(0, 1.1)\n ax.set_xlim(0, 1.1)\n _ = ax.plot(recalls, precisions)\n\n\ndef plot_overlaps(gt_class_ids, pred_class_ids, pred_scores,\n overlaps, class_names, threshold=0.5):\n \"\"\"Draw a grid showing how ground truth objects are classified.\n gt_class_ids: [N] int. Ground truth class IDs\n pred_class_id: [N] int. Predicted class IDs\n pred_scores: [N] float. The probability scores of predicted classes\n overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes.\n class_names: list of all class names in the dataset\n threshold: Float. The prediction probability required to predict a class\n \"\"\"\n gt_class_ids = gt_class_ids[gt_class_ids != 0]\n pred_class_ids = pred_class_ids[pred_class_ids != 0]\n\n plt.figure(figsize=(12, 10))\n plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)\n plt.yticks(np.arange(len(pred_class_ids)),\n [\"{} ({:.2f})\".format(class_names[int(id)], pred_scores[i])\n for i, id in enumerate(pred_class_ids)])\n plt.xticks(np.arange(len(gt_class_ids)),\n [class_names[int(id)] for id in gt_class_ids], rotation=90)\n\n thresh = overlaps.max() / 2.\n for i, j in itertools.product(range(overlaps.shape[0]),\n range(overlaps.shape[1])):\n text = \"\"\n if overlaps[i, j] > threshold:\n text = \"match\" if gt_class_ids[j] == pred_class_ids[i] else \"wrong\"\n color = (\"white\" if overlaps[i, j] > thresh\n else \"black\" if overlaps[i, j] > 0\n else \"grey\")\n plt.text(j, i, \"{:.3f}\\n{}\".format(overlaps[i, j], text),\n horizontalalignment=\"center\", verticalalignment=\"center\",\n fontsize=9, color=color)\n\n plt.tight_layout()\n plt.xlabel(\"Ground Truth\")\n plt.ylabel(\"Predictions\")\n\n\ndef draw_boxes(image, boxes=None, refined_boxes=None,\n masks=None, captions=None, visibilities=None,\n title=\"\", ax=None):\n \"\"\"Draw bounding boxes and segmentation masks with different\n customizations.\n\n boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.\n refined_boxes: Like boxes, but draw with solid lines to show\n that they're the result of refining 'boxes'.\n masks: [N, height, width]\n captions: List of N titles to display on each box\n visibilities: (optional) List of values of 0, 1, or 2. 
Determine how\n prominent each bounding box should be.\n title: An optional title to show over the image\n ax: (optional) Matplotlib axis to draw on.\n \"\"\"\n # Number of boxes\n assert boxes is not None or refined_boxes is not None\n N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]\n\n # Matplotlib Axis\n if not ax:\n _, ax = plt.subplots(1, figsize=(12, 12))\n\n # Generate random colors\n colors = random_colors(N)\n\n # Show area outside image boundaries.\n margin = image.shape[0] // 10\n ax.set_ylim(image.shape[0] + margin, -margin)\n ax.set_xlim(-margin, image.shape[1] + margin)\n ax.axis('off')\n\n ax.set_title(title)\n\n masked_image = image.astype(np.uint32).copy()\n for i in range(N):\n # Box visibility\n visibility = visibilities[i] if visibilities is not None else 1\n if visibility == 0:\n color = \"gray\"\n style = \"dotted\"\n alpha = 0.5\n elif visibility == 1:\n color = colors[i]\n style = \"dotted\"\n alpha = 1\n elif visibility == 2:\n color = colors[i]\n style = \"solid\"\n alpha = 1\n\n # Boxes\n if boxes is not None:\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n alpha=alpha, linestyle=style,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n\n # Refined boxes\n if refined_boxes is not None and visibility > 0:\n ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n # Connect the top-left corners of the anchor and proposal\n if boxes is not None:\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\n\n # Captions\n if captions is not None:\n caption = captions[i]\n # If there are refined boxes, display captions on them\n if refined_boxes is not None:\n y1, x1, y2, x2 = ry1, rx1, ry2, rx2\n ax.text(x1, y1, caption, size=11, verticalalignment='top',\n color='w', backgroundcolor=\"none\",\n bbox={'facecolor': color, 'alpha': 0.5,\n 'pad': 2, 'edgecolor': 'none'})\n\n # Masks\n if masks is not None:\n mask = masks[:, :, i]\n masked_image = apply_mask(masked_image, mask, color)\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n for verts in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n verts = np.fliplr(verts) - 1\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\n ax.add_patch(p)\n ax.imshow(masked_image.astype(np.uint8))\n\n\ndef display_table(table):\n \"\"\"Display values in a table format.\n table: an iterable of rows, and each row is an iterable of values.\n \"\"\"\n html = \"\"\n for row in table:\n row_html = \"\"\n for col in row:\n row_html += \"<td>{:40}</td>\".format(str(col))\n html += \"<tr>\" + row_html + \"</tr>\"\n html = \"<table>\" + html + \"</table>\"\n IPython.display.display(IPython.display.HTML(html))\n\n\ndef display_weight_stats(model):\n \"\"\"Scans all the weights in the model and returns a list of tuples\n that contain stats about each weight.\n \"\"\"\n layers = model.get_trainable_layers()\n table = [[\"WEIGHT NAME\", \"SHAPE\", \"MIN\", \"MAX\", \"STD\"]]\n for l in layers:\n weight_values = l.get_weights() # list of Numpy arrays\n weight_tensors = l.weights # list of TF tensors\n for i, w in 
enumerate(weight_values):\n weight_name = weight_tensors[i].name\n # Detect problematic layers. Exclude biases of conv layers.\n alert = \"\"\n if w.min() == w.max() and not (l.__class__.__name__ == \"Conv2D\" and i == 1):\n alert += \"<span style='color:red'>*** dead?</span>\"\n if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:\n alert += \"<span style='color:red'>*** Overflow?</span>\"\n # Add row\n table.append([\n weight_name + alert,\n str(w.shape),\n \"{:+9.4f}\".format(w.min()),\n \"{:+10.4f}\".format(w.max()),\n \"{:+9.4f}\".format(w.std()),\n ])\n display_table(table)\n"
]
| [
[
"numpy.random.choice",
"numpy.random.rand",
"numpy.where",
"matplotlib.patches.Rectangle",
"numpy.concatenate",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"matplotlib.lines.Line2D",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.fliplr",
"matplotlib.pyplot.xlabel",
"numpy.any",
"matplotlib.pyplot.ylabel",
"numpy.unique",
"matplotlib.pyplot.imshow"
]
]
|
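A minimal standalone sketch of the `matplotlib` drawing pattern that `draw_rois`, `draw_boxes`, and the other helpers above rely on: an image shown on an axis, a dashed `Rectangle` patch for the box, and a text caption. The image and box coordinates below are made up for illustration; none of the helper names from the row are used.

```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches

# Dummy image and a box in the (y1, x1, y2, x2) order used above.
image = np.zeros((128, 128, 3), dtype=np.uint8)
y1, x1, y2, x2 = 20, 30, 90, 100

_, ax = plt.subplots(1, figsize=(6, 6))
ax.axis('off')
ax.imshow(image)
# Dashed rectangle outline, no fill, in the same style as the ROI/box drawing above.
ax.add_patch(patches.Rectangle((x1, y1), x2 - x1, y2 - y1,
                               linewidth=2, linestyle="dashed",
                               edgecolor=(0.0, 1.0, 0.0), facecolor='none'))
# Caption anchored just below the top-left corner of the box.
ax.text(x1, y1 + 8, "example 0.97", color='w', size=11, backgroundcolor="none")
plt.show()
```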
leroidauphin/probability | [
"62e69c0221634480e1f9520144be6a5652761b41"
]
| [
"tensorflow_probability/python/distributions/joint_distribution_coroutine.py"
]
| [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The `JointDistributionCoroutine` class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport collections\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.distributions import joint_distribution as joint_distribution_lib\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.internal import structural_tuple\nfrom tensorflow_probability.python.util.seed_stream import SeedStream\nfrom tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import\n\n\n__all__ = [\n 'JointDistributionCoroutine',\n]\n\n\nclass JointDistributionCoroutine(joint_distribution_lib.JointDistribution):\n \"\"\"Joint distribution parameterized by a distribution-making generator.\n\n This distribution enables both sampling and joint probability computation from\n a single model specification.\n\n A joint distribution is a collection of possibly interdependent distributions.\n The `JointDistributionCoroutine` is specified by a generator that\n generates the elements of this collection.\n\n #### Mathematical Details\n\n The `JointDistributionCoroutine` implements the chain rule of probability.\n That is, the probability function of a length-`d` vector `x` is,\n\n ```none\n p(x) = prod{ p(x[i] | x[:i]) : i = 0, ..., (d - 1) }\n ```\n\n The `JointDistributionCoroutine` is parameterized by a generator\n that yields `tfp.distributions.Distribution`-like instances.\n\n Each element yielded implements the `i`-th *full conditional distribution*,\n `p(x[i] | x[:i])`. Within the generator, the return value from the yield\n is a sample from the distribution that may be used to construct subsequent\n yielded `Distribution`-like instances. This allows later instances\n to be conditional on earlier ones.\n\n When the `sample` method for a `JointDistributionCoroutine` is called with\n a `sample_shape`, the `sample` method for each of the yielded\n distributions is called.\n The distributions that have been wrapped in the\n `JointDistributionCoroutine.Root` class will be called with `sample_shape` as\n the `sample_shape` argument, and the unwrapped distributions\n will be called with `()` as the `sample_shape` argument.\n\n It is the user's responsibility to ensure that\n each of the distributions generates samples with the specified sample\n size.\n\n **Name resolution**: The names of `JointDistributionCoroutine` components\n may be specified by passing `name` arguments to distribution constructors (\n `tfd.Normal(0., 1., name='x')). 
Components without an explicit name will be\n assigned a dummy name.\n\n #### Examples\n\n ```python\n tfd = tfp.distributions\n\n # Consider the following generative model:\n # e ~ Exponential(rate=[100, 120])\n # g ~ Gamma(concentration=e[0], rate=e[1])\n # n ~ Normal(loc=0, scale=2.)\n # m ~ Normal(loc=n, scale=g)\n\n # In TFP, we can write this as:\n Root = tfd.JointDistributionCoroutine.Root # Convenient alias.\n def model():\n e = yield Root(tfd.Independent(tfd.Exponential(rate=[100, 120]), 1))\n g = yield tfd.Gamma(concentration=e[..., 0], rate=e[..., 1])\n n = yield Root(tfd.Normal(loc=0, scale=2.))\n m = yield tfd.Normal(loc=n, scale=g)\n\n joint = tfd.JointDistributionCoroutine(model)\n\n x = joint.sample()\n # ==> x is a length-4 tuple of Tensors representing a draw/realization from\n # each distribution.\n joint.log_prob(x)\n # ==> A scalar `Tensor` representing the total log prob under all four\n # distributions.\n ```\n\n For improved readability of sampled values, the yielded distributions can also\n be named:\n\n ```python\n tfd = tfp.distributions\n\n Root = tfd.JointDistributionCoroutine.Root # Convenient alias.\n def model():\n e = yield Root(tfd.Independent(\n tfd.Exponential(rate=[100, 120], name='e'), 1))\n g = yield tfd.Gamma(concentration=e[..., 0], rate=e[..., 1], name='g')\n n = yield Root(tfd.Normal(loc=0, scale=2., name='n'))\n m = yield tfd.Normal(loc=n, scale=g, name='m')\n\n joint = tfd.JointDistributionCoroutine(model)\n\n x = joint.sample()\n # ==> x is a namedtuple with fields (in order) 'e', 'g', 'n', 'm' and values\n # representing the draw/realization from each corresponding distribution.\n joint.log_prob(x)\n # ==> A scalar `Tensor` representing the total log prob under all four\n # distributions.\n\n # Passing dictionaries via `kwargs` also works.\n joint.log_prob(**x._as_dict())\n # Or:\n joint.log_prob(e=..., g=..., n=..., m=...)\n ```\n\n If any of the yielded distributions are not explicitly named, they will\n automatically be given a name of the form `var#` where `#` is the index of the\n associated distribution. E.g. the first yielded distribution will have a\n default name of `var0`.\n\n\n #### Discussion\n\n Each element yielded by the generator must be a `tfd.Distribution`-like\n instance.\n\n An object is deemed '`tfd.Distribution`-like' if it has a\n `sample`, `log_prob`, and distribution properties, e.g., `batch_shape`,\n `event_shape`, `dtype`.\n\n Consider the following fragment from a generator:\n\n ```python\n n = yield Root(tfd.Normal(loc=0, scale=2.))\n m = yield tfd.Normal(loc=n, scale=1.0)\n ```\n\n The random variable `n` has no dependence on earlier random variables and\n `Root` is used to indicate that its distribution needs to be passed a\n `sample_shape`. On the other hand, the distribution of `m` is constructed\n using the value of `n`. This means that `n` is already shaped according to\n the `sample_shape` and there is no need to pass `m`'s distribution a\n `sample_size`. So `Root` is not used to wrap `m`'s distribution.\n\n **Note**: unlike most other distributions in `tfp.distributions`,\n `JointDistributionCoroutine.sample` returns a `tuple` of `Tensor`s\n rather than a `Tensor`. Accordingly `joint.batch_shape` returns a\n `tuple` of `TensorShape`s for each of the distributions' batch shapes\n and `joint.batch_shape_tensor()` returns a `tuple` of `Tensor`s for\n each of the distributions' event shapes. 
(Same with `event_shape` analogues.)\n \"\"\"\n\n class Root(collections.namedtuple('Root', ['distribution'])):\n \"\"\"Wrapper for coroutine distributions which lack distribution parents.\"\"\"\n __slots__ = ()\n\n def __init__(\n self,\n model,\n sample_dtype=None,\n validate_args=False,\n name=None,\n ):\n \"\"\"Construct the `JointDistributionCoroutine` distribution.\n\n Args:\n model: A generator that yields a sequence of `tfd.Distribution`-like\n instances.\n sample_dtype: Samples from this distribution will be structured like\n `tf.nest.pack_sequence_as(sample_dtype, list_)`. `sample_dtype` is only\n used for `tf.nest.pack_sequence_as` structuring of outputs, never\n casting (which is the responsibility of the component distributions).\n Default value: `None` (i.e. `namedtuple`).\n validate_args: Python `bool`. Whether to validate input with asserts.\n If `validate_args` is `False`, and the inputs are invalid,\n correct behavior is not guaranteed.\n Default value: `False`.\n name: The name for ops managed by the distribution.\n Default value: `None` (i.e., `JointDistributionCoroutine`).\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name or 'JointDistributionCoroutine') as name:\n self._sample_dtype = sample_dtype\n self._model_coroutine = model\n self._single_sample_distributions = {}\n super(JointDistributionCoroutine, self).__init__(\n dtype=sample_dtype,\n reparameterization_type=None, # Ignored; we'll override.\n validate_args=validate_args,\n allow_nan_stats=False,\n parameters=parameters,\n name=name)\n\n @property\n def _require_root(self):\n return True\n\n @property\n def model(self):\n return self._model_coroutine\n\n def _assert_compatible_shape(self, index, sample_shape, samples):\n requested_shape, _ = self._expand_sample_shape_to_vector(\n tf.convert_to_tensor(sample_shape, dtype=tf.int32),\n name='requested_shape')\n actual_shape = prefer_static.shape(samples)\n actual_rank = prefer_static.rank_from_shape(actual_shape)\n requested_rank = prefer_static.rank_from_shape(requested_shape)\n\n # We test for two properties we expect of yielded distributions:\n # (1) The rank of the tensor of generated samples must be at least\n # as large as the rank requested.\n # (2) The requested shape must be a prefix of the shape of the\n # generated tensor of samples.\n # We attempt to perform test (1) statically first.\n # We don't need to do this explicitly for test (2) because\n # `assert_equal` evaluates statically if it can.\n static_actual_rank = tf.get_static_value(actual_rank)\n static_requested_rank = tf.get_static_value(requested_rank)\n\n assertion_message = ('Samples yielded by distribution #{} are not '\n 'consistent with `sample_shape` passed to '\n '`JointDistributionCoroutine` '\n 'distribution.'.format(index))\n\n # TODO Remove this static check (b/138738650)\n if (static_actual_rank is not None and\n static_requested_rank is not None):\n # We're able to statically check the rank\n if static_actual_rank < static_requested_rank:\n raise ValueError(assertion_message)\n else:\n control_dependencies = []\n else:\n # We're not able to statically check the rank\n control_dependencies = [\n assert_util.assert_greater_equal(\n actual_rank, requested_rank,\n message=assertion_message)\n ]\n\n with tf.control_dependencies(control_dependencies):\n trimmed_actual_shape = actual_shape[:requested_rank]\n\n control_dependencies = [\n assert_util.assert_equal(\n requested_shape, trimmed_actual_shape,\n message=assertion_message)\n ]\n\n return control_dependencies\n\n 
def _flat_sample_distributions(self, sample_shape=(), seed=None, value=None):\n \"\"\"Executes `model`, creating both samples and distributions.\"\"\"\n ds = []\n values_out = []\n seed = SeedStream(seed, salt='JointDistributionCoroutine')\n gen = self._model_coroutine()\n index = 0\n d = next(gen)\n if self._require_root and not isinstance(d, self.Root):\n raise ValueError('First distribution yielded by coroutine must '\n 'be wrapped in `Root`.')\n try:\n while True:\n actual_distribution = d.distribution if isinstance(d, self.Root) else d\n ds.append(actual_distribution)\n if (value is not None and len(value) > index and\n value[index] is not None):\n seed() # Ensure reproducibility even when xs are (partially) set.\n\n def convert_tree_to_tensor(x, dtype_hint):\n return tf.convert_to_tensor(x, dtype_hint=dtype_hint)\n\n # This signature does not allow kwarg names. Applies\n # `convert_to_tensor` on the next value.\n next_value = nest.map_structure_up_to(\n ds[-1].dtype, # shallow_tree\n convert_tree_to_tensor, # func\n value[index], # x\n ds[-1].dtype) # dtype_hint\n else:\n next_value = actual_distribution.sample(\n sample_shape=sample_shape if isinstance(d, self.Root) else (),\n seed=seed())\n\n if self._validate_args:\n with tf.control_dependencies(\n self._assert_compatible_shape(\n index, sample_shape, next_value)):\n values_out.append(tf.nest.map_structure(tf.identity, next_value))\n else:\n values_out.append(next_value)\n\n index += 1\n d = gen.send(next_value)\n except StopIteration:\n pass\n return ds, values_out\n\n def _model_unflatten(self, xs):\n if self._sample_dtype is None:\n return structural_tuple.structtuple(self._flat_resolve_names())(*xs)\n # Cast `xs` as `tuple` so we can handle generators.\n return tf.nest.pack_sequence_as(self._sample_dtype, tuple(xs))\n\n def _model_flatten(self, xs):\n if self._sample_dtype is None:\n return tuple(xs)\n return nest.flatten_up_to(self._sample_dtype, xs)\n"
]
| [
[
"tensorflow.compat.v2.control_dependencies",
"tensorflow.compat.v2.get_static_value",
"tensorflow.python.util.nest.map_structure_up_to",
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.name_scope",
"tensorflow.python.util.nest.flatten_up_to"
]
]
|
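A minimal runnable sketch of the `Root`/`yield` pattern documented in the class docstring above, assuming `tensorflow` and `tensorflow_probability` are installed. The two-variable model is invented for illustration; only the `JointDistributionCoroutine` API itself comes from the row.

```python
import tensorflow_probability as tfp

tfd = tfp.distributions
Root = tfd.JointDistributionCoroutine.Root  # marks distributions with no parents

def model():
    # Root: no upstream dependencies, so this distribution receives the sample_shape.
    scale = yield Root(tfd.HalfNormal(scale=1., name='scale'))
    # Depends on `scale`, so it is not wrapped in Root.
    yield tfd.Normal(loc=0., scale=scale, name='obs')

joint = tfd.JointDistributionCoroutine(model)
x = joint.sample()       # namedtuple-like value with fields 'scale' and 'obs'
lp = joint.log_prob(x)   # scalar: log p(scale) + log p(obs | scale)
print(x, lp)
```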
WeilerP/scvelo | [
"1805ab4a72d3f34496f0ef246500a159f619d3a2"
]
| [
"scvelo/core/_arithmetic.py"
]
| [
"import warnings\nfrom typing import Optional, Union\n\nimport numpy as np\nfrom numpy import ndarray\nfrom scipy.sparse import issparse, spmatrix\n\n\ndef clipped_log(x: ndarray, lb: float = 0, ub: float = 1, eps: float = 1e-6) -> ndarray:\n \"\"\"Logarithmize between [lb + epsilon, ub - epsilon].\n\n Arguments\n ---------\n x\n Array to invert.\n lb\n Lower bound of interval to which array entries are clipped.\n ub\n Upper bound of interval to which array entries are clipped.\n eps\n Offset of boundaries of clipping interval.\n\n Returns\n -------\n ndarray\n Logarithm of clipped array.\n \"\"\"\n\n return np.log(np.clip(x, lb + eps, ub - eps))\n\n\ndef invert(x: ndarray) -> ndarray:\n \"\"\"Invert array and set infinity to NaN.\n\n Arguments\n ---------\n x\n Array to invert.\n\n Returns\n -------\n ndarray\n Inverted array.\n \"\"\"\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n x_inv = 1 / x * (x != 0)\n return x_inv\n\n\ndef prod_sum(\n a1: Union[ndarray, spmatrix], a2: Union[ndarray, spmatrix], axis: Optional[int]\n) -> ndarray:\n \"\"\"Take sum of product of two arrays along given axis.\n\n Arguments\n ---------\n a1\n First array.\n a2\n Second array.\n axis\n Axis along which to sum elements. If `None`, all elements will be summed.\n Defaults to `None`.\n\n Returns\n -------\n ndarray\n Sum of product of arrays along given axis.\n \"\"\"\n\n if issparse(a1):\n return a1.multiply(a2).sum(axis=axis).A1\n elif axis == 0:\n return np.einsum(\"ij, ij -> j\", a1, a2) if a1.ndim > 1 else (a1 * a2).sum()\n elif axis == 1:\n return np.einsum(\"ij, ij -> i\", a1, a2) if a1.ndim > 1 else (a1 * a2).sum()\n\n\ndef sum(a: Union[ndarray, spmatrix], axis: Optional[int] = None) -> ndarray:\n \"\"\"Sum array elements over a given axis.\n\n Arguments\n ---------\n a\n Elements to sum.\n axis\n Axis along which to sum elements. If `None`, all elements will be summed.\n Defaults to `None`.\n\n Returns\n -------\n ndarray\n Sum of array along given axis.\n \"\"\"\n\n if a.ndim == 1:\n axis = 0\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return a.sum(axis=axis).A1 if issparse(a) else a.sum(axis=axis)\n"
]
| [
[
"scipy.sparse.issparse",
"numpy.einsum",
"numpy.clip"
]
]
|
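A quick usage sketch of the three helpers above, assuming `scvelo` is installed; the import path simply mirrors the `file_path` column and the input values are arbitrary.

```python
import numpy as np
from scipy.sparse import csr_matrix

from scvelo.core._arithmetic import clipped_log, invert, prod_sum

x = np.array([0.0, 0.5, 1.0])
print(clipped_log(x))                 # log of x clipped to [eps, 1 - eps]

print(invert(np.array([0.0, 2.0])))   # [nan, 0.5]; zero entries become NaN

a = csr_matrix(np.arange(6.0).reshape(2, 3))
b = np.ones((2, 3))
print(prod_sum(a, b, axis=0))         # column-wise sum of element-wise products
```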
tperol/neuralnilm | [
"06bd6abc4db41140b65dfc0677677ef2aecff349"
]
| [
"experiment_definitions/e575.py"
]
| [
"from __future__ import print_function, division\n\n# Stop matplotlib from drawing to X.\n# Must be before importing matplotlib.pyplot or pylab!\nimport matplotlib\nmatplotlib.use('Agg')\n\nfrom lasagne.layers import InputLayer, DenseLayer, ReshapeLayer\n\nfrom neuralnilm.data.loadactivations import load_nilmtk_activations\nfrom neuralnilm.data.syntheticaggregatesource import SyntheticAggregateSource\nfrom neuralnilm.data.realaggregatesource import RealAggregateSource\nfrom neuralnilm.data.stridesource import StrideSource\nfrom neuralnilm.data.datapipeline import DataPipeline\nfrom neuralnilm.data.processing import DivideBy, IndependentlyCenter\nfrom neuralnilm.net import Net\nfrom neuralnilm.trainer import Trainer\nfrom neuralnilm.metrics import Metrics\nfrom neuralnilm.consts import DATA_FOLD_NAMES\n\n\nNILMTK_FILENAME = '/data/dk3810/ukdale.h5'\nSAMPLE_PERIOD = 6\nSTRIDE = None\nAPPLIANCES = [\n 'kettle', 'microwave', 'washing machine', 'dish washer', 'fridge']\nWINDOWS = {\n 'train': {\n 1: (\"2013-04-12\", \"2015-07-01\"),\n 2: (\"2013-05-22\", \"2013-10-03 06:16:00\"),\n 3: (\"2013-02-27\", \"2013-04-01 06:15:05\"),\n 4: (\"2013-03-09\", \"2013-09-24 06:15:14\")\n },\n 'unseen_activations_of_seen_appliances': {\n 1: (\"2015-07-02\", None),\n 2: (\"2013-10-03 06:16:00\", None),\n 3: (\"2013-04-01 06:15:05\", None),\n 4: (\"2013-09-24 06:15:14\", None)\n },\n 'unseen_appliances': {\n 5: (\"2014-06-29\", None)\n }\n}\n\n\ndef run(root_experiment_name):\n activations = load_nilmtk_activations(\n appliances=APPLIANCES,\n filename=NILMTK_FILENAME,\n sample_period=SAMPLE_PERIOD,\n windows=WINDOWS\n )\n\n for get_net in [ae]:\n for target_appliance in ['kettle']:\n pipeline = get_pipeline(target_appliance, activations)\n\n # Build net\n batch = pipeline.get_batch()\n net = get_net(batch)\n\n # Trainer\n trainer = Trainer(\n net=net,\n data_pipeline=pipeline,\n experiment_id=[\n root_experiment_name, get_net.__name__, target_appliance],\n metrics=Metrics(state_boundaries=[2]), # was 3 up until 230000ish\n learning_rates={0: 1E-2},\n repeat_callbacks=[\n (5000, Trainer.validate),\n (5000, Trainer.save_params),\n (5000, Trainer.plot_estimates)\n ]\n )\n\n report = trainer.submit_report()\n print(report)\n\n # Run!\n trainer.fit(None)\n\n\n# ---------------------- NETWORKS -------------------------\ndef ae(batch):\n input_shape = batch.input.shape\n target_shape = batch.target.shape\n\n input_layer = InputLayer(\n shape=input_shape\n )\n\n # Dense layers\n dense_layer_0 = DenseLayer(\n input_layer,\n num_units=128\n )\n dense_layer_1 = DenseLayer(\n dense_layer_0,\n num_units=64\n )\n dense_layer_2 = DenseLayer(\n dense_layer_1,\n num_units=128\n )\n\n # Output\n final_dense_layer = DenseLayer(\n dense_layer_2,\n num_units=target_shape[1] * target_shape[2],\n nonlinearity=None\n )\n output_layer = ReshapeLayer(\n final_dense_layer,\n shape=target_shape\n )\n\n net = Net(\n output_layer,\n tags=['AE'],\n description=\"Like AE in e567 but without conv layer\",\n predecessor_experiment=\"e567\"\n )\n return net\n\n\n# ------------------------ DATA ----------------------\n\ndef get_pipeline(target_appliance, activations):\n\n if target_appliance == 'kettle':\n seq_length = 128\n train_buildings = [1, 2, 4]\n unseen_buildings = [5]\n num_seq_per_batch = 64\n\n filtered_windows = select_windows(train_buildings, unseen_buildings)\n filtered_activations = filter_activations(filtered_windows, activations)\n\n synthetic_agg_source = SyntheticAggregateSource(\n activations=filtered_activations,\n 
target_appliance=target_appliance,\n seq_length=seq_length,\n sample_period=SAMPLE_PERIOD\n )\n\n real_agg_source = RealAggregateSource(\n activations=filtered_activations,\n target_appliance=target_appliance,\n seq_length=seq_length,\n filename=NILMTK_FILENAME,\n windows=filtered_windows,\n sample_period=SAMPLE_PERIOD\n )\n\n stride_source = StrideSource(\n target_appliance=target_appliance,\n seq_length=seq_length,\n filename=NILMTK_FILENAME,\n windows=filtered_windows,\n sample_period=SAMPLE_PERIOD,\n stride=STRIDE\n )\n\n sample = real_agg_source.get_batch(num_seq_per_batch=1024).next()\n sample = sample.before_processing\n input_std = sample.input.flatten().std()\n target_std = sample.target.flatten().std()\n pipeline = DataPipeline(\n [synthetic_agg_source, real_agg_source, stride_source],\n num_seq_per_batch=num_seq_per_batch,\n input_processing=[DivideBy(input_std), IndependentlyCenter()],\n target_processing=[DivideBy(target_std)]\n )\n\n return pipeline\n\n\ndef select_windows(train_buildings, unseen_buildings):\n windows = {fold: {} for fold in DATA_FOLD_NAMES}\n\n def copy_window(fold, i):\n windows[fold][i] = WINDOWS[fold][i]\n\n for i in train_buildings:\n copy_window('train', i)\n copy_window('unseen_activations_of_seen_appliances', i)\n for i in unseen_buildings:\n copy_window('unseen_appliances', i)\n return windows\n\n\ndef filter_activations(windows, activations):\n new_activations = {\n fold: {appliance: {} for appliance in APPLIANCES}\n for fold in DATA_FOLD_NAMES}\n for fold, appliances in activations.iteritems():\n for appliance, buildings in appliances.iteritems():\n required_building_ids = windows[fold].keys()\n required_building_names = [\n 'UK-DALE_building_{}'.format(i) for i in required_building_ids]\n for building_name in required_building_names:\n try:\n new_activations[fold][appliance][building_name] = (\n activations[fold][appliance][building_name])\n except KeyError:\n pass\n return activations\n"
]
| [
[
"matplotlib.use"
]
]
|
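A standalone Python 3 sketch of the `select_windows` helper above, with a trimmed `WINDOWS` dict and inlined fold names so it runs without `neuralnilm` or the UK-DALE dataset; the dates and building IDs are copied from the experiment definition but shortened.

```python
# Trimmed copies of the fold/window tables from the experiment definition.
WINDOWS = {
    'train': {1: ("2013-04-12", "2015-07-01"), 2: ("2013-05-22", "2013-10-03")},
    'unseen_activations_of_seen_appliances': {1: ("2015-07-02", None), 2: ("2013-10-03", None)},
    'unseen_appliances': {5: ("2014-06-29", None)},
}
DATA_FOLD_NAMES = list(WINDOWS)  # stand-in for neuralnilm.consts.DATA_FOLD_NAMES

def select_windows(train_buildings, unseen_buildings):
    windows = {fold: {} for fold in DATA_FOLD_NAMES}
    for i in train_buildings:
        windows['train'][i] = WINDOWS['train'][i]
        windows['unseen_activations_of_seen_appliances'][i] = \
            WINDOWS['unseen_activations_of_seen_appliances'][i]
    for i in unseen_buildings:
        windows['unseen_appliances'][i] = WINDOWS['unseen_appliances'][i]
    return windows

print(select_windows(train_buildings=[1, 2], unseen_buildings=[5]))
```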
orcahmlee/ray | [
"20cf2edfef7103c269358a49a48c2159315ee132"
]
| [
"python/ray/air/tests/test_rl_predictor.py"
]
| [
"from typing import Optional\n\nimport gym\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport tempfile\n\nfrom ray.air.predictors.integrations.rl.rl_predictor import RLPredictor\nfrom ray.air.preprocessor import Preprocessor\nfrom ray.air.checkpoint import Checkpoint\nfrom ray.air.train.integrations.rl import RLTrainer\n\nfrom ray.rllib.agents import Trainer\nfrom ray.rllib.policy import Policy\nfrom ray.tune.utils.trainable import TrainableUtil\n\n\nclass _DummyTrainer(Trainer):\n train_exec_impl = None\n\n def setup(self, config):\n self.policy = _DummyPolicy(\n observation_space=gym.spaces.Box(low=-2.0, high=-2.0, shape=(10,)),\n action_space=gym.spaces.Discrete(n=1),\n config={},\n )\n\n def train(self):\n pass\n\n def get_policy(self, *args, **kwargs) -> Policy:\n return self.policy\n\n\nclass _DummyPolicy(Policy):\n \"\"\"Returns actions by averaging over observations and adding a random number\"\"\"\n\n def compute_actions(\n self,\n obs_batch,\n *args,\n **kwargs,\n ):\n return (\n np.random.uniform(0, 1, size=len(obs_batch)) + np.mean(obs_batch, axis=1),\n [],\n {},\n )\n\n\nclass _DummyPreprocessor(Preprocessor):\n def transform_batch(self, df):\n self._batch_transformed = True\n return df * 2\n\n\ndef create_checkpoint(\n preprocessor: Optional[Preprocessor] = None, config: Optional[dict] = None\n) -> Checkpoint:\n rl_trainer = RLTrainer(\n algorithm=_DummyTrainer,\n config=config or {},\n preprocessor=preprocessor,\n )\n rl_trainable_cls = rl_trainer.as_trainable()\n rl_trainable = rl_trainable_cls()\n\n with tempfile.TemporaryDirectory() as checkpoint_dir:\n checkpoint_file = rl_trainable.save(checkpoint_dir)\n checkpoint_path = TrainableUtil.find_checkpoint_dir(checkpoint_file)\n checkpoint_data = Checkpoint.from_directory(checkpoint_path).to_dict()\n\n return Checkpoint.from_dict(checkpoint_data)\n\n\[email protected](\"batch_type\", [list, np.array, pd.DataFrame])\[email protected](\"batch_size\", [1, 20])\ndef test_predict_no_preprocessor(batch_type, batch_size):\n checkpoint = create_checkpoint()\n predictor = RLPredictor.from_checkpoint(checkpoint)\n\n # Observations\n obs = batch_type([[1.0] * 10] * batch_size)\n actions = predictor.predict(obs)\n\n assert len(actions) == batch_size\n # We add [0., 1.) to 1.0, so actions should be in [1., 2.)\n assert all(1.0 <= action < 2.0 for action in actions)\n\n\[email protected](\"batch_type\", [list, np.array, pd.DataFrame])\[email protected](\"batch_size\", [1, 20])\ndef test_predict_with_preprocessor(batch_type, batch_size):\n preprocessor = _DummyPreprocessor()\n checkpoint = create_checkpoint(preprocessor=preprocessor)\n predictor = RLPredictor.from_checkpoint(checkpoint)\n\n # Observations\n obs = batch_type([[1.0] * 10] * batch_size)\n actions = predictor.predict(obs)\n\n assert len(actions) == batch_size\n # Preprocessor doubles observations to 2.0, then we add [0., 1.),\n # so actions should be in [2., 3.)\n assert all(2.0 <= action < 3.0 for action in actions)\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(pytest.main([\"-v\", \"-x\", __file__]))\n"
]
| [
[
"numpy.mean"
]
]
|
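A standalone illustration of the arithmetic behind the assertions in the tests above: `_DummyPolicy.compute_actions` returns the per-row mean of the observations plus uniform noise in [0, 1), so for all-ones observations every action lands in [1, 2). Only `numpy` is needed.

```python
import numpy as np

obs_batch = np.array([[1.0] * 10] * 4)  # 4 observations, each a vector of ones
actions = np.random.uniform(0, 1, size=len(obs_batch)) + np.mean(obs_batch, axis=1)
assert all(1.0 <= a < 2.0 for a in actions)  # mirrors test_predict_no_preprocessor
print(actions)
```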
brbzjl/my_graph_rcnn | [
"a758ca9ab837df70ff5a6c1ce0ac901afcbef24e",
"a758ca9ab837df70ff5a6c1ce0ac901afcbef24e"
]
| [
"lib/model/graph_conv/graph_conv_score_unit.py",
"lib/model/faster_rcnn/faster_rcnn_cascade.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom model.utils.config import cfg\nimport numpy as np\nimport math\nimport pdb\nimport time\nimport pdb\nfrom model.utils.config import cfg\n\ndef normal_init(m, mean, stddev, truncated=False):\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n m.bias.data.zero_()\n\nclass _Collection_Unit(nn.Module):\n def __init__(self, dim_in, dim_out):\n super(_Collection_Unit, self).__init__()\n self.fc = nn.Linear(dim_in, dim_out, bias=True)\n normal_init(self.fc, 0, 0.01, cfg.TRAIN.TRUNCATED)\n self.fc_source = nn.Linear(dim_in, 32, bias=True)\n self.fc_target = nn.Linear(dim_out, 32, bias=True)\n # def forward(self, target, source, attention_base):\n # assert attention_base.size(0) == source.size(0), \"source number must be equal to attention number\"\n #\n # # Ninxdin -> Ninx32\n # emb_source = self.fc_source(F.relu(source))\n # # Noutxdout -> Noutx32\n # emb_target = self.fc_target(F.relu(target))\n #\n # # NoutxNin\n # attention_prob = F.softmax(torch.mm(emb_target, emb_source.t())) * attention_base\n #\n # fc_out = self.fc(F.relu(source))\n #\n # collect = torch.mm(attention_prob, fc_out)\n # return collect\n\n def forward(self, target, source, attention_base):\n # assert attention_base.size(0) == source.size(0), \"source number must be equal to attention number\"\n fc_out = self.fc(F.relu(source))\n collect = torch.mm(attention_base, fc_out)\n collect_avg = collect / (attention_base.sum(1).view(collect.size(0), 1) + 1e-7)\n return collect_avg\n\nclass _Update_Unit(nn.Module):\n def __init__(self, dim):\n super(_Update_Unit, self).__init__()\n self.fc_source = nn.Linear(dim, dim, bias=True)\n self.fc_target = nn.Linear(dim, dim, bias=True)\n normal_init(self.fc_source, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.fc_target, 0, 0.01, cfg.TRAIN.TRUNCATED)\n\n # def forward(self, target, source):\n # assert target.size() == source.size(), \"source dimension must be equal to target dimension\"\n # update = self.fc_target(F.relu(target)) + self.fc_source(F.relu(source))\n # return update\n\n def forward(self, target, source):\n assert target.size() == source.size(), \"source dimension must be equal to target dimension\"\n update = target + source\n return update\n\nclass _GraphConvolutionLayer_Collect(nn.Module):\n \"\"\" graph convolutional layer \"\"\"\n \"\"\" collect information from neighbors \"\"\"\n def __init__(self, dim_obj, dim_att, dim_rel):\n super(_GraphConvolutionLayer_Collect, self).__init__()\n self.collect_units = nn.ModuleList()\n self.collect_units.append(_Collection_Unit(dim_obj, dim_att)) # att from obj\n self.collect_units.append(_Collection_Unit(dim_att, dim_obj)) # obj from att\n self.collect_units.append(_Collection_Unit(dim_obj, dim_obj)) # obj from obj\n self.collect_units.append(_Collection_Unit(dim_rel, dim_obj)) # obj (subject) from rel\n self.collect_units.append(_Collection_Unit(dim_rel, dim_obj)) # obj (object) from rel\n self.collect_units.append(_Collection_Unit(dim_obj, dim_rel)) # rel from obj (subject)\n self.collect_units.append(_Collection_Unit(dim_obj, dim_rel)) # rel from obj (object)\n\n def forward(self, target, source, attention, unit_id):\n collection = self.collect_units[unit_id](target, source, attention)\n return collection\n\nclass _GraphConvolutionLayer_Update(nn.Module):\n \"\"\" graph convolutional layer \"\"\"\n \"\"\" update target 
nodes \"\"\"\n def __init__(self, dim_obj, dim_att, dim_rel):\n super(_GraphConvolutionLayer_Update, self).__init__()\n self.update_units = nn.ModuleList()\n self.update_units.append(_Update_Unit(dim_att)) # att from others\n self.update_units.append(_Update_Unit(dim_obj)) # obj from others\n self.update_units.append(_Update_Unit(dim_rel)) # rel from others\n\n def forward(self, target, source, unit_id):\n update = self.update_units[unit_id](target, source)\n return update\n",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\n\nfrom torch.autograd import Variable\nimport numpy as np\n\nfrom model.utils.config import cfg\n\nfrom model.rpn.rpn import _RPN\nfrom model.roi_pooling.modules.roi_pool import _RoIPooling\n# from model.roi_pooling_single.modules.roi_pool import _RoIPool\nfrom model.rpn.proposal_target_layer_cascade import _ProposalTargetLayer\nfrom model.utils import network\nimport time\nimport pdb\nfrom model.utils.network import _smooth_l1_loss\n\n# from model.utils.vgg16 import VGG16\n\nclass _RCNN_base(nn.Module):\n def __init__(self, baseModels, classes, dout_base_model):\n super(_RCNN_base, self).__init__()\n\n if classes is not None:\n self.classes = classes\n self.n_classes = len(classes)\n\n self.RCNN_base_model = baseModels\n\n # define rpn\n self.RCNN_rpn = _RPN(dout_base_model)\n self.RCNN_proposal_target = _ProposalTargetLayer(self.n_classes)\n self.RCNN_roi_pool = _RoIPooling(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0/16.0)\n\n def forward(self, im_data, im_info, gt_boxes, num_boxes):\n im_info = im_info.data\n gt_boxes = gt_boxes.data\n num_boxes = num_boxes.data\n\n batch_size = im_data.size(0)\n # feed image data to base model to obtain base feature map\n base_feat = self.RCNN_base_model(im_data)\n\n # feed base feature map tp RPN to obtain rois\n rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)\n\n # if it is training phrase, then use ground trubut bboxes for refining\n if self.training:\n\n roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes)\n rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws = roi_data\n\n rois = Variable(rois)\n rois_label = Variable(rois_label.view(-1))\n rois_target = Variable(rois_target.view(-1, rois_target.size(2)))\n rois_inside_ws = Variable(rois_inside_ws.view(-1, rois_inside_ws.size(2)))\n rois_outside_ws = Variable(rois_outside_ws.view(-1, rois_outside_ws.size(2)))\n\n else:\n rois_label = None\n rois_target = None\n rois_inside_ws = None\n rois_outside_ws = None\n rpn_loss_cls = 0\n rpn_loss_bbox = 0\n\n # do roi pooling based on predicted rois\n pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1,5))\n # pooled_feat_all = pooled_feat.view(pooled_feat.size(0), -1)\n\n return rois, pooled_feat, rois_label, rois_target, rois_inside_ws, rois_outside_ws, rpn_loss_cls, rpn_loss_bbox\n\nclass _fasterRCNN(nn.Module):\n \"\"\" faster RCNN \"\"\"\n def __init__(self, classes):\n super(_fasterRCNN, self).__init__()\n self.classes = classes\n self.n_classes = len(classes)\n # loss\n self.RCNN_loss_cls = 0\n self.RCNN_loss_bbox = 0\n\n def _init_weights(self):\n def normal_init(m, mean, stddev, truncated=False):\n \"\"\"\n weight initalizer: truncated normal and random normal.\n \"\"\"\n # x is a parameter\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n m.bias.data.zero_()\n\n normal_init(self.RCNN_base.RCNN_rpn.RPN_Conv, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_base.RCNN_rpn.RPN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_base.RCNN_rpn.RPN_bbox_pred, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_bbox_pred, 0, 0.001, cfg.TRAIN.TRUNCATED)\n\n def create_architecture(self):\n self._init_modules()\n self._init_weights()\n\n def forward(self, im_data, im_info, gt_boxes, num_boxes):\n\n 
batch_size = im_data.size(0)\n rois, feat_out, rois_label, rois_target, rois_inside_ws, rois_outside_ws, \\\n rpn_loss_cls, rpn_loss_bbox = self.RCNN_base(im_data, im_info, gt_boxes, num_boxes)\n\n # get the rpn loss.\n rpn_loss = rpn_loss_cls + rpn_loss_bbox\n\n # feed pooled features to top model\n feat_out = self._head_to_tail(feat_out)\n\n # compute bbox offset\n bbox_pred = self.RCNN_bbox_pred(feat_out)\n\n # compute object classification probability\n cls_score = self.RCNN_cls_score(feat_out)\n cls_prob = F.softmax(cls_score)\n\n\n self.RCNN_loss_cls = 0\n self.RCNN_loss_bbox = 0\n\n if self.training:\n # classification loss\n label = rois_label.long()\n self.fg_cnt = torch.sum(label.data.ne(0))\n self.bg_cnt = label.data.numel() - self.fg_cnt\n\n self.RCNN_loss_cls = F.cross_entropy(cls_score, label)\n\n # bounding box regression L1 loss\n self.RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws) \n\n rcnn_loss = self.RCNN_loss_cls + self.RCNN_loss_bbox\n\n cls_prob = cls_prob.view(batch_size, rois.size(1), -1)\n bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1)\n\n return rois, cls_prob, bbox_pred, rpn_loss, rcnn_loss"
]
| [
[
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.ModuleList",
"torch.mm"
],
[
"torch.autograd.Variable",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.softmax"
]
]
|
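A standalone sketch of the collect step in `_Collection_Unit.forward` above: project the source features, aggregate them with a 0/1 attention (adjacency) matrix via `torch.mm`, then normalize by each target node's in-degree. The sizes and random inputs are made up; only `torch` is required.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

n_target, n_source, dim_in, dim_out = 3, 5, 8, 8
fc = nn.Linear(dim_in, dim_out, bias=True)

source = torch.randn(n_source, dim_in)
attention_base = (torch.rand(n_target, n_source) > 0.5).float()  # 0/1 adjacency

fc_out = fc(F.relu(source))                 # (n_source, dim_out) projected features
collect = torch.mm(attention_base, fc_out)  # (n_target, dim_out) summed neighbours
collect_avg = collect / (attention_base.sum(1).view(n_target, 1) + 1e-7)
print(collect_avg.shape)                    # torch.Size([3, 8])
```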
sumeetkhatri/QuTIPy | [
"ca2a3344c1caa818504425496ea37278d80b1c44"
]
| [
"qutipy/channels/depolarizing_channel_n_uses.py"
]
| [
"'''\nThis code is part of QuTIpy.\n\n(c) Copyright Sumeet Khatri, 2021\n\nThis code is licensed under the Apache License, Version 2.0. You may\nobtain a copy of this license in the LICENSE.txt file in the root directory\nof this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n\nAny modifications or derivative works of this code must retain this\ncopyright notice, and modified files need to carry a notice indicating\nthat they have been altered from the originals.\n'''\n\n\nimport numpy as np\nfrom numpy.linalg import matrix_power\nimport itertools\n\nfrom qutipy.general_functions import syspermute,eye,partial_trace,tensor\n\n\ndef depolarizing_channel_n_uses(p,n,rho,m):\n\n\n '''\n Generates the output state corresponding to the depolarizing channel\n applied to each one of n systems in the joint state rho. p is the \n depolarizing probability as defined in the function \"depolarizing_channel\"\n above.\n\n If rho contains m>n systems, then the first m-n systems are left alone.\n '''\n\n dims=2*np.ones(m).astype(int)\n\n rho_out=np.zeros((2**m,2**m))\n\n for k in range(n+1):\n indices=list(itertools.combinations(range(1,n+1),k))\n\n #print k,indices\n\n for index in indices:\n index=list(index)\n\n index=np.array(index)+(m-n)\n index=list(index.astype(int))\n\n index_diff=np.setdiff1d(range(1,m+1),index)\n\n perm_arrange=np.append(index,index_diff).astype(int)\n perm_rearrange=np.zeros(m)\n\n for i in range(m):\n perm_rearrange[i]=np.argwhere(perm_arrange==i+1)[0][0]+1\n\n perm_rearrange=perm_rearrange.astype(int)\n\n mix=matrix_power(eye(2**k)/2,k)\n\n rho_part=partial_trace(rho,index,dims)\n\n rho_out=rho_out+(4*p/3.)**k*(1-(4*p/3.))**(n-k)*syspermute(tensor(mix,rho_part),perm_rearrange,dims)\n\n return rho_out"
]
| [
[
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.ones",
"numpy.argwhere"
]
]
|
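A usage sketch for the function above, assuming `qutipy` and `numpy` are installed; the 3-qubit state is constructed here purely for illustration and the import path simply follows the `file_path` column.

```python
import numpy as np
from qutipy.channels.depolarizing_channel_n_uses import depolarizing_channel_n_uses

# Random 3-qubit pure state rho = |psi><psi| (m = 3 systems).
psi = np.random.randn(8) + 1j * np.random.randn(8)
psi /= np.linalg.norm(psi)
rho = np.outer(psi, psi.conj())

# Depolarize the last n = 2 of the m = 3 qubits with probability p = 0.1.
rho_out = depolarizing_channel_n_uses(p=0.1, n=2, rho=rho, m=3)
print(rho_out.shape)               # (8, 8)
print(np.trace(rho_out).real)      # should stay close to 1 (trace preserving)
```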
HFAiLab/ffrecord | [
"e916dc715ffa38a304a673ade7c5aa1efff5936d"
]
| [
"benchs/bench_io.py"
]
| [
"import sys\nimport time\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nimport multiprocessing as mp\nfrom multiprocessing import Process\n\nfrom ffrecord import FileWriter, FileReader\nfrom ffrecord.fileio import FileReader as PyFileReader\n\n\ndef read_chunk(reader, q):\n t0 = time.time()\n while True:\n r = q.get()\n if r is None:\n break\n\n i0, xn = r\n indexes = list(range(i0, i0 + xn))\n bytes_ = reader.read(indexes)\n t_read = time.time() - t0\n reader.close()\n\n return t_read\n\n\ndef bench_read(implem, Reader, fname, n, bs, nprocs):\n reader = Reader(fname)\n\n q = mp.Queue()\n for i0 in range(0, n, bs):\n ni = min(bs, n - i0)\n q.put([i0, ni])\n\n # use None as sentinel\n for i in range(nprocs):\n q.put(None)\n\n t0 = time.time()\n procs = []\n for i in range(nprocs):\n p = Process(target=read_chunk, args=(reader, q))\n p.start()\n procs.append(p)\n for i in range(nprocs):\n procs[i].join()\n\n t_read = time.time() - t0\n print(f'{implem} read: {t_read}')\n\n return t_read\n\n\ndef main():\n sample_size = 1 * (1 << 20)\n data = bytearray([i % 256 for i in range(sample_size)])\n tmp_dir = '/private_dataset'\n nprocs = 128\n n = 100000\n fname = tmp_dir + f'/test_ss_{sample_size}'\n\n if not Path(fname).exists():\n writer = FileWriter(fname, n)\n\n t0 = time.time()\n for i in range(n):\n writer.write_one(data)\n t_write = time.time() - t0\n writer.close()\n print('cpp write: ', t_write)\n\n n = 100000\n size = n * len(data) / (1 << 30)\n print(f'Reading {size} GB from {fname}')\n batch_sizes = [64, 80, 96, 112, 128]\n\n t_cpp, t_python = [], []\n for bs in batch_sizes:\n t_python.append(bench_read('python', PyFileReader, fname, n, bs, nprocs))\n t_cpp.append(bench_read('cpp', FileReader, fname, n, bs, nprocs))\n\n plt.plot(batch_sizes, [size / b for b in t_cpp], label='C++ read')\n plt.plot(batch_sizes, [size / b for b in t_python], label='Python read')\n plt.title(f'Read, nprocs {nprocs}, sample_size {sample_size}')\n plt.xlabel('batch size')\n plt.ylabel('GB/s')\n plt.legend()\n plt.savefig(f'bench_read_mp_{nprocs}_{sample_size}.png')\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel"
]
]
|
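A standalone sketch of the queue-plus-sentinel worker pattern used by `bench_read` above, with a trivial task in place of `reader.read` so it runs without `ffrecord` or the benchmark data.

```python
import multiprocessing as mp
from multiprocessing import Process

def worker(q):
    while True:
        item = q.get()
        if item is None:                   # sentinel: no more work
            break
        i0, n = item
        _ = list(range(i0, i0 + n))        # stand-in for reader.read(indexes)

if __name__ == '__main__':
    nprocs, total, bs = 4, 1000, 64
    q = mp.Queue()
    for i0 in range(0, total, bs):
        q.put([i0, min(bs, total - i0)])   # one chunk of indexes per item
    for _ in range(nprocs):
        q.put(None)                        # one sentinel per worker
    procs = [Process(target=worker, args=(q,)) for _ in range(nprocs)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
```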
keshavchoudhary87/OG-India | [
"269ee172b837882c826ee7f99507d93f9643128e"
]
| [
"ogindia/tests/test_firm.py"
]
| [
"import pytest\nfrom ogindia import firm\nimport numpy as np\nfrom ogindia.parameters import Specifications\n\n\np1 = Specifications()\nnew_param_values = {\n 'Z': [2.0],\n 'gamma': 0.5,\n 'epsilon': 1.0\n}\n# update parameters instance with new values for test\np1.update_specifications(new_param_values)\nL1 = np.array([4.0])\nK1 = np.array([9.0])\nexpected1 = np.array([12.0])\np2 = Specifications()\nnew_param_values2 = {\n 'Z': [2.0],\n 'gamma': 0.5,\n 'epsilon': 0.2\n}\nexpected2 = np.array([18.84610765])\np3 = Specifications()\nnew_param_values3 = {\n 'Z': [2.0],\n 'gamma': 0.5,\n 'epsilon': 1.2\n}\n# update parameters instance with new values for test\np3.update_specifications(new_param_values3)\nL3 = np.array([1 / 12.0])\nK3 = np.array([1 / 4.0])\nexpected3 = np.array([0.592030917])\n# update parameters instance with new values for test\np2.update_specifications(new_param_values2)\np4 = Specifications()\nnew_param_values4 = {\n 'Z': [2.0],\n 'gamma': 0.5,\n 'epsilon': 1.0,\n 'T': 3,\n 'S': 3,\n 'eta': (np.ones((3, p4.J)) / (3 * p4.J))\n}\n# update parameters instance with new values for test\np4.update_specifications(new_param_values4)\nL4 = np.array([4.0, 4.0, 4.0])\nK4 = np.array([9.0, 9.0, 9.0])\nexpected4 = np.array([12.0, 12.0, 12.0])\np5 = Specifications()\nnew_param_values5 = {\n 'Z': [1.5, 2.5, 0.6],\n 'gamma': 0.5,\n 'epsilon': 1.0,\n 'T': 3,\n 'S': 3,\n 'eta': (np.ones((3, p5.J)) / (3 * p5.J))\n}\n# update parameters instance with new values for test\np5.update_specifications(new_param_values5)\nexpected5 = np.array([9.0, 15.0, 3.6])\n\n\[email protected]('K,L,p,method,expected',\n [(K1, L1, p1, 'SS', expected1),\n (K1, L1, p2, 'SS', expected2),\n (K3, L3, p3, 'SS', expected3),\n (K4, L4, p4, 'TPI', expected4),\n (K4, L4, p5, 'TPI', expected5)],\n ids=['epsilon=1.0,SS', 'epsilon=0.2,SS',\n 'epsilon=1.2,SS', 'epsilon=1.0,TP',\n 'epsilon=1.0,TP,varyZ'])\ndef test_get_Y(K, L, p, method, expected):\n \"\"\"\n choose values that simplify the calculations and are similar to\n observed values\n \"\"\"\n Y = firm.get_Y(K, L, p, method)\n assert (np.allclose(Y, expected, atol=1e-6))\n\n\np1 = Specifications()\nnew_param_values1 = {\n 'Z': [0.5],\n 'gamma': 0.5,\n 'delta_annual': [0.25],\n 'tau_b': [0.5],\n 'delta_tau_annual': [0.35],\n 'epsilon': 1.2\n}\n# update parameters instance with new values for test\np1.update_specifications(new_param_values1)\n# assign values for Y and K variables\nY1 = np.array([2.0])\nK1 = np.array([1.0])\nexpected1 = np.array([0.370449359])\np2 = Specifications()\nnew_param_values2 = {\n 'Z': [0.5],\n 'gamma': 0.5,\n 'tau_b': [0.5],\n 'delta_tau_annual': [0.35],\n 'epsilon': 0.5,\n 'delta_annual': [0.5]\n}\n# update parameters instance with new values for test\np2.update_specifications(new_param_values2)\nexpected2 = np.array([0.675])\np3 = Specifications()\nnew_param_values3 = {\n 'Z': [0.5],\n 'gamma': 0.5,\n 'tau_b': [0.5],\n 'delta_tau_annual': [0.35],\n 'epsilon': 1.0,\n 'delta_annual': [0.5]\n}\n# update parameters instance with new values for test\np3.update_specifications(new_param_values3)\nexpected3 = np.array([0.175])\np4 = Specifications()\nnew_param_values4 = {\n 'Z': [0.5],\n 'gamma': 0.5,\n 'tau_b': [0.5],\n 'delta_tau_annual': [0.35],\n 'epsilon': 1.2,\n 'delta_annual': [0.5],\n 'T': 3,\n 'S': 3,\n 'eta': (np.ones((3, p4.J)) / (3 * p4.J))\n}\n# update parameters instance with new values for test\np4.update_specifications(new_param_values4)\nY4 = np.array([3.0, 3.2, 3.8])\nK4 = np.array([1.8, 1.2, 1.0])\nexpected4 = np.array([-0.117344327, 
0.066121991, 0.260484002])\n\np5 = Specifications()\nnew_param_values5 = {\n 'Z': [1.5, 2.5, 0.6],\n 'gamma': 0.5,\n 'tau_b': [0.2, 0.0, 0.5],\n 'delta_tau_annual': [0.35, 0.2, 0.1],\n 'epsilon': 1.2,\n 'delta_annual': [0.5],\n 'T': 3,\n 'S': 3,\n 'eta': (np.ones((3, p5.J)) / (3 * p5.J))\n}\n# update parameters instance with new values for test\np5.update_specifications(new_param_values5)\nexpected5 = np.array([-0.064719663, 0.480604113, 0.25383862])\n\n\[email protected]('Y,K,p,method,expected',\n [(Y1, K1, p1, 'SS', expected1),\n (Y1, K1, p2, 'SS', expected2),\n (Y1, K1, p3, 'SS', expected3),\n (Y4, K4, p4, 'TPI', expected4),\n (Y4, K4, p5, 'TPI', expected5)],\n ids=['epsilon=1.2,SS', 'epsilon=0.5,SS',\n 'epsilon=1.0,SS', 'epsilon=1.2,TP',\n 'epsilon=1.2,TP,varyParams'])\ndef test_get_r(Y, K, p, method, expected):\n \"\"\"\n choose values that simplify the calculations and are similar to\n observed values\n \"\"\"\n r = firm.get_r(Y, K, p, method)\n assert (np.allclose(r, expected))\n\n\np1 = Specifications()\nnew_param_values1 = {\n 'Z': [0.5],\n 'gamma': 0.5,\n 'epsilon': 0.2\n}\n# update parameters instance with new values for test\np1.update_specifications(new_param_values1)\nY1 = np.array([2.0])\nL1 = np.array([1.0])\nexpected1 = np.array([16.])\np2 = Specifications()\nnew_param_values2 = {\n 'Z': [0.5],\n 'gamma': 0.5,\n 'epsilon': 1.5\n}\n# update parameters instance with new values for test\np2.update_specifications(new_param_values2)\nexpected2 = np.array([0.793700526])\np3 = Specifications()\nnew_param_values3 = {\n 'Z': [0.5],\n 'gamma': 0.5,\n 'epsilon': 1.0\n}\n# update parameters instance with new values for test\np3.update_specifications(new_param_values3)\nexpected3 = np.array([1.0])\np4 = Specifications()\nnew_param_values4 = {\n 'Z': [0.5, 0.47],\n 'gamma': 0.5,\n 'epsilon': 1.2,\n 'T': 3,\n 'S': 3,\n 'eta': (np.ones((3, p4.J)) / (3 * p4.J))\n}\n# update parameters instance with new values for test\np4.update_specifications(new_param_values4)\nY4 = np.array([2.0, 2.0, 2.0])\nL4 = np.array([1.0, 1.0, 1.0])\nexpected4 = np.array([0.890898718, 0.881758476, 0.881758476])\n\n\[email protected]('Y,L,p,method,expected',\n [(Y1, L1, p1, 'SS', expected1),\n (Y1, L1, p2, 'SS', expected2),\n (Y1, L1, p3, 'SS', expected3),\n (Y4, L4, p4, 'TPI', expected4)],\n ids=['epsilon=0.2,SS', 'epsilon=1.5,SS',\n 'epsilon=1.0,SS', 'epsilon=1.2,TP'])\ndef test_get_w(Y, L, p, method, expected):\n \"\"\"\n choose values that simplify the calculations and are similar to\n observed values\n \"\"\"\n w = firm.get_w(Y, L, p, method)\n assert (np.allclose(w, expected, atol=1e-6))\n\n\np1 = Specifications()\nnew_param_values1 = {\n 'Z': [0.5],\n 'gamma': 0.4,\n 'epsilon': 0.8,\n 'delta_annual': [0.05],\n 'delta_tau_annual': [0.35],\n 'tau_b': [0.0357]\n}\n# update parameters instance with new values for test\np1.update_specifications(new_param_values1)\nr1 = np.array([0.01])\nexpected1 = np.array([10.30175902])\np2 = Specifications()\nnew_param_values2 = {\n 'Z': [0.5],\n 'gamma': 0.4,\n 'delta_annual': [0.05],\n 'delta_tau_annual': [0.35],\n 'epsilon': 1.2,\n 'tau_b': [0.0357]\n}\n# update parameters instance with new values for test\np2.update_specifications(new_param_values2)\nexpected2 = np.array([215.1799075])\np3 = Specifications()\nnew_param_values3 = {\n 'Z': [0.5],\n 'gamma': 0.4,\n 'delta_annual': [0.05],\n 'delta_tau_annual': [0.35],\n 'epsilon': 1.0,\n 'tau_b': [0.0357]\n}\n# update parameters instance with new values for test\np3.update_specifications(new_param_values3)\nexpected3 = 
np.array([10.33169079])\np4 = Specifications()\nnew_param_values4 = {\n 'Z': [0.5, 0.1, 1.1],\n 'gamma': 0.4,\n 'delta_annual': [0.05],\n 'delta_tau_annual': [0.35],\n 'epsilon': 0.5,\n 'tau_b': [0.0357],\n 'T': 3,\n 'S': 3,\n 'eta': (np.ones((3, p4.J)) / (3 * p4.J))\n}\n# update parameters instance with new values for test\np4.update_specifications(new_param_values4)\nr4 = np.array([0.01, 0.04, 0.55])\nexpected4 = np.array([0.465031434, -0.045936078, 0.575172024])\n\n\[email protected]('r,p,method,expected',\n [(r1, p1, 'SS', expected1),\n (r1, p2, 'SS', expected2),\n (r1, p3, 'SS', expected3),\n (r4, p4, 'TPI', expected4)],\n ids=['epsilon=0.8,SS', 'epsilon=1.2,SS',\n 'epsilon=1.0,SS', 'epsilon=0.5,TP'])\ndef test_get_KLratio_from_r(r, p, method, expected):\n \"\"\"\n choose values that simplify the calculations and are similar to\n observed values\n \"\"\"\n KLratio = firm.get_KLratio_from_r(r, p, method)\n assert (np.allclose(KLratio, expected, atol=1e-6))\n\n\np1 = Specifications()\nnew_param_values1 = {\n 'Z': [0.5],\n 'gamma': 0.4,\n 'epsilon': 0.8,\n 'delta_annual': [0.05],\n 'delta_tau_annual': [0.35],\n 'tau_b': [0.0357]\n}\n# update parameters instance with new values for test\np1.update_specifications(new_param_values1)\nr1 = np.array([0.04])\nexpected1 = np.array([1.265762107])\np2 = Specifications()\nnew_param_values2 = {\n 'Z': [0.5],\n 'gamma': 0.4,\n 'delta_annual': [0.05],\n 'delta_tau_annual': [0.35],\n 'epsilon': 1.0,\n 'tau_b': [0.0357]\n}\n# update parameters instance with new values for test\np2.update_specifications(new_param_values2)\nexpected2 = np.array([0.550887455])\np3 = Specifications()\nnew_param_values3 = {\n 'Z': [0.5],\n 'gamma': 0.4,\n 'delta_annual': [0.05],\n 'delta_tau_annual': [0.35],\n 'epsilon': 1.2,\n 'tau_b': [0.0357]\n}\n# update parameters instance with new values for test\np3.update_specifications(new_param_values3)\nexpected3 = np.array([2.855428923])\np4 = Specifications()\nnew_param_values4 = {\n 'Z': [0.5, 1.0, 4.0],\n 'gamma': 0.4,\n 'delta_annual': [0.05],\n 'delta_tau_annual': [0.35],\n 'epsilon': 1.2,\n 'tau_b': [0.0357],\n 'T': 3,\n 'S': 3,\n 'eta': (np.ones((3, p4.J)) / (3 * p4.J))\n}\n# update parameters instance with new values for test\np4.update_specifications(new_param_values4)\nr4 = np.array([0.04, 0.04, 0.04])\nexpected4 = np.array([0.380178134, 1.19149279, 17.8375083])\n\n\[email protected]('r,p,method,expected',\n [(r1, p1, 'SS', expected1),\n (r1, p2, 'SS', expected2),\n (r1, p3, 'SS', expected3),\n (r4, p4, 'TPI', expected4)],\n ids=['epsilon=0.8,SS', 'epsilon=1.0,SS',\n 'epsilon=1.2,SS', 'epsilon=1.2,TP'])\ndef test_get_w_from_r(r, p, method, expected):\n \"\"\"\n choose values that simplify the calculations and are similar to\n observed values\n \"\"\"\n w = firm.get_w_from_r(r, p, method)\n assert (np.allclose(w, expected, atol=1e-6))\n\n\np1 = Specifications()\nnew_param_values1 = {\n 'gamma': 0.5,\n 'tau_b': [0.75],\n 'delta_annual': [0.15],\n 'delta_tau_annual': [0.03],\n 'Z': [2.0],\n 'epsilon': 1.2\n}\n# update parameters instance with new values for test\np1.update_specifications(new_param_values1)\nL1 = np.array([2.0])\nr1 = np.array([1.0])\nexpected1 = np.array([0.325726586])\np2 = Specifications()\nnew_param_values2 = {\n 'gamma': 0.5,\n 'tau_b': [0.75],\n 'delta_annual': [0.15],\n 'delta_tau_annual': [0.03],\n 'Z': [2.0],\n 'epsilon': 1.0\n}\n# update parameters instance with new values for test\np2.update_specifications(new_param_values2)\nexpected2 = np.array([0.098327933])\np3 = 
Specifications()\nnew_param_values3 = {\n 'gamma': 0.5,\n 'epsilon': 0.4,\n 'Z': [4.0],\n 'tau_b': [0.0],\n 'delta_tau_annual': [0.5],\n 'delta_annual': [0.05]\n}\n# update parameters instance with new values for test\np3.update_specifications(new_param_values3)\nexpected3 = np.array([4.577211711])\np4 = Specifications()\nnew_param_values4 = {\n 'gamma': 0.5,\n 'epsilon': 0.4,\n 'Z': [4.0, 3.0],\n 'delta_tau_annual': [0.5],\n 'delta_annual': [0.05],\n 'tau_b': [0.5],\n 'T': 3,\n 'S': 3,\n 'eta': (np.ones((3, p4.J)) / (3 * p4.J))\n}\n# update parameters instance with new values for test\np4.update_specifications(new_param_values4)\nL4 = np.array([2.0, 2.0, 2.0])\nr4 = np.array([1.0, 1.0, 1.0])\nexpected4 = np.array([2.801139363, 2.303692012, 2.303692012])\n\n\[email protected]('L,r,p,method,expected',\n [(L1, r1, p1, 'SS', expected1),\n (L1, r1, p2, 'SS', expected2),\n (L1, r1, p3, 'SS', expected3),\n (L4, r4, p4, 'TPI', expected4)],\n ids=['epsilon=1.2,SS', 'epsilon=1.0,SS',\n 'epsilon=0.4,SS', 'epsilon=0.4,TP'])\ndef test_get_K(L, r, p, method, expected):\n \"\"\"\n choose values that simplify the calculations and are similar to\n observed values\n \"\"\"\n K = firm.get_K(L, r, p, method)\n assert (np.allclose(K, expected, atol=1e-6))\n"
]
| [
[
"numpy.allclose",
"numpy.array",
"numpy.ones"
]
]
|
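A quick arithmetic check of the first `get_Y` case above: the expected value matches the Cobb-Douglas form Y = Z * K**gamma * L**(1 - gamma), which is what a CES production function with `epsilon = 1.0` reduces to; no `ogindia` install is needed for this check.

```python
# Y = Z * K**gamma * L**(1 - gamma) at epsilon = 1 (Cobb-Douglas case).
Z, gamma = 2.0, 0.5
K, L = 9.0, 4.0
Y = Z * K**gamma * L**(1 - gamma)
print(Y)  # 2 * 3 * 2 = 12.0, matching expected1
```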
alencon/codecarbon | [
"a65e2f55b4121573763358ca2622cc38d3b5c76e"
]
| [
"codecarbon/viz/carbonboard.py"
]
| [
"import dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_table as dt\nimport fire\nimport pandas as pd\nfrom dash.dependencies import Input, Output\n\nfrom codecarbon.viz.components import Components\nfrom codecarbon.viz.data import Data\n\n\ndef render_app(df: pd.DataFrame):\n app = dash.Dash(__name__, external_stylesheets=[dbc.themes.COSMO])\n\n components = Components()\n header = components.get_header()\n net_summary = components.get_net_summary()\n project_dropdown = components.get_project_dropdown(df)\n project_details = components.get_project_details()\n exemplary_equivalents = components.get_exemplary_equivalents()\n _hidden_project_data = components.get_hidden_project_data()\n _hidden_project_summary = components.get_hidden_project_summary()\n cloud_emissions_comparison = components.get_cloud_emissions_comparison()\n global_comparison = components.get_global_comparison()\n regional_comparison = components.get_regional_emissions_comparison()\n project_time_series = components.get_project_time_series()\n project_emissions_bar_chart = components.get_project_emissions_bar_chart()\n references = components.get_references()\n\n data = Data()\n\n app.layout = dbc.Container(\n [\n header,\n net_summary,\n project_dropdown,\n project_details,\n exemplary_equivalents,\n cloud_emissions_comparison,\n global_comparison,\n regional_comparison,\n project_time_series,\n project_emissions_bar_chart,\n references,\n _hidden_project_data,\n _hidden_project_summary,\n ],\n style={\"padding-top\": \"50px\"},\n )\n\n @app.callback(\n [\n Output(component_id=\"hidden_project_data\", component_property=\"children\"),\n Output(component_id=\"hidden_project_summary\", component_property=\"data\"),\n Output(component_id=\"net_power_consumption\", component_property=\"children\"),\n Output(component_id=\"net_carbon_equivalent\", component_property=\"children\"),\n Output(\n component_id=\"project_infrastructure_location\",\n component_property=\"children\",\n ),\n Output(\n component_id=\"project_power_consumption\", component_property=\"children\"\n ),\n Output(\n component_id=\"project_carbon_equivalent\", component_property=\"children\"\n ),\n Output(\n component_id=\"last_run_power_consumption\", component_property=\"children\"\n ),\n Output(\n component_id=\"last_run_carbon_equivalent\", component_property=\"children\"\n ),\n ],\n [Input(component_id=\"project_name\", component_property=\"value\")],\n )\n def update_project_data(project_name: str):\n project_data = data.get_project_data(df, project_name)\n project_summary = data.get_project_summary(project_data.data)\n net_power_consumption = f\"{'{:.1f}'.format(sum(df['energy_consumed']))} kWh\"\n net_carbon_equivalent = f\"{'{:.1f}'.format(sum(df['emissions']))} kg\"\n if {project_summary[\"region\"]} == \"\":\n project_infrastructure_location = f\"{project_summary['country_name']}\"\n else:\n project_infrastructure_location = (\n f\"{project_summary['region']}, {project_summary['country_name']}\"\n )\n project_power_consumption = (\n f\"{round(project_summary['total']['energy_consumed'],1)} kWh\"\n )\n project_carbon_equivalent = (\n f\"{round(project_summary['total']['emissions'],1)} kg\"\n )\n last_run_power_consumption = (\n f\"{project_summary['last_run']['energy_consumed']} kWh\"\n )\n last_run_carbon_equivalent = f\"{project_summary['last_run']['emissions']} kg\"\n\n return (\n project_data,\n project_summary,\n net_power_consumption,\n net_carbon_equivalent,\n project_infrastructure_location,\n 
project_power_consumption,\n project_carbon_equivalent,\n last_run_power_consumption,\n last_run_carbon_equivalent,\n )\n\n @app.callback(\n [\n Output(component_id=\"house_icon\", component_property=\"src\"),\n Output(component_id=\"car_icon\", component_property=\"src\"),\n Output(component_id=\"tv_icon\", component_property=\"src\"),\n Output(component_id=\"car_miles\", component_property=\"children\"),\n Output(component_id=\"tv_time\", component_property=\"children\"),\n Output(component_id=\"household_fraction\", component_property=\"children\"),\n ],\n [Input(component_id=\"hidden_project_summary\", component_property=\"data\")],\n )\n def update_exemplary_equivalents(hidden_project_summary: dcc.Store):\n project_carbon_equivalent = hidden_project_summary[\"total\"][\"emissions\"]\n house_icon = app.get_asset_url(\"house_icon.png\")\n car_icon = app.get_asset_url(\"car_icon.png\")\n tv_icon = app.get_asset_url(\"tv_icon.png\")\n car_miles = f\"{data.get_car_miles(project_carbon_equivalent)} miles\"\n tv_time = data.get_tv_time(project_carbon_equivalent)\n household_fraction = (\n f\"{data.get_household_fraction(project_carbon_equivalent)} %\"\n )\n return house_icon, car_icon, tv_icon, car_miles, tv_time, household_fraction\n\n @app.callback(\n [\n Output(\n component_id=\"global_emissions_choropleth\", component_property=\"figure\"\n ),\n Output(\n component_id=\"global_energy_mix_choropleth\", component_property=\"figure\"\n ),\n ],\n [\n Input(component_id=\"hidden_project_summary\", component_property=\"data\"),\n Input(component_id=\"energy_type\", component_property=\"value\"),\n ],\n )\n def update_global_comparisons(hidden_project_summary: dcc.Store, energy_type: str):\n net_energy_consumed = hidden_project_summary[\"total\"][\"energy_consumed\"]\n global_emissions_choropleth_data = data.get_global_emissions_choropleth_data(\n net_energy_consumed\n )\n\n return (\n components.get_global_emissions_choropleth_figure(\n global_emissions_choropleth_data\n ),\n components.get_global_energy_mix_choropleth_figure(\n energy_type, global_emissions_choropleth_data\n ),\n )\n\n @app.callback(\n Output(\n component_id=\"regional_emissions_comparison_component\",\n component_property=\"style\",\n ),\n [Input(component_id=\"hidden_project_summary\", component_property=\"data\")],\n )\n def update_show_regional_comparison(hidden_project_summary: dcc.Store):\n country_iso_code = hidden_project_summary[\"country_iso_code\"]\n # add country codes here to render for different countries\n if country_iso_code.upper() in [\"USA\", \"CAN\"]:\n return {\"display\": \"block\"}\n else:\n return {\"display\": \"none\"}\n\n @app.callback(\n [\n Output(component_id=\"country_name\", component_property=\"children\"),\n Output(\n component_id=\"regional_emissions_comparison_choropleth\",\n component_property=\"figure\",\n ),\n ],\n [Input(component_id=\"hidden_project_summary\", component_property=\"data\")],\n )\n def update_regional_comparison_choropleth(hidden_project_summary: dcc.Store):\n country_name = hidden_project_summary[\"country_name\"]\n country_iso_code = hidden_project_summary[\"country_iso_code\"]\n net_energy_consumed = hidden_project_summary[\"total\"][\"energy_consumed\"]\n regional_emissions_choropleth_data = (\n data.get_regional_emissions_choropleth_data(\n net_energy_consumed, country_iso_code\n )\n )\n\n return (\n country_name,\n components.get_regional_emissions_choropleth_figure(\n regional_emissions_choropleth_data, country_iso_code\n ),\n )\n\n @app.callback(\n 
Output(component_id=\"project_time_series\", component_property=\"figure\"),\n [Input(component_id=\"hidden_project_data\", component_property=\"children\")],\n )\n def update_project_time_series(hidden_project_data: dt.DataTable):\n return components.get_project_time_series_figure(\n hidden_project_data[\"props\"][\"data\"]\n )\n\n @app.callback(\n Output(component_id=\"project_emissions_bar_chart\", component_property=\"figure\"),\n [Input(component_id=\"hidden_project_data\", component_property=\"children\")],\n )\n def update_project_time_series(hidden_project_data: dt.DataTable):\n return components.get_project_emissions_bar_chart_figure(\n hidden_project_data[\"props\"][\"data\"]\n )\n\n @app.callback(\n Output(\n component_id=\"cloud_emissions_comparison_component\",\n component_property=\"style\",\n ),\n [Input(component_id=\"hidden_project_summary\", component_property=\"data\")],\n )\n def update_on_cloud(hidden_project_summary: dcc.Store):\n on_cloud = hidden_project_summary[\"on_cloud\"]\n if on_cloud == \"Y\":\n return {\"display\": \"block\"}\n else:\n return {\"display\": \"none\"}\n\n @app.callback(\n [\n Output(component_id=\"cloud_provider_name\", component_property=\"children\"),\n Output(\n component_id=\"cloud_emissions_barchart\", component_property=\"figure\"\n ),\n Output(component_id=\"cloud_recommendation\", component_property=\"children\"),\n ],\n [Input(component_id=\"hidden_project_summary\", component_property=\"data\")],\n )\n def update_cloud_emissions_barchart(hidden_project_summary: dcc.Store):\n on_cloud = hidden_project_summary[\"on_cloud\"]\n net_energy_consumed = hidden_project_summary[\"total\"][\"energy_consumed\"]\n cloud_provider = hidden_project_summary[\"cloud_provider\"]\n cloud_region = hidden_project_summary[\"cloud_region\"]\n (\n cloud_provider_name,\n cloud_emissions_barchart_data,\n ) = data.get_cloud_emissions_barchart_data(\n net_energy_consumed, on_cloud, cloud_provider, cloud_region\n )\n\n return (\n cloud_provider_name,\n components.get_cloud_emissions_barchart_figure(\n cloud_emissions_barchart_data\n ),\n components.get_cloud_recommendation(\n on_cloud, cloud_provider_name, cloud_emissions_barchart_data\n ),\n )\n\n return app\n\n\ndef viz(filepath: str, port: int = 8050, debug: bool = False) -> None:\n df = pd.read_csv(filepath)\n app = render_app(df)\n app.run_server(port=port, debug=debug)\n\n\ndef main():\n fire.Fire(viz)\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"pandas.read_csv"
]
]
|
PancakeAwesome/CRNN_tensorflow | [
"08fd19880d9c228e75aa1a642fc811932324be00"
]
| [
"helper_demo.py"
]
| [
"import numpy as np\nimport os\nimport shutil\n\ndef split_train_val(X, y, train_size):\n \"\"\"Split dataset for training and validation.\n 把数据集切分成训练集和验证集\n Args:\n X: A 1-D numpy array containing pathes of images.\n y: A 1-D numpy array containing labels.\n train_size: Size of training data to split.\n Returns:\n 1-D numpy array having the same definition with X and y.\n \"\"\"\n\n total_size = len(X)\n # 打乱数据\n shuffle_indices = np.random.permutation(np.arange(total_size))\n X = X[shuffle_indices]\n y = y[shuffle_indices]\n\n # 切分训练数据\n train_indices = np.random.choice(total_size, train_size, replace = False)\n X_train = X[train_indices]\n y_train = y[train_indices]\n\n # 切分验证集数据\n # 数据集其余的数据用来做验证集\n val_indices = [i for i in xrange(total_size) if i not in train_indices]\n X_val = X[val_indices]\n y_val = y[val_indices]\n\n return X_train, y_train, X_val, y_val\n\ndef write_to_file(data, file_to_output):\n \"\"\"Write X_train/y_train/X_val/y_val/X_infer to file for further\n processing (e.g. make input queue of tensorflow).\n 将训练集,验证集,测试集数据写入文件中\n Args:\n data: A 1-D numpy array, e.g, X_train/y_train/X_val/y_val/X_infer.\n file_to_output: A file to store data.\n \"\"\"\n # with open('X_train.csv','a') as f_handle:\n # np.savetxt(f_handle, X_train, fmt='%s', delimiter=\",\")\n\n with open(file_to_output, 'w') as f:\n for item in data.tolist():\n f.write(item + '\\n')\n\n# 从txt文件中读取labels\ndef load_labels(file):\n labels = list(open(file).readlines())\n # 去空格\n labels = [s.strip() for s in labels]\n labels = [s.split() for s in labels]\n\n labels_dict = dict(labels)\n\n labels = np.asarray(labels, dtype = str)\n labels = labels[:, 0]\n\n return labels, labels_dict\n\n# 从文件夹中读取图片名字的列表\ndef load_img_path(images_path):\n tmp = os.listdir(images_path)\n tmp.sort(key = lambda x: int(x.split('.')[0]))\n\n file_names = [images_path + s for s in tmp]\n\n file_names = np.asarray(file_names)\n\n return file_names\n\ndef load_data(file_to_read):\n \"\"\"Load X_train/y_train/X_val/y_val/X_infer for further\n processing (e.g. make input queue of tensorflow).\n\n Args:\n file_to_read:\n Returns:\n X_train/y_train/X_val/y_val/X_infer.\n \"\"\"\n\n data = np.recfromtxt(file_to_read)\n data = np.asarray(data)\n\n return data\n\n\ndef cp_file(imgs_list_para, labels_list_para, dst_para):\n \"\"\"\n 最后的data格式为:./imgs/train/id_label.png\n \"\"\"\n for i in xrange(imgs_list_para.shape[0]):\n file_path = imgs_list_para[i]\n\n filename = os.path.basename(file_path)\n fn = filename.split('.')[0]\n ext = filename.split('.')[1]\n\n dest_filename = dst_para + fn + '_' + labels_list_para[i] + '.' + ext\n\n shutil.copyfile(file_path, dest_filename)\n\nif __name__ == '__main__':\n labels_path = './imgs/labels.txt'\n labels, labels_dict = load_labels(labels_path)\n # print(labels)\n\n images_path = './imgs/image_contest_level_1/'\n images_path_list = load_img_path(images_path)\n # print(images_path_list[:10])\n\n X_train, y_train, X_val, y_val = split_train_val(images_path_list, labels, 80000)\n write_to_file(X_train, \"./imgs/X_train.txt\")\n write_to_file(y_train, \"./imgs/y_train.txt\")\n write_to_file(X_val, \"./imgs/X_val.txt\")\n write_to_file(y_val, \"./imgs/y_val.txt\")\n\n cp_file(X_train, y_train, './imgs/train/')\n cp_file(X_val, y_val, './imgs/val/')"
]
| [
[
"numpy.arange",
"numpy.random.choice",
"numpy.asarray",
"numpy.recfromtxt"
]
]
|
LaGuer/K3D-jupyter | [
"7be6f413c8a4787d3f3b83654cd5f311fd6d615d"
]
| [
"k3d/objects.py"
]
| [
"import ipywidgets as widgets\nimport numpy as np\nfrom ipydatawidgets import DataUnion, data_union_serialization\nfrom traitlets import Unicode, Int, Float, List, Bool, Bytes, Integer, Dict, Union\nfrom traitlets import validate, TraitError\nfrom traittypes import Array\n\nfrom ._version import __version__ as version\nfrom .helpers import array_serialization_wrap, shape_validation, validate_sparse_voxels\nfrom .validation.stl import AsciiStlData, BinaryStlData\n\nEPSILON = np.finfo(np.float32).eps\n\n\nclass TimeSeries(Union):\n def __init__(self, trait):\n if isinstance(trait, list):\n Union.__init__(self, trait + [Dict(t) for t in trait])\n else:\n Union.__init__(self, [trait, Dict(trait)])\n\n\nclass ListOrArray(List):\n _cast_types = (tuple, np.ndarray)\n\n def __init__(self, *args, **kwargs):\n self._empty_ok = kwargs.pop('empty_ok', False)\n List.__init__(self, *args, **kwargs)\n\n def validate_elements(self, obj, value):\n if self._empty_ok and len(value) == 0:\n return list(value)\n return super(ListOrArray, self).validate_elements(obj, value)\n\n\nclass VoxelChunk(widgets.Widget):\n \"\"\"\n Voxel chunk class for selective updating voxels\n \"\"\"\n\n _model_name = Unicode('ChunkModel').tag(sync=True)\n _model_module = Unicode('k3d').tag(sync=True)\n _model_module_version = Unicode(version).tag(sync=True)\n\n id = Int().tag(sync=True)\n voxels = Array(dtype=np.uint8).tag(sync=True, **array_serialization_wrap('voxels'))\n coord = Array(dtype=np.uint32).tag(sync=True, **array_serialization_wrap('coord'))\n multiple = Int().tag(sync=True)\n\n def push_data(self, field):\n self.notify_change({'name': field, 'type': 'change'})\n\n def __init__(self, **kwargs):\n self.id = id(self)\n super(VoxelChunk, self).__init__(**kwargs)\n\n def __getitem__(self, name):\n return getattr(self, name)\n\n\nclass Drawable(widgets.Widget):\n \"\"\"\n Base class for drawable objects and groups.\n \"\"\"\n\n _model_name = Unicode('ObjectModel').tag(sync=True)\n _model_module = Unicode('k3d').tag(sync=True)\n _model_module_version = Unicode(version).tag(sync=True)\n\n id = Integer().tag(sync=True)\n name = Unicode(default_value=None, allow_none=True).tag(sync=True)\n visible = Bool(True).tag(sync=True)\n compression_level = Integer().tag(sync=True)\n\n def __getitem__(self, name):\n return getattr(self, name)\n\n def __init__(self, **kwargs):\n self.id = id(self)\n\n super(Drawable, self).__init__(**kwargs)\n\n def __iter__(self):\n return (self,).__iter__()\n\n def __add__(self, other):\n return Group(self, other)\n\n def fetch_data(self, field):\n \"\"\"Request updating the value of a field modified in browser.\n\n For data modified in the widget on the browser side, this triggers an asynchronous\n update of the value in the Python kernel.\n\n Only specific features require this mechanism, e.g. the in-browser editing of voxels.\n\n Arguments:\n field: `str`.\n The field name.\"\"\"\n self.send({'msg_type': 'fetch', 'field': field})\n\n def push_data(self, field):\n \"\"\"Request updating the value of a field modified in backend.\n\n For data modified in the backend side, this triggers an asynchronous\n update of the value in the browser widget.\n\n Only specific features require this mechanism, e.g. 
the in-browser editing of voxels.\n\n Arguments:\n field: `str`.\n The field name.\"\"\"\n self.notify_change({'name': field, 'type': 'change'})\n\n def _ipython_display_(self, **kwargs):\n \"\"\"Called when `IPython.display.display` is called on the widget.\"\"\"\n import k3d\n plot = k3d.plot()\n plot += self\n plot.display()\n\n\nclass Group(Drawable):\n \"\"\"\n An aggregated group of Drawables, itself a Drawable.\n\n It can be inserted or removed from a Plot including all members.\n \"\"\"\n\n __objs = None\n\n def __init__(self, *args):\n self.__objs = tuple(self.__assert_drawable(drawable) for drawables in args for drawable in drawables)\n\n def __iter__(self):\n return self.__objs.__iter__()\n\n def __setattr__(self, key, value):\n \"\"\"Special method override which allows for setting model matrix for all members of the group.\"\"\"\n if key == 'model_matrix':\n for d in self:\n d.model_matrix = value\n else:\n super(Group, self).__setattr__(key, value)\n\n @staticmethod\n def __assert_drawable(arg):\n assert isinstance(arg, Drawable)\n\n return arg\n\n\n# DRAWABLE OBJECTS\n\n\nclass Line(Drawable):\n \"\"\"\n A path (polyline) made up of line segments.\n\n Attributes:\n vertices: `array_like`.\n An array with (x, y, z) coordinates of segment endpoints.\n colors: `array_like`.\n Same-length array of (`int`) packed RGB color of the points (0xff0000 is red, 0xff is blue).\n color: `int`.\n Packed RGB color of the lines (0xff0000 is red, 0xff is blue) when `colors` is empty.\n attribute: `array_like`.\n Array of float attribute for the color mapping, coresponding to each vertex.\n color_map: `list`.\n A list of float quadruplets (attribute value, R, G, B), sorted by attribute value. The first\n quadruplet should have value 0.0, the last 1.0; R, G, B are RGB color components in the range 0.0 to 1.0.\n color_range: `list`.\n A pair [min_value, max_value], which determines the levels of color attribute mapped\n to 0 and 1 in the color map respectively.\n width: `float`.\n The thickness of the lines.\n shader: `str`.\n Display style (name of the shader used) of the lines.\n Legal values are:\n\n :`simple`: simple lines,\n\n :`thick`: thick lines,\n\n :`mesh`: high precision triangle mesh of segments (high quality and GPU load).\n radial_segments: 'int':\n Number of segmented faces around the circumference of the tube.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n\n vertices = TimeSeries(Array(dtype=np.float32)).tag(sync=True, **array_serialization_wrap('vertices'))\n colors = TimeSeries(Array(dtype=np.uint32)).tag(sync=True, **array_serialization_wrap('colors'))\n color = TimeSeries(Int(min=0, max=0xffffff)).tag(sync=True)\n width = TimeSeries(Float(min=EPSILON, default_value=0.01)).tag(sync=True)\n attribute = TimeSeries(Array(dtype=np.float32)).tag(sync=True, **array_serialization_wrap('attribute'))\n color_map = TimeSeries(Array(dtype=np.float32)).tag(sync=True, **array_serialization_wrap('color_map'))\n color_range = TimeSeries(ListOrArray(minlen=2, maxlen=2, empty_ok=True)).tag(sync=True)\n shader = TimeSeries(Unicode()).tag(sync=True)\n radial_segments = TimeSeries(Int()).tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(sync=True, **array_serialization_wrap('model_matrix'))\n\n def __init__(self, **kwargs):\n super(Line, self).__init__(**kwargs)\n\n self.set_trait('type', 'Line')\n\n @validate('colors')\n def _validate_colors(self, proposal):\n if type(proposal['value']) is dict or 
type(self.vertices) is dict:\n return proposal['value']\n\n required = self.vertices.size // 3 # (x, y, z) triplet per 1 color\n actual = proposal['value'].size\n if actual != 0 and required != actual:\n raise TraitError('colors has wrong size: %s (%s required)' % (actual, required))\n return proposal['value']\n\n\nclass MarchingCubes(Drawable):\n \"\"\"\n An isosurface in a scalar field obtained through Marching Cubes algorithm.\n\n The default domain of the scalar field is -0.5 < x, y, z < 0.5.\n If the domain should be different, the bounding box needs to be transformed using the model_matrix.\n\n Attributes:\n scalar_field: `array_like`.\n A 3D scalar field of values.\n level: `float`.\n Value at the computed isosurface.\n color: `int`.\n Packed RGB color of the isosurface (0xff0000 is red, 0xff is blue).\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n flat_shading: `bool`.\n Whether mesh should display with flat shading.\n opacity: `float`.\n Opacity of mesh.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n scalar_field = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('scalar_field'))\n level = Float().tag(sync=True)\n color = Int(min=0, max=0xffffff).tag(sync=True)\n wireframe = Bool().tag(sync=True)\n flat_shading = Bool().tag(sync=True)\n opacity = Float(min=0.0, max=1.0, default_value=1.0).tag(sync=True)\n model_matrix = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('model_matrix'))\n\n def __init__(self, **kwargs):\n super(MarchingCubes, self).__init__(**kwargs)\n\n self.set_trait('type', 'MarchingCubes')\n\n\nclass Mesh(Drawable):\n \"\"\"\n A 3D triangles mesh.\n\n Attributes:\n vertices: `array_like`.\n Array of triangle vertices: float (x, y, z) coordinate triplets.\n indices: `array_like`.\n Array of vertex indices: int triplets of indices from vertices array.\n color: `int`.\n Packed RGB color of the mesh (0xff0000 is red, 0xff is blue) when not using color maps.\n attribute: `array_like`.\n Array of float attribute for the color mapping, coresponding to each vertex.\n color_map: `list`.\n A list of float quadruplets (attribute value, R, G, B), sorted by attribute value. 
The first\n quadruplet should have value 0.0, the last 1.0; R, G, B are RGB color components in the range 0.0 to 1.0.\n color_range: `list`.\n A pair [min_value, max_value], which determines the levels of color attribute mapped\n to 0 and 1 in the color map respectively.\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n flat_shading: `bool`.\n Whether mesh should display with flat shading.\n opacity: `float`.\n Opacity of mesh.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n vertices = TimeSeries(Array(dtype=np.float32)).tag(sync=True, **array_serialization_wrap('vertices'))\n indices = TimeSeries(Array(dtype=np.uint32)).tag(sync=True, **array_serialization_wrap('indices'))\n color = TimeSeries(Int(min=0, max=0xffffff)).tag(sync=True)\n attribute = TimeSeries(Array(dtype=np.float32)).tag(sync=True, **array_serialization_wrap('attribute'))\n color_map = TimeSeries(Array(dtype=np.float32)).tag(sync=True, **array_serialization_wrap('color_map'))\n color_range = TimeSeries(ListOrArray(minlen=2, maxlen=2, empty_ok=True)).tag(sync=True)\n wireframe = TimeSeries(Bool()).tag(sync=True)\n flat_shading = TimeSeries(Bool()).tag(sync=True)\n opacity = Float(min=0.0, max=1.0, default_value=1.0).tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(sync=True, **array_serialization_wrap('model_matrix'))\n\n def __init__(self, **kwargs):\n super(Mesh, self).__init__(**kwargs)\n\n self.set_trait('type', 'Mesh')\n\n\nclass Points(Drawable):\n \"\"\"\n A point cloud.\n\n Attributes:\n positions: `array_like`.\n Array with (x, y, z) coordinates of the points.\n colors: `array_like`.\n Same-length array of (`int`) packed RGB color of the points (0xff0000 is red, 0xff is blue).\n color: `int`.\n Packed RGB color of the points (0xff0000 is red, 0xff is blue) when `colors` is empty.\n point_size: `float`.\n Diameter of the balls representing the points in 3D space.\n shader: `str`.\n Display style (name of the shader used) of the points.\n Legal values are:\n\n :`flat`: simple circles with uniform color,\n\n :`dot`: simple dot with uniform color,\n\n :`3d`: little 3D balls,\n\n :`3dSpecular`: little 3D balls with specular lightning,\n\n :`mesh`: high precision triangle mesh of a ball (high quality and GPU load).\n mesh_detail: `int`.\n Default is 2. Setting this to a value greater than 0 adds more vertices making it no longer an\n icosahedron. When detail is greater than 1, it's effectively a sphere. 
Only valid if shader='mesh'\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n positions = TimeSeries(Array(dtype=np.float32)).tag(sync=True, **array_serialization_wrap('positions'))\n colors = TimeSeries(Array(dtype=np.uint32)).tag(sync=True, **array_serialization_wrap('colors'))\n color = TimeSeries(Int(min=0, max=0xffffff)).tag(sync=True)\n point_size = TimeSeries(Float(min=EPSILON, default_value=1.0)).tag(sync=True)\n opacity = TimeSeries(Float(min=0.0, max=1.0, default_value=1.0)).tag(sync=True)\n shader = TimeSeries(Unicode()).tag(sync=True)\n mesh_detail = TimeSeries(Int(min=0, max=8)).tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(sync=True, **array_serialization_wrap('model_matrix'))\n\n def __init__(self, **kwargs):\n super(Points, self).__init__(**kwargs)\n\n self.set_trait('type', 'Points')\n\n @validate('colors')\n def _validate_colors(self, proposal):\n required = self.positions.size // 3 # (x, y, z) triplet per 1 color\n actual = proposal['value'].size\n if actual != 0 and required != actual:\n raise TraitError('colors has wrong size: %s (%s required)' % (actual, required))\n return proposal['value']\n\n\nclass STL(Drawable):\n \"\"\"\n A STereoLitograpy 3D geometry.\n\n STL is a popular format introduced for 3D printing. There are two sub-formats - ASCII and binary.\n\n Attributes:\n text: `str`.\n STL data in text format (ASCII STL).\n binary: `bytes`.\n STL data in binary format (Binary STL).\n The `text` attribute should be set to None when using Binary STL.\n color: `int`.\n Packed RGB color of the resulting mesh (0xff0000 is red, 0xff is blue).\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n flat_shading: `bool`.\n Whether mesh should display with flat shading.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n text = AsciiStlData(allow_none=True, default_value=None).tag(sync=True)\n binary = BinaryStlData(allow_none=True, default_value=None).tag(sync=True)\n color = Int(min=0, max=0xffffff).tag(sync=True)\n model_matrix = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('model_matrix'))\n wireframe = Bool().tag(sync=True)\n flat_shading = Bool().tag(sync=True)\n\n def __init__(self, **kwargs):\n super(STL, self).__init__(**kwargs)\n\n self.set_trait('type', 'STL')\n\n\nclass Surface(Drawable):\n \"\"\"\n Surface plot of a 2D function z = f(x, y).\n\n The default domain of the scalar field is -0.5 < x, y < 0.5.\n If the domain should be different, the bounding box needs to be transformed using the model_matrix.\n\n Attributes:\n heights: `array_like`.\n 2D scalar field of Z values.\n color: `int`.\n Packed RGB color of the resulting mesh (0xff0000 is red, 0xff is blue).\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n flat_shading: `bool`.\n Whether mesh should display with flat shading.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n heights = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('heights'))\n color = Int(min=0, max=0xffffff).tag(sync=True)\n wireframe = Bool().tag(sync=True)\n flat_shading = Bool().tag(sync=True)\n model_matrix = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('model_matrix'))\n\n def __init__(self, **kwargs):\n super(Surface, self).__init__(**kwargs)\n\n self.set_trait('type', 'Surface')\n\n\nclass Text(Drawable):\n 
\"\"\"\n Text rendered using KaTeX with a 3D position.\n\n Attributes:\n text: `str`.\n Content of the text.\n position: `list`.\n Coordinates (x, y, z) of the text's position.\n color: `int`.\n Packed RGB color of the text (0xff0000 is red, 0xff is blue).\n reference_point: `str`.\n Two-letter string representing the text's alignment.\n\n First letter: 'l', 'c' or 'r': left, center or right\n\n Second letter: 't', 'c' or 'b': top, center or bottom.\n size: `float`.\n Font size in 'em' HTML units.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n text = TimeSeries(Unicode()).tag(sync=True)\n position = TimeSeries(ListOrArray(minlen=3, maxlen=3)).tag(sync=True)\n color = Int(min=0, max=0xffffff).tag(sync=True)\n reference_point = Unicode().tag(sync=True)\n size = TimeSeries(Float(min=EPSILON, default_value=1.0)).tag(sync=True)\n\n def __init__(self, **kwargs):\n super(Text, self).__init__(**kwargs)\n\n self.set_trait('type', 'Text')\n\n\nclass Text2d(Drawable):\n \"\"\"\n Text rendered using KaTeX with a fixed 2D position, independent of camera settings.\n\n Attributes:\n text: `str`.\n Content of the text.\n position: `list`.\n Ratios (r_x, r_y) of the text's position in range (0, 1) - relative to canvas size.\n color: `int`.\n Packed RGB color of the text (0xff0000 is red, 0xff is blue).\n reference_point: `str`.\n Two-letter string representing the text's alignment.\n\n First letter: 'l', 'c' or 'r': left, center or right\n\n Second letter: 't', 'c' or 'b': top, center or bottom.\n size: `float`.\n Font size in 'em' HTML units.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n color = Int(min=0, max=0xffffff).tag(sync=True)\n size = TimeSeries(Float(min=EPSILON, default_value=1.0)).tag(sync=True)\n reference_point = Unicode().tag(sync=True)\n position = TimeSeries(ListOrArray(minlen=2, maxlen=2)).tag(sync=True)\n text = TimeSeries(Unicode()).tag(sync=True)\n\n def __init__(self, **kwargs):\n super(Text2d, self).__init__(**kwargs)\n\n self.set_trait('type', 'Text2d')\n\n\nclass Texture(Drawable):\n \"\"\"\n A 2D image displayed as a texture.\n\n By default, the texture image is mapped into the square: -0.5 < x, y < 0.5, z = 1.\n If the size (scale, aspect ratio) or position should be different then the texture should be transformed\n using the model_matrix.\n\n Attributes:\n binary: `bytes`.\n Image data in a specific format.\n file_format: `str`.\n Format of the data, it should be the second part of MIME format of type 'image/',\n for example 'jpeg', 'png', 'gif', 'tiff'.\n attribute: `array_like`.\n Array of float attribute for the color mapping, coresponding to each pixels.\n color_map: `list`.\n A list of float quadruplets (attribute value, R, G, B), sorted by attribute value. 
The first\n quadruplet should have value 0.0, the last 1.0; R, G, B are RGB color components in the range 0.0 to 1.0.\n color_range: `list`.\n A pair [min_value, max_value], which determines the levels of color attribute mapped\n to 0 and 1 in the color map respectively.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n binary = Bytes(allow_none=True).tag(sync=True)\n file_format = Unicode(allow_none=True).tag(sync=True)\n attribute = Array().tag(sync=True, **array_serialization_wrap('attribute'))\n color_map = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('color_map'))\n color_range = ListOrArray(minlen=2, maxlen=2, empty_ok=True).tag(sync=True)\n model_matrix = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('model_matrix'))\n\n def __init__(self, **kwargs):\n super(Texture, self).__init__(**kwargs)\n\n self.set_trait('type', 'Texture')\n\n\nclass TextureText(Drawable):\n \"\"\"\n A text in the 3D space rendered using a texture.\n\n Compared to Text and Text2d this drawable has less features (no KaTeX support), but the labels are located\n in the GPU memory, and not the browser's DOM tree. This has performance consequences, and may be preferable when\n many simple labels need to be displayed.\n\n Attributes:\n text: `str`.\n Content of the text.\n position: `list`.\n Coordinates (x, y, z) of the text's position.\n color: `int`.\n Packed RGB color of the text (0xff0000 is red, 0xff is blue).\n size: `float`.\n Size of the texture sprite containing the text.\n font_face: `str`.\n Name of the font to use for rendering the text.\n font_weight: `int`.\n Thickness of the characters in HTML-like units from the range (100, 900), where\n 400 is normal and 600 is bold font.\n font_size: `int`.\n The font size inside the sprite texture in px units. This does not affect the size of the\n text in the scene, only the accuracy and raster size of the texture.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n text = Unicode().tag(sync=True)\n position = ListOrArray(minlen=3, maxlen=3).tag(sync=True)\n color = Int(min=0, max=0xffffff).tag(sync=True)\n size = Float(min=EPSILON, default_value=1.0).tag(sync=True)\n font_face = Unicode().tag(sync=True)\n font_weight = Int().tag(sync=True)\n font_size = Int().tag(sync=True)\n\n def __init__(self, **kwargs):\n super(TextureText, self).__init__(**kwargs)\n\n self.set_trait('type', 'TextureText')\n\n\nclass VectorField(Drawable):\n \"\"\"\n A dense 3D or 2D vector field.\n\n By default, the origins of the vectors are assumed to be a grid inscribed in the -0.5 < x, y, z < 0.5 cube\n or -0.5 < x, y < 0.5 square, regardless of the passed vector field shape (aspect ratio etc.).\n Different grid size, shape and rotation can be obtained using the model_matrix.\n\n The color of the vectors is a gradient from origin_color to head_color. Heads, when used, have uniform head_color.\n\n For sparse (i.e. 
not forming a grid) 3D vectors, use the `Vectors` drawable.\n\n Attributes:\n vectors: `array_like`.\n Vector field of shape (L, H, W, 3) for 3D fields or (H, W, 2) for 2D fields.\n colors: `array_like`.\n Twice the length of vectors array of int: packed RGB colors\n (0xff0000 is red, 0xff is blue).\n The array has consecutive pairs (origin_color, head_color) for vectors in row-major order.\n origin_color: `int`.\n Packed RGB color of the origins (0xff0000 is red, 0xff is blue) when `colors` is empty.\n head_color: `int`.\n Packed RGB color of the vector heads (0xff0000 is red, 0xff is blue) when `colors` is empty.\n use_head: `bool`.\n Whether vectors should display an arrow head.\n head_size: `float`.\n The size of the arrow heads.\n scale: `float`.\n Scale factor for the vector lengths, for artificially scaling the vectors in place.\n line_width: `float`.\n Width of the vector segments.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n vectors = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('vectors'))\n colors = Array(dtype=np.uint32).tag(sync=True, **array_serialization_wrap('colors'))\n origin_color = Int(min=0, max=0xffffff).tag(sync=True)\n head_color = Int(min=0, max=0xffffff).tag(sync=True)\n use_head = Bool().tag(sync=True)\n head_size = Float(min=EPSILON, default_value=1.0).tag(sync=True)\n scale = Float().tag(sync=True)\n line_width = Float(min=EPSILON, default_value=0.01).tag(sync=True)\n model_matrix = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('model_matrix'))\n\n def __init__(self, **kwargs):\n super(VectorField, self).__init__(**kwargs)\n\n self.set_trait('type', 'VectorField')\n\n @validate('vectors')\n def _validate_vectors(self, proposal):\n shape = proposal['value'].shape\n if len(shape) not in (3, 4) or len(shape) != shape[-1] + 1:\n raise TraitError('Vector field has invalid shape: {}, '\n 'expected (L, H, W, 3) for a 3D or (H, W, 2) for a 2D field'.format(shape))\n return np.array(proposal['value'], np.float32)\n\n\nclass Vectors(Drawable):\n \"\"\"\n 3D vectors.\n\n The color of the vectors is a gradient from origin_color to head_color. Heads, when used, have uniform head_color.\n\n For dense (i.e. 
forming a grid) 3D or 2D vectors, use the `VectorField` drawable.\n\n Attributes:\n vectors: `array_like`.\n The vectors as (dx, dy, dz) float triples.\n origins: `array_like`.\n Same-size array of (x, y, z) coordinates of vector origins.\n colors: `array_like`.\n Twice the length of vectors array of int: packed RGB colors\n (0xff0000 is red, 0xff is blue).\n The array has consecutive pairs (origin_color, head_color) for vectors in row-major order.\n origin_color: `int`.\n Packed RGB color of the origins (0xff0000 is red, 0xff is blue), default: same as color.\n head_color: `int`.\n Packed RGB color of the vector heads (0xff0000 is red, 0xff is blue), default: same as color.\n use_head: `bool`.\n Whether vectors should display an arrow head.\n head_size: `float`.\n The size of the arrow heads.\n labels: `list` of `str`.\n Captions to display next to the vectors.\n label_size: `float`.\n Label font size in 'em' HTML units.\n line_width: `float`.\n Width of the vector segments.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n origins = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('origins'))\n vectors = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('vectors'))\n colors = Array(dtype=np.uint32).tag(sync=True, **array_serialization_wrap('colors'))\n origin_color = Int(min=0, max=0xffffff).tag(sync=True)\n head_color = Int(min=0, max=0xffffff).tag(sync=True)\n use_head = Bool().tag(sync=True)\n head_size = Float(min=EPSILON, default_value=1.0).tag(sync=True)\n labels = List().tag(sync=True)\n label_size = Float(min=EPSILON, default_value=1.0).tag(sync=True)\n line_width = Float(min=EPSILON, default_value=0.01).tag(sync=True)\n model_matrix = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('model_matrix'))\n\n def __init__(self, **kwargs):\n super(Vectors, self).__init__(**kwargs)\n\n self.set_trait('type', 'Vectors')\n\n\nclass Volume(Drawable):\n \"\"\"\n 3D volumetric data.\n\n By default, the volume are a grid inscribed in the -0.5 < x, y, z < 0.5 cube\n regardless of the passed voxel array shape (aspect ratio etc.).\n\n Attributes:\n volume: `array_like`.\n 3D array of `float`.\n color_map: `array_like`.\n A list of float quadruplets (attribute value, R, G, B), sorted by attribute value. The first\n quadruplet should have value 0.0, the last 1.0; R, G, B are RGB color components in the range 0.0 to 1.0.\n opacity_function: `array`.\n A list of float tuples (attribute value, opacity), sorted by attribute value. 
The first\n typles should have value 0.0, the last 1.0; opacity is in the range 0.0 to 1.0.\n color_range: `list`.\n A pair [min_value, max_value], which determines the levels of color attribute mapped\n to 0 and 1 in the color map respectively.\n samples: `float`.\n Number of iteration per 1 unit of space.\n alpha_coef: `float`.\n Alpha multiplier.\n shadow: `str`.\n Type of shadow on volume.\n\n Legal values are:\n\n :`off`: shadow disabled,\n\n :`on_demand`: update shadow map on demand ( self.shadow_map_update() ),\n\n :`dynamic`: update shadow map automaticaly every shadow_delay.\n shadow_delay: `float`.\n Minimum number of miliseconds between shadow map updates.\n shadow_res: `int`.\n Resolution of shadow map.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n volume = TimeSeries([Array(dtype=np.float32),\n Array(dtype=np.float16)]).tag(sync=True, **array_serialization_wrap('volume'))\n color_map = TimeSeries(Array(dtype=np.float32)).tag(sync=True, **array_serialization_wrap('color_map'))\n opacity_function = TimeSeries(Array(dtype=np.float32)).tag(sync=True,\n **array_serialization_wrap('opacity_function'))\n color_range = TimeSeries(ListOrArray(minlen=2, maxlen=2, empty_ok=True)).tag(sync=True)\n samples = TimeSeries(Float()).tag(sync=True)\n alpha_coef = TimeSeries(Float()).tag(sync=True)\n gradient_step = TimeSeries(Float()).tag(sync=True)\n shadow = TimeSeries(Unicode()).tag(sync=True)\n shadow_res = TimeSeries(Int(min=31, max=513, default_value=128)).tag(sync=True)\n shadow_delay = TimeSeries(Float()).tag(sync=True)\n ray_samples_count = TimeSeries(Int(min=1, max=128, default_value=16)).tag(sync=True)\n focal_length = TimeSeries(Float()).tag(sync=True)\n focal_plane = TimeSeries(Float()).tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(sync=True, **array_serialization_wrap('model_matrix'))\n\n def __init__(self, **kwargs):\n super(Volume, self).__init__(**kwargs)\n\n self.set_trait('type', 'Volume')\n\n def shadow_map_update(self, direction=None):\n \"\"\"Request updating the shadow map in browser.\"\"\"\n\n self.send({'msg_type': 'shadow_map_update', 'direction': direction})\n\n\nclass Voxels(Drawable):\n \"\"\"\n 3D volumetric data.\n\n Different grid size, shape and rotation can be obtained using model_matrix.\n\n Attributes:\n voxels: `array_like`.\n 3D array of `int` in range (0, 255).\n 0 means empty voxel, 1 and above refer to consecutive color_map entries.\n color_map: `array_like`.\n Flat array of `int` packed RGB colors (0xff0000 is red, 0xff is blue).\n\n The color defined at index i is for voxel value (i+1), e.g.:\n\n | color_map = [0xff, 0x00ff]\n | voxels =\n | [\n | 0, # empty voxel\n | 1, # blue voxel\n | 2 # red voxel\n | ]\n\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n opacity: `float`.\n Opacity of voxels.\n outlines: `bool`.\n Whether mesh should display with outlines.\n outlines_color: `int`.\n Packed RGB color of the resulting outlines (0xff0000 is red, 0xff is blue)\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n voxels = Array(dtype=np.uint8).tag(sync=True, **array_serialization_wrap('voxels'))\n color_map = Array(dtype=np.uint32).tag(sync=True, **array_serialization_wrap('voxels'))\n wireframe = Bool().tag(sync=True)\n outlines = Bool().tag(sync=True)\n outlines_color = Int(min=0, max=0xffffff).tag(sync=True)\n opacity = Float(min=0.0, max=1.0, 
default_value=1.0).tag(sync=True)\n model_matrix = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('model_matrix'))\n click_callback = None\n\n def __init__(self, **kwargs):\n super(Voxels, self).__init__(**kwargs)\n\n self.set_trait('type', 'Voxels')\n self.on_msg(self._handle_custom_msg)\n\n def _handle_custom_msg(self, content, buffers):\n if content.get('msg_type', '') == 'click_callback':\n if self.click_callback is not None:\n self.click_callback(content['coord']['x'], content['coord']['y'], content['coord']['z'])\n\n\nclass SparseVoxels(Drawable):\n \"\"\"\n 3D volumetric data.\n\n By default, the voxels are a grid inscribed in the -0.5 < x, y, z < 0.5 cube\n regardless of the passed voxel array shape (aspect ratio etc.).\n Different grid size, shape and rotation can be obtained using the model_matrix.\n\n Attributes:\n sparse_voxels: `array_like`.\n 2D array of `coords` in format [[x,y,z,v],[x,y,z,v]].\n v = 0 means empty voxel, 1 and above refer to consecutive color_map entries.\n space_size: `array_like`.\n Width, Height, Length of space\n color_map: `array_like`.\n Flat array of `int` packed RGB colors (0xff0000 is red, 0xff is blue).\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n opacity: `float`.\n Opacity of voxels.\n outlines: `bool`.\n Whether mesh should display with outlines.\n outlines_color: `int`.\n Packed RGB color of the resulting outlines (0xff0000 is red, 0xff is blue)\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n sparse_voxels = Array(dtype=np.uint16).tag(sync=True, **array_serialization_wrap('sparse_voxels')).valid(\n validate_sparse_voxels\n )\n space_size = Array(dtype=np.uint32).tag(sync=True, **array_serialization_wrap('space_size')).valid(\n shape_validation(3)\n )\n color_map = Array(dtype=np.uint32).tag(sync=True, **array_serialization_wrap('color_map'))\n wireframe = Bool().tag(sync=True)\n outlines = Bool().tag(sync=True)\n outlines_color = Int(min=0, max=0xffffff).tag(sync=True)\n opacity = Float(min=0.0, max=1.0, default_value=1.0).tag(sync=True)\n model_matrix = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('model_matrix'))\n click_callback = None\n\n def __init__(self, **kwargs):\n super(SparseVoxels, self).__init__(**kwargs)\n\n self.set_trait('type', 'SparseVoxels')\n self.on_msg(self._handle_custom_msg)\n\n def _handle_custom_msg(self, content, buffers):\n if content.get('msg_type', '') == 'click_callback':\n if self.click_callback is not None:\n self.click_callback(content['coord']['x'], content['coord']['y'], content['coord']['z'])\n\n\nclass VoxelsGroup(Drawable):\n \"\"\"\n 3D volumetric data.\n\n By default, the voxels are a grid inscribed in the -0.5 < x, y, z < 0.5 cube\n regardless of the passed voxel array shape (aspect ratio etc.).\n Different grid size, shape and rotation can be obtained using the model_matrix.\n\n Attributes:\n voxels_group: `array_like`.\n List of `chunks` in format {voxels: np.array, coord: [x,y,z], multiple: number}.\n space_size: `array_like`.\n Width, Height, Length of space\n color_map: `array_like`.\n Flat array of `int` packed RGB colors (0xff0000 is red, 0xff is blue).\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n opacity: `float`.\n Opacity of voxels.\n outlines: `bool`.\n Whether mesh should display with outlines.\n outlines_color: `int`.\n Packed RGB color of the resulting outlines (0xff0000 is 
red, 0xff is blue)\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n\n _hold_remeshing = Bool(default_value=False).tag(sync=True)\n\n voxels_group = List().tag(sync=True, **array_serialization_wrap('voxels_group'))\n chunks_ids = List().tag(sync=True)\n\n space_size = Array(dtype=np.uint32).tag(sync=True, **array_serialization_wrap('space_size'))\n color_map = Array(dtype=np.uint32).tag(sync=True, **array_serialization_wrap('color_map'))\n wireframe = Bool().tag(sync=True)\n outlines = Bool().tag(sync=True)\n outlines_color = Int(min=0, max=0xffffff).tag(sync=True)\n opacity = Float(min=0.0, max=1.0, default_value=1.0).tag(sync=True)\n model_matrix = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('model_matrix'))\n click_callback = None\n\n def __init__(self, **kwargs):\n super(VoxelsGroup, self).__init__(**kwargs)\n\n self.set_trait('type', 'VoxelsGroup')\n self.on_msg(self._handle_custom_msg)\n\n def _handle_custom_msg(self, content, buffers):\n if content.get('msg_type', '') == 'click_callback':\n if self.click_callback is not None:\n self.click_callback(content['coord']['x'], content['coord']['y'], content['coord']['z'])\n\n\nclass VoxelsIpyDW(Drawable):\n \"\"\"\n 3D volumetric data.\n\n By default, the voxels are a grid inscribed in the -0.5 < x, y, z < 0.5 cube\n regardless of the passed voxel array shape (aspect ratio etc.).\n Different grid size, shape and rotation can be obtained using the model_matrix.\n\n Attributes:\n voxels: `array_like`.\n 3D array of `int` in range (0, 255).\n 0 means empty voxel, 1 and above refer to consecutive color_map entries.\n color_map: `array_like`.\n Flat array of `int` packed RGB colors (0xff0000 is red, 0xff is blue).\n\n The color defined at index i is for voxel value (i+1), e.g.:\n\n | color_map = [0xff, 0x00ff]\n | voxels =\n | [\n | 0, # empty voxel\n | 1, # blue voxel\n | 2 # red voxel\n | ]\n\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n opacity: `float`.\n Opacity of voxels.\n outlines: `bool`.\n Whether mesh should display with outlines.\n outlines_color: `int`.\n Packed RGB color of the resulting outlines (0xff0000 is red, 0xff is blue)\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n voxels = DataUnion(default_value=[], dtype=np.uint8).tag(sync=True, **data_union_serialization)\n color_map = Array(dtype=np.uint32).tag(sync=True, **array_serialization_wrap('color_map'))\n wireframe = Bool().tag(sync=True)\n outlines = Bool().tag(sync=True)\n outlines_color = Int(min=0, max=0xffffff).tag(sync=True)\n click_callback = None\n opacity = Float(min=0.0, max=1.0, default_value=1.0).tag(sync=True)\n model_matrix = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap('model_matrix'))\n\n def __init__(self, **kwargs):\n super(VoxelsIpyDW, self).__init__(**kwargs)\n\n self.set_trait('type', 'Voxels')\n self.on_msg(self._handle_custom_msg)\n\n def _handle_custom_msg(self, content, buffers):\n if content.get('msg_type', '') == 'click_callback':\n if self.click_callback is not None:\n self.click_callback(content['coord']['x'], content['coord']['y'], content['coord']['z'])\n"
]
| [
[
"numpy.finfo",
"numpy.array"
]
]
|
NLP-Zoo/ReRank | [
"fb125a35b8b9ae8241e237f92d001e29847bee34"
]
| [
"distill_model.py"
]
| [
"# -*- coding: utf-8 -*-\n# @Time : 2020/9/29 9:33\n# @Author : xiaolu\n# @FileName: Distill_Model.py\n# @Software: PyCharm\nimport torch\nfrom torch import nn\nfrom transformers import BertLayer\nfrom transformers import BertModel\nfrom transformers import BertConfig\n\n\nclass BertEmbeddings(nn.Module):\n # bert词嵌入部分\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):\n input_shape = input_ids.size()\n seq_length = input_shape[1]\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n if position_ids is None:\n position_ids = torch.arange(seq_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).expand(input_shape)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n embeddings = inputs_embeds + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass CModel(nn.Module):\n def __init__(self, device):\n super(CModel, self).__init__()\n self.device = device\n self.num_labels = 2\n self.config = BertConfig.from_pretrained('./roberta_pretrain/bert_config.json')\n self.embeddings = BertEmbeddings(self.config)\n\n num_layers = 3\n self.layer = nn.ModuleList([BertLayer(self.config) for _ in range(num_layers)])\n self.output = nn.Linear(self.config.hidden_size, self.num_labels) # 分类\n\n def forward(self, input_ids=None, attention_mask=None, token_type_ids=None):\n input_shape = input_ids.size()\n embedding_output = self.embeddings(input_ids=input_ids, token_type_ids=token_type_ids)\n # print(embedding_output.size()) # torch.Size([2, 512, 768])\n\n # 对attention_mask进行处理\n extended_attention_mask = attention_mask[:, None, None, :]\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n # 进行三层bert的计算\n layer_3_output = []\n hidden_states = embedding_output\n for i, layer_module in enumerate(self.layer):\n layer_outputs = layer_module(hidden_states, extended_attention_mask)\n hidden_states = layer_outputs[0]\n layer_3_output.append(hidden_states)\n\n output_state = hidden_states[:, 0]\n # print(output.size()) # torch.Size([2, 768])\n logits = self.output(output_state)\n logits = logits.softmax(dim=1)\n return logits, layer_3_output\n\n "
]
| [
[
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.arange",
"torch.nn.Embedding"
]
]
|
embmike/Real-Time-Object-Detection-With-Yolo | [
"42c0b646c1e6794dbd8b9aa1e0002b4201981cfc"
]
| [
"traffic_object_detection.py"
]
| [
"from __future__ import division\nimport time\nimport torch \nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nimport cv2 \nfrom util import *\nfrom darknet import Darknet\nfrom preprocess import prep_image, inp_to_image, letterbox_image\nimport pandas as pd\nimport random \nimport pickle as pkl\nimport argparse\n\n'''\ndef get_test_input(input_dim, CUDA):\n img = cv2.imread(\"dog-cycle-car.png\")\n img = cv2.resize(img, (input_dim, input_dim)) \n img_ = img[:,:,::-1].transpose((2,0,1))\n img_ = img_[np.newaxis,:,:,:]/255.0\n img_ = torch.from_numpy(img_).float()\n img_ = Variable(img_)\n \n if CUDA:\n img_ = img_.cuda()\n \n return img_\n'''\n\ndef prep_image(img, inp_dim):\n \"\"\"\n Prepare image for inputting to the neural network. \n \n Returns a Variable \n \"\"\"\n\n orig_im = img\n dim = orig_im.shape[1], orig_im.shape[0]\n img = (letterbox_image(orig_im, (inp_dim, inp_dim)))\n img_ = img[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim\n\ndef write(x, img):\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n label = \"{0}\".format(classes[cls])\n color = random.choice(colors)\n cv2.rectangle(img, c1, c2,color, 1)\n t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]\n c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4\n cv2.rectangle(img, c1, c2,color, -1)\n cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);\n return img\n\ndef arg_parse():\n \"\"\"\n Parse arguements to the detect module\n \n \"\"\"\n \n \n parser = argparse.ArgumentParser(description='YOLO v3 Video Detection Module')\n \n parser.add_argument(\"--video\", dest = 'video', help = \n \"Video to run detection upon\",\n default = \"video.avi\", type = str)\n parser.add_argument(\"--dataset\", dest = \"dataset\", help = \"Dataset on which the network has been trained\", default = \"pascal\")\n parser.add_argument(\"--confidence\", dest = \"confidence\", help = \"Object Confidence to filter predictions\", default = 0.5)\n parser.add_argument(\"--nms_thresh\", dest = \"nms_thresh\", help = \"NMS Threshhold\", default = 0.4)\n parser.add_argument(\"--cfg\", dest = 'cfgfile', help = \n \"Config file\",\n default = \"cfg/yolov3.cfg\", type = str)\n parser.add_argument(\"--weights\", dest = 'weightsfile', help = \n \"weightsfile\",\n default = \"yolov3.weights\", type = str)\n parser.add_argument(\"--reso\", dest = 'reso', help = \n \"Input resolution of the network. Increase to increase accuracy. 
Decrease to increase speed\",\n default = \"416\", type = str)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = arg_parse()\n confidence = float(args.confidence)\n nms_thesh = float(args.nms_thresh)\n start = 0\n\n CUDA = torch.cuda.is_available()\n\n num_classes = 80\n\n CUDA = torch.cuda.is_available()\n \n bbox_attrs = 5 + num_classes\n \n print(\"Loading network.....\")\n model = Darknet(args.cfgfile)\n model.load_weights(args.weightsfile)\n print(\"Network successfully loaded\")\n\n model.net_info[\"height\"] = args.reso\n inp_dim = int(model.net_info[\"height\"])\n assert inp_dim % 32 == 0 \n assert inp_dim > 32\n\n if CUDA:\n model.cuda()\n \n #model(get_test_input(inp_dim, CUDA), CUDA)\n\n #model.eval()\n \n videofile = args.video\n \n cap = cv2.VideoCapture(videofile)\n \n assert cap.isOpened(), 'Cannot capture source'\n \n frames = 0\n start = time.time() \n while cap.isOpened():\n \n ret, frame = cap.read()\n if ret:\n \n\n img, orig_im, dim = prep_image(frame, inp_dim)\n \n im_dim = torch.FloatTensor(dim).repeat(1,2) \n \n \n if CUDA:\n im_dim = im_dim.cuda()\n img = img.cuda()\n \n with torch.no_grad(): \n output = model(Variable(img), CUDA)\n output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n\n if type(output) == int:\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n cv2.imshow(\"frame\", orig_im)\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n \n \n\n \n im_dim = im_dim.repeat(output.size(0), 1)\n scaling_factor = torch.min(inp_dim/im_dim,1)[0].view(-1,1)\n \n output[:,[1,3]] -= (inp_dim - scaling_factor*im_dim[:,0].view(-1,1))/2\n output[:,[2,4]] -= (inp_dim - scaling_factor*im_dim[:,1].view(-1,1))/2\n \n output[:,1:5] /= scaling_factor\n \n for i in range(output.shape[0]):\n output[i, [1,3]] = torch.clamp(output[i, [1,3]], 0.0, im_dim[i,0])\n output[i, [2,4]] = torch.clamp(output[i, [2,4]], 0.0, im_dim[i,1])\n \n classes = load_classes('data/coco.names')\n colors = pkl.load(open(\"pallete\", \"rb\"))\n \n list(map(lambda x: write(x, orig_im), output))\n \n \n cv2.imshow(\"frame\", orig_im)\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n \n else:\n break\n \n\n \n \n\n"
]
| [
[
"torch.min",
"torch.autograd.Variable",
"torch.no_grad",
"torch.FloatTensor",
"torch.clamp",
"torch.from_numpy",
"torch.cuda.is_available"
]
]
|
KKowalewski24/MUM | [
"c70137e7646e33dd2c902d96ff8145e73ececc54"
]
| [
"Task6/Program/module/CFS/entropy_estimators.py"
]
| [
"# Written by Greg Ver Steeg (http://www.isi.edu/~gregv/npeet.html)\n\nimport random\nfrom math import log\n\nimport numpy as np\nimport numpy.random as nr\nimport scipy.spatial as ss\nfrom scipy.special import digamma\n\n\n# continuous estimators\n\ndef entropy(x, k=3, base=2):\n \"\"\"\n The classic K-L k-nearest neighbor continuous entropy estimator x should be a list of vectors,\n e.g. x = [[1.3],[3.7],[5.1],[2.4]] if x is a one-dimensional scalar and we have four samples\n \"\"\"\n\n assert k <= len(x) - 1, \"Set k smaller than num. samples - 1\"\n d = len(x[0])\n N = len(x)\n intens = 1e-10 # small noise to break degeneracy, see doc.\n x = [list(p + intens * nr.rand(len(x[0]))) for p in x]\n tree = ss.cKDTree(x)\n nn = [tree.query(point, k + 1, p=float('inf'))[0][k] for point in x]\n const = digamma(N) - digamma(k) + d * log(2)\n return (const + d * np.mean(map(log, nn))) / log(base)\n\n\ndef mi(x, y, k=3, base=2):\n \"\"\"\n Mutual information of x and y; x, y should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]\n if x is a one-dimensional scalar and we have four samples\n \"\"\"\n\n assert len(x) == len(y), \"Lists should have same length\"\n assert k <= len(x) - 1, \"Set k smaller than num. samples - 1\"\n intens = 1e-10 # small noise to break degeneracy, see doc.\n x = [list(p + intens * nr.rand(len(x[0]))) for p in x]\n y = [list(p + intens * nr.rand(len(y[0]))) for p in y]\n points = zip2(x, y)\n # Find nearest neighbors in joint space, p=inf means max-norm\n tree = ss.cKDTree(points)\n dvec = [tree.query(point, k + 1, p=float('inf'))[0][k] for point in points]\n a, b, c, d = avgdigamma(x, dvec), avgdigamma(y, dvec), digamma(k), digamma(len(x))\n return (-a - b + c + d) / log(base)\n\n\ndef cmi(x, y, z, k=3, base=2):\n \"\"\"\n Mutual information of x and y, conditioned on z; x, y, z should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]\n if x is a one-dimensional scalar and we have four samples\n \"\"\"\n\n assert len(x) == len(y), \"Lists should have same length\"\n assert k <= len(x) - 1, \"Set k smaller than num. samples - 1\"\n intens = 1e-10 # small noise to break degeneracy, see doc.\n x = [list(p + intens * nr.rand(len(x[0]))) for p in x]\n y = [list(p + intens * nr.rand(len(y[0]))) for p in y]\n z = [list(p + intens * nr.rand(len(z[0]))) for p in z]\n points = zip2(x, y, z)\n # Find nearest neighbors in joint space, p=inf means max-norm\n tree = ss.cKDTree(points)\n dvec = [tree.query(point, k+1, p=float('inf'))[0][k] for point in points]\n a, b, c, d = avgdigamma(zip2(x, z), dvec), avgdigamma(zip2(y, z), dvec), avgdigamma(z, dvec), digamma(k)\n return (-a-b+c+d)/log(base)\n\n\ndef kldiv(x, xp, k=3, base=2):\n \"\"\"\n KL Divergence between p and q for x~p(x), xp~q(x); x, xp should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]\n if x is a one-dimensional scalar and we have four samples\n \"\"\"\n\n assert k <= len(x) - 1, \"Set k smaller than num. samples - 1\"\n assert k <= len(xp) - 1, \"Set k smaller than num. 
samples - 1\"\n assert len(x[0]) == len(xp[0]), \"Two distributions must have same dim.\"\n d = len(x[0])\n n = len(x)\n m = len(xp)\n const = log(m) - log(n - 1)\n tree = ss.cKDTree(x)\n treep = ss.cKDTree(xp)\n nn = [tree.query(point, k + 1, p=float('inf'))[0][k] for point in x]\n nnp = [treep.query(point, k, p=float('inf'))[0][k - 1] for point in x]\n return (const + d * np.mean(map(log, nnp)) - d * np.mean(map(log, nn))) / log(base)\n\n\n# Discrete estimators\ndef entropyd(sx, base=2):\n \"\"\"\n Discrete entropy estimator given a list of samples which can be any hashable object\n \"\"\"\n\n return entropyfromprobs(hist(sx), base=base)\n\n\ndef midd(x, y):\n \"\"\"\n Discrete mutual information estimator given a list of samples which can be any hashable object\n \"\"\"\n\n return -entropyd(list(zip(x, y))) + entropyd(x) + entropyd(y)\n\n\ndef cmidd(x, y, z):\n \"\"\"\n Discrete mutual information estimator given a list of samples which can be any hashable object\n \"\"\"\n\n return entropyd(list(zip(y, z)))+entropyd(list(zip(x, z)))-entropyd(list(zip(x, y, z)))-entropyd(z)\n\n\ndef hist(sx):\n # Histogram from list of samples\n d = dict()\n for s in sx:\n d[s] = d.get(s, 0) + 1\n return map(lambda z: float(z) / len(sx), d.values())\n\n\ndef entropyfromprobs(probs, base=2):\n # Turn a normalized list of probabilities of discrete outcomes into entropy (base 2)\n return -sum(map(elog, probs)) / log(base)\n\n\ndef elog(x):\n # for entropy, 0 log 0 = 0. but we get an error for putting log 0\n if x <= 0. or x >= 1.:\n return 0\n else:\n return x * log(x)\n\n\n# Mixed estimators\ndef micd(x, y, k=3, base=2, warning=True):\n \"\"\" If x is continuous and y is discrete, compute mutual information\n \"\"\"\n\n overallentropy = entropy(x, k, base)\n n = len(y)\n word_dict = dict()\n for sample in y:\n word_dict[sample] = word_dict.get(sample, 0) + 1. / n\n yvals = list(set(word_dict.keys()))\n\n mi = overallentropy\n for yval in yvals:\n xgiveny = [x[i] for i in range(n) if y[i] == yval]\n if k <= len(xgiveny) - 1:\n mi -= word_dict[yval] * entropy(xgiveny, k, base)\n else:\n if warning:\n print(\"Warning, after conditioning, on y={0} insufficient data. Assuming maximal entropy in this case.\".format(yval))\n mi -= word_dict[yval]*overallentropy\n return mi # units already applied\n\n\n# Utility functions\ndef vectorize(scalarlist):\n \"\"\"\n Turn a list of scalars into a list of one-d vectors\n \"\"\"\n\n return [(x,) for x in scalarlist]\n\n\ndef shuffle_test(measure, x, y, z=False, ns=200, ci=0.95, **kwargs):\n \"\"\"\n Shuffle test\n Repeatedly shuffle the x-values and then estimate measure(x,y,[z]).\n Returns the mean and conf. interval ('ci=0.95' default) over 'ns' runs, 'measure' could me mi,cmi,\n e.g. Keyword arguments can be passed. Mutual information and CMI should have a mean near zero.\n \"\"\"\n\n xp = x[:] # A copy that we can shuffle\n outputs = []\n for i in range(ns):\n random.shuffle(xp)\n if z:\n outputs.append(measure(xp, y, z, **kwargs))\n else:\n outputs.append(measure(xp, y, **kwargs))\n outputs.sort()\n return np.mean(outputs), (outputs[int((1. - ci) / 2 * ns)], outputs[int((1. 
+ ci) / 2 * ns)])\n\n\n# Internal functions\ndef avgdigamma(points, dvec):\n # This part finds number of neighbors in some radius in the marginal space\n # returns expectation value of <psi(nx)>\n N = len(points)\n tree = ss.cKDTree(points)\n avg = 0.\n for i in range(N):\n dist = dvec[i]\n # subtlety, we don't include the boundary point,\n # but we are implicitly adding 1 to kraskov def bc center point is included\n num_points = len(tree.query_ball_point(points[i], dist - 1e-15, p=float('inf')))\n avg += digamma(num_points) / N\n return avg\n\n\ndef zip2(*args):\n # zip2(x,y) takes the lists of vectors and makes it a list of vectors in a joint space\n # E.g. zip2([[1],[2],[3]],[[4],[5],[6]]) = [[1,4],[2,5],[3,6]]\n return [sum(sublist, []) for sublist in zip(*args)]\n"
]
| [
[
"scipy.spatial.cKDTree",
"scipy.special.digamma",
"numpy.mean"
]
]
|
aam-at/cleverhans_tf2 | [
"b5a122a8c8bd81c5f3cbaa58d63c4eb4dc5e344e"
]
| [
"examples/multigpu_advtrain/resnet_tf.py"
]
| [
"# https://github.com/tensorflow/models/blob/master/research/resnet/resnet_model.py\n#\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"ResNet model.\n\nRelated papers:\nhttps://arxiv.org/pdf/1603.05027v2.pdf\nhttps://arxiv.org/pdf/1512.03385v1.pdf\nhttps://arxiv.org/pdf/1605.07146v1.pdf\n\"\"\"\nfrom collections import namedtuple\n\nimport tensorflow as tf\nimport six\n\nfrom model import MLPnGPU\nfrom model import Conv2DnGPU\nfrom model import LinearnGPU\nfrom model import LayerNorm\n\nHParams = namedtuple('HParams',\n 'batch_size, nb_classes, min_lrn_rate, lrn_rate, '\n 'num_residual_units, use_bottleneck, weight_decay_rate, '\n 'relu_leakiness, momentum')\n\n\nclass ResNetTF(MLPnGPU):\n \"\"\"ResNet model.\"\"\"\n\n def __init__(self, batch_size=None, name=None, **kwargs):\n NB_CLASSES = 10\n super(ResNetTF, self).__init__(nb_classes=NB_CLASSES, layers=[],\n input_shape=None)\n self.global_step = tf.contrib.framework.get_or_create_global_step()\n self.hps = HParams(batch_size=batch_size,\n nb_classes=NB_CLASSES,\n min_lrn_rate=0.0001,\n lrn_rate=0.1,\n num_residual_units=5,\n use_bottleneck=False,\n weight_decay_rate=0.0002,\n relu_leakiness=0.1,\n momentum=.9)\n self.layers = []\n self.layer_idx = 0\n self.init_layers = True\n self.decay_cost = None\n self.training = None\n self.device_name = None\n\n def set_training(self, training=False):\n super(ResNetTF, self).set_training(training)\n self.training = training\n\n def fprop(self, x):\n self.layer_idx = 0\n with tf.compat.v1.variable_scope('Resnet'):\n logits, probs = self._build_model(x)\n self.init_layers = False\n states = {'logits': logits, 'probs': probs}\n return states\n\n def _stride_arr(self, stride):\n \"\"\"Map a stride scalar to the stride array for tf.nn.conv2d.\"\"\"\n return [1, stride, stride, 1]\n\n def _build_model(self, x):\n \"\"\"Build the core model within the graph.\"\"\"\n with tf.compat.v1.variable_scope('init'):\n x = self._conv('init_conv', x, 3, x.shape[3], 16,\n self._stride_arr(1))\n\n strides = [1, 2, 2]\n activate_before_residual = [True, False, False]\n if self.hps.use_bottleneck:\n res_func = self._bottleneck_residual\n filters = [16, 64, 128, 256]\n else:\n res_func = self._residual\n filters = [16, 16, 32, 64]\n # Uncomment the following codes to use w28-10 wide residual\n # network.\n # It is more memory efficient than very deep residual network and\n # has\n # comparably good performance.\n # https://arxiv.org/pdf/1605.07146v1.pdf\n # filters = [16, 160, 320, 640]\n # Update hps.num_residual_units to 4\n\n with tf.compat.v1.variable_scope('unit_1_0'):\n x = res_func(x, filters[0], filters[1],\n self._stride_arr(strides[0]),\n activate_before_residual[0])\n for i in six.moves.range(1, self.hps.num_residual_units):\n with tf.compat.v1.variable_scope('unit_1_%d' % i):\n x = res_func(x, filters[1], filters[1],\n self._stride_arr(1), False)\n\n with 
tf.compat.v1.variable_scope('unit_2_0'):\n x = res_func(x, filters[1], filters[2],\n self._stride_arr(strides[1]),\n activate_before_residual[1])\n for i in six.moves.range(1, self.hps.num_residual_units):\n with tf.compat.v1.variable_scope('unit_2_%d' % i):\n x = res_func(x, filters[2], filters[2],\n self._stride_arr(1), False)\n\n with tf.compat.v1.variable_scope('unit_3_0'):\n x = res_func(x, filters[2], filters[3],\n self._stride_arr(strides[2]),\n activate_before_residual[2])\n for i in six.moves.range(1, self.hps.num_residual_units):\n with tf.compat.v1.variable_scope('unit_3_%d' % i):\n x = res_func(x, filters[3], filters[3],\n self._stride_arr(1), False)\n\n with tf.compat.v1.variable_scope('unit_last'):\n x = self._layer_norm('final_bn', x)\n x = self._relu(x, self.hps.relu_leakiness)\n x = self._global_avg_pool(x)\n\n with tf.compat.v1.variable_scope('logit'):\n logits = self._fully_connected(x, self.hps.nb_classes)\n predictions = tf.nn.softmax(logits)\n\n return logits, predictions\n\n def build_cost(self, labels, logits):\n \"\"\"\n Build the graph for cost from the logits if logits are provided.\n If predictions are provided, logits are extracted from the operation.\n \"\"\"\n op = logits.op\n if \"softmax\" in str(op).lower():\n logits, = op.inputs\n\n with tf.compat.v1.variable_scope('costs'):\n xent = tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=tf.stop_gradient(labels))\n cost = tf.reduce_mean(input_tensor=xent, name='xent')\n cost += self._decay()\n cost = cost\n\n return cost\n\n def build_train_op_from_cost(self, cost):\n \"\"\"Build training specific ops for the graph.\"\"\"\n self.lrn_rate = tf.constant(self.hps.lrn_rate, tf.float32,\n name='learning_rate')\n self.momentum = tf.constant(self.hps.momentum, tf.float32,\n name='momentum')\n\n trainable_variables = tf.compat.v1.trainable_variables()\n grads = tf.gradients(ys=cost, xs=trainable_variables)\n devs = {v.device for v in trainable_variables}\n assert len(devs) == 1, ('There should be no trainable variables'\n ' on any device other than the last GPU.')\n\n optimizer = tf.compat.v1.train.MomentumOptimizer(self.lrn_rate, self.momentum)\n\n gv_pairs = zip(grads, trainable_variables)\n gv_pairs = [gv for gv in gv_pairs if gv[0] is not None]\n devs = {gv[1].device for gv in gv_pairs}\n assert len(devs) == 1, ('There should be no gradients wrt'\n ' vars on other GPUs.')\n\n apply_op = optimizer.apply_gradients(\n gv_pairs,\n global_step=self.global_step, name='train_step')\n\n train_ops = [apply_op]\n train_op = tf.group(*train_ops)\n return train_op\n\n def _layer_norm(self, name, x):\n \"\"\"Layer normalization.\"\"\"\n if self.init_layers:\n bn = LayerNorm()\n bn.name = name\n self.layers += [bn]\n else:\n bn = self.layers[self.layer_idx]\n self.layer_idx += 1\n bn.device_name = self.device_name\n bn.set_training(self.training)\n x = bn.fprop(x)\n return x\n\n def _residual(self, x, in_filter, out_filter, stride,\n activate_before_residual=False):\n \"\"\"Residual unit with 2 sub layers.\"\"\"\n if activate_before_residual:\n with tf.compat.v1.variable_scope('shared_activation'):\n x = self._layer_norm('init_bn', x)\n x = self._relu(x, self.hps.relu_leakiness)\n orig_x = x\n else:\n with tf.compat.v1.variable_scope('residual_only_activation'):\n orig_x = x\n x = self._layer_norm('init_bn', x)\n x = self._relu(x, self.hps.relu_leakiness)\n\n with tf.compat.v1.variable_scope('sub1'):\n x = self._conv('conv1', x, 3, in_filter, out_filter, stride)\n\n with tf.compat.v1.variable_scope('sub2'):\n x = 
self._layer_norm('bn2', x)\n x = self._relu(x, self.hps.relu_leakiness)\n x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])\n\n with tf.compat.v1.variable_scope('sub_add'):\n if in_filter != out_filter:\n orig_x = tf.nn.avg_pool2d(input=orig_x, ksize=stride, strides=stride, padding='VALID')\n orig_x = tf.pad(\n tensor=orig_x, paddings=[[0, 0], [0, 0], [0, 0],\n [(out_filter - in_filter) // 2,\n (out_filter - in_filter) // 2]])\n x += orig_x\n\n return x\n\n def _bottleneck_residual(self, x, in_filter, out_filter, stride,\n activate_before_residual=False):\n \"\"\"Bottleneck residual unit with 3 sub layers.\"\"\"\n if activate_before_residual:\n with tf.compat.v1.variable_scope('common_bn_relu'):\n x = self._layer_norm('init_bn', x)\n x = self._relu(x, self.hps.relu_leakiness)\n orig_x = x\n else:\n with tf.compat.v1.variable_scope('residual_bn_relu'):\n orig_x = x\n x = self._layer_norm('init_bn', x)\n x = self._relu(x, self.hps.relu_leakiness)\n\n with tf.compat.v1.variable_scope('sub1'):\n x = self._conv('conv1', x, 1, in_filter, out_filter / 4, stride)\n\n with tf.compat.v1.variable_scope('sub2'):\n x = self._layer_norm('bn2', x)\n x = self._relu(x, self.hps.relu_leakiness)\n x = self._conv('conv2', x, 3, out_filter / 4,\n out_filter / 4, [1, 1, 1, 1])\n\n with tf.compat.v1.variable_scope('sub3'):\n x = self._layer_norm('bn3', x)\n x = self._relu(x, self.hps.relu_leakiness)\n x = self._conv('conv3', x, 1, out_filter /\n 4, out_filter, [1, 1, 1, 1])\n\n with tf.compat.v1.variable_scope('sub_add'):\n if in_filter != out_filter:\n orig_x = self._conv('project', orig_x, 1,\n in_filter, out_filter, stride)\n x += orig_x\n\n return x\n\n def _decay(self):\n \"\"\"L2 weight decay loss.\"\"\"\n if self.decay_cost is not None:\n return self.decay_cost\n\n costs = []\n if self.device_name is None:\n for var in tf.compat.v1.trainable_variables():\n if var.op.name.find(r'DW') > 0:\n costs.append(tf.nn.l2_loss(var))\n else:\n for layer in self.layers:\n for var in layer.params_device[self.device_name].values():\n if (isinstance(var, tf.Variable) and var.op.name.find(r'DW') > 0):\n costs.append(tf.nn.l2_loss(var))\n\n self.decay_cost = tf.multiply(self.hps.weight_decay_rate,\n tf.add_n(costs))\n return self.decay_cost\n\n def _conv(self, name, x, filter_size, in_filters, out_filters, strides):\n \"\"\"Convolution.\"\"\"\n if self.init_layers:\n conv = Conv2DnGPU(out_filters,\n (filter_size, filter_size),\n strides[1:3], 'SAME', w_name='DW')\n conv.name = name\n self.layers += [conv]\n else:\n conv = self.layers[self.layer_idx]\n self.layer_idx += 1\n conv.device_name = self.device_name\n conv.set_training(self.training)\n return conv.fprop(x)\n\n def _relu(self, x, leakiness=0.0):\n \"\"\"Relu, with optional leaky support.\"\"\"\n return tf.compat.v1.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')\n\n def _fully_connected(self, x, out_dim):\n \"\"\"FullyConnected layer for final output.\"\"\"\n if self.init_layers:\n fc = LinearnGPU(out_dim, w_name='DW')\n fc.name = 'logits'\n self.layers += [fc]\n else:\n fc = self.layers[self.layer_idx]\n self.layer_idx += 1\n fc.device_name = self.device_name\n fc.set_training(self.training)\n return fc.fprop(x)\n\n def _global_avg_pool(self, x):\n assert x.get_shape().ndims == 4\n return tf.reduce_mean(input_tensor=x, axis=[1, 2])\n"
]
| [
[
"tensorflow.less",
"tensorflow.group",
"tensorflow.contrib.framework.get_or_create_global_step",
"tensorflow.compat.v1.train.MomentumOptimizer",
"tensorflow.compat.v1.variable_scope",
"tensorflow.gradients",
"tensorflow.add_n",
"tensorflow.stop_gradient",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.constant",
"tensorflow.nn.l2_loss",
"tensorflow.nn.softmax",
"tensorflow.pad",
"tensorflow.reduce_mean",
"tensorflow.nn.avg_pool2d"
]
]
|
theGreenJedi/neon | [
"b85ba0fbbb0458d8a8599e5ead335959b10318c1"
]
| [
"tests/test_gru.py"
]
| [
"# ----------------------------------------------------------------------------\n# Copyright 2015-2016 Nervana Systems Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\n\"\"\"\nThis test compares the NEON GRU layer against a numpy reference GRU\nimplementation and compares the NEON GRU bprop deltas to the gradients\nestimated by finite differences.\nThe numpy reference GRU contains static methods for forward pass\nand backward pass.\nIt runs a SINGLE layer of GRU and compare numerical values\n\nThe following are made sure to be the same in both GRUs\n - initial h values (all zeros)\n - initial W, b (ones or random values)\n - input data (random data matrix)\n - input error (random data matrix)\n - the data shape inside GRU_ref is seq_len, 1, input_size.\n Need transpose\n - the data shape inside GRU (neon) is is batch_size, seq_len * batch_size\n\n\"\"\"\nimport itertools as itt\nimport numpy as np\n\nfrom neon import NervanaObject, logger as neon_logger\nfrom neon.initializers.initializer import Constant, Gaussian\nfrom neon.layers import GRU\nfrom neon.transforms import Logistic, Tanh\nfrom gru_ref import GRU as RefGRU\nfrom utils import allclose_with_out\n\n\ndef pytest_generate_tests(metafunc):\n bsz_rng = [1]\n\n if 'refgruargs' in metafunc.fixturenames:\n fargs = []\n if metafunc.config.option.all:\n seq_rng = [2, 3, 4]\n inp_rng = [3, 5, 10]\n out_rng = [3, 5, 10]\n else:\n seq_rng = [3]\n inp_rng = [5]\n out_rng = [10]\n fargs = itt.product(seq_rng, inp_rng, out_rng, bsz_rng)\n metafunc.parametrize('refgruargs', fargs)\n\n if 'gradgruargs' in metafunc.fixturenames:\n fargs = []\n if metafunc.config.option.all:\n seq_rng = [2, 3]\n inp_rng = [5, 10]\n out_rng = [3, 5, 10]\n else:\n seq_rng = [3]\n inp_rng = [5]\n out_rng = [10]\n fargs = itt.product(seq_rng, inp_rng, out_rng, bsz_rng)\n metafunc.parametrize('gradgruargs', fargs)\n\n\ndef test_ref_compare_ones(backend_default, refgruargs):\n # run comparison with reference code\n # for all ones init\n seq_len, input_size, hidden_size, batch_size = refgruargs\n NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size\n\n check_gru(seq_len, input_size, hidden_size,\n batch_size, Constant(val=1.0), [1.0, 0.0])\n\n\ndef test_ref_compare_rand(backend_default, refgruargs):\n # run comparison with reference code\n # for all ones init\n seq_len, input_size, hidden_size, batch_size = refgruargs\n NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size\n\n check_gru(seq_len, input_size, hidden_size, batch_size,\n Gaussian())\n\n\n# compare neon GRU to reference GRU implementation\ndef check_gru(seq_len, input_size, hidden_size,\n batch_size, init_func, inp_moms=[0.0, 1.0]):\n # init_func is the initializer for the model params\n # inp_moms is the [ mean, std dev] of the random input\n input_shape = (input_size, seq_len * batch_size)\n output_shape = (hidden_size, seq_len * batch_size)\n NervanaObject.be.bsz = NervanaObject.be.batch_size = 
batch_size\n\n # neon GRU\n gru = GRU(hidden_size,\n init_func,\n activation=Tanh(),\n gate_activation=Logistic())\n\n # generate random input tensor\n inp = np.random.rand(*input_shape) * inp_moms[1] + inp_moms[0]\n inpa = gru.be.array(inp)\n # generate random deltas tensor\n deltas = np.random.randn(*output_shape)\n\n # run neon fprop\n gru.configure((input_size, seq_len))\n gru.prev_layer = True\n gru.allocate()\n gru.set_deltas([gru.be.iobuf(gru.in_shape)])\n gru.fprop(inpa)\n\n # reference numpy GRU\n gru_ref = RefGRU(input_size, hidden_size)\n WGRU = gru_ref.weights\n\n # make ref weights and biases the same with neon model\n r_range = list(range(hidden_size))\n z_range = list(range(hidden_size, hidden_size * 2))\n c_range = list(range(hidden_size * 2, hidden_size * 3))\n\n WGRU[gru_ref.weights_ind_br][:] = gru.b.get()[r_range]\n WGRU[gru_ref.weights_ind_bz][:] = gru.b.get()[z_range]\n WGRU[gru_ref.weights_ind_bc][:] = gru.b.get()[c_range]\n\n WGRU[gru_ref.weights_ind_Wxr][:] = gru.W_input.get()[r_range]\n WGRU[gru_ref.weights_ind_Wxz][:] = gru.W_input.get()[z_range]\n WGRU[gru_ref.weights_ind_Wxc][:] = gru.W_input.get()[c_range]\n\n WGRU[gru_ref.weights_ind_Rhr][:] = gru.W_recur.get()[r_range]\n WGRU[gru_ref.weights_ind_Rhz][:] = gru.W_recur.get()[z_range]\n WGRU[gru_ref.weights_ind_Rhc][:] = gru.W_recur.get()[c_range]\n\n # transpose input X and do fprop\n # the reference code expects these shapes:\n # input_shape: (seq_len, input_size, batch_size)\n # output_shape: (seq_len, hidden_size, batch_size)\n inp_ref = inp.copy().T.reshape(\n seq_len, batch_size, input_size).swapaxes(1, 2)\n deltas_ref = deltas.copy().T.reshape(\n seq_len, batch_size, hidden_size).swapaxes(1, 2)\n\n (dWGRU_ref, h_ref_list, dh_ref_list,\n dr_ref_list, dz_ref_list, dc_ref_list) = gru_ref.lossFun(inp_ref,\n deltas_ref)\n\n neon_logger.display('====Verifying hidden states====')\n neon_logger.display(allclose_with_out(gru.outputs.get(),\n h_ref_list,\n rtol=0.0,\n atol=1.0e-5))\n\n neon_logger.display('fprop is verified')\n\n # now test the bprop\n neon_logger.display('Making sure neon GRU matches numpy GRU in bprop')\n gru.bprop(gru.be.array(deltas))\n # grab the delta W from gradient buffer\n dWinput_neon = gru.dW_input.get()\n dWrecur_neon = gru.dW_recur.get()\n db_neon = gru.db.get()\n dWxr_neon = dWinput_neon[r_range]\n dWxz_neon = dWinput_neon[z_range]\n dWxc_neon = dWinput_neon[c_range]\n dWrr_neon = dWrecur_neon[r_range]\n dWrz_neon = dWrecur_neon[z_range]\n dWrc_neon = dWrecur_neon[c_range]\n dbr_neon = db_neon[r_range]\n dbz_neon = db_neon[z_range]\n dbc_neon = db_neon[c_range]\n\n drzc_neon = gru.rzhcan_delta_buffer.get()\n dr_neon = drzc_neon[r_range]\n dz_neon = drzc_neon[z_range]\n dc_neon = drzc_neon[c_range]\n\n dWxr_ref = dWGRU_ref[gru_ref.dW_ind_Wxr]\n dWxz_ref = dWGRU_ref[gru_ref.dW_ind_Wxz]\n dWxc_ref = dWGRU_ref[gru_ref.dW_ind_Wxc]\n dWrr_ref = dWGRU_ref[gru_ref.dW_ind_Rhr]\n dWrz_ref = dWGRU_ref[gru_ref.dW_ind_Rhz]\n dWrc_ref = dWGRU_ref[gru_ref.dW_ind_Rhc]\n dbr_ref = dWGRU_ref[gru_ref.dW_ind_br]\n dbz_ref = dWGRU_ref[gru_ref.dW_ind_bz]\n dbc_ref = dWGRU_ref[gru_ref.dW_ind_bc]\n\n # neon_logger.display '====Verifying hidden deltas ===='\n neon_logger.display('====Verifying r deltas ====')\n assert allclose_with_out(dr_neon,\n dr_ref_list,\n rtol=0.0,\n atol=1.0e-5)\n\n neon_logger.display('====Verifying z deltas ====')\n assert allclose_with_out(dz_neon,\n dz_ref_list,\n rtol=0.0,\n atol=1.0e-5)\n\n neon_logger.display('====Verifying hcan deltas ====')\n assert 
allclose_with_out(dc_neon,\n dc_ref_list,\n rtol=0.0,\n atol=1.0e-5)\n\n neon_logger.display('====Verifying update on W_input====')\n neon_logger.display('dWxr')\n assert allclose_with_out(dWxr_neon,\n dWxr_ref,\n rtol=0.0,\n atol=1.0e-5)\n neon_logger.display('dWxz')\n assert allclose_with_out(dWxz_neon,\n dWxz_ref,\n rtol=0.0,\n atol=1.0e-5)\n neon_logger.display('dWxc')\n assert allclose_with_out(dWxc_neon,\n dWxc_ref,\n rtol=0.0,\n atol=1.0e-5)\n\n neon_logger.display('====Verifying update on W_recur====')\n\n neon_logger.display('dWrr')\n assert allclose_with_out(dWrr_neon,\n dWrr_ref,\n rtol=0.0,\n atol=1.0e-5)\n neon_logger.display('dWrz')\n assert allclose_with_out(dWrz_neon,\n dWrz_ref,\n rtol=0.0,\n atol=1.0e-5)\n neon_logger.display('dWrc')\n assert allclose_with_out(dWrc_neon,\n dWrc_ref,\n rtol=0.0,\n atol=1.0e-5)\n\n neon_logger.display('====Verifying update on bias====')\n neon_logger.display('dbr')\n assert allclose_with_out(dbr_neon,\n dbr_ref,\n rtol=0.0,\n atol=1.0e-5)\n neon_logger.display('dbz')\n assert allclose_with_out(dbz_neon,\n dbz_ref,\n rtol=0.0,\n atol=1.0e-5)\n neon_logger.display('dbc')\n assert allclose_with_out(dbc_neon,\n dbc_ref,\n rtol=0.0,\n atol=1.0e-5)\n\n neon_logger.display('bprop is verified')\n\n return\n\n\ndef reset_gru(gru):\n # in order to run fprop multiple times\n # for the gradient check tests the\n # gru internal variables need to be\n # cleared\n gru.x = None\n gru.xs = None # just in case\n gru.outputs = None\n return\n\n\ndef test_gradient_neon_gru(backend_default, gradgruargs):\n seq_len, input_size, hidden_size, batch_size = gradgruargs\n NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size\n gradient_check(seq_len, input_size, hidden_size, batch_size)\n\n\ndef gradient_check(seq_len, input_size, hidden_size, batch_size,\n threshold=1.0e-3):\n # 'threshold' is the max fractional difference\n # between gradient estimate and\n # bprop deltas (def is 5%)\n # for a given set of layer parameters calculate\n # the gradients and compare to the derivatives\n # obtained with the bprop function. 
repeat this\n # for a range of perturbations and use the\n # perturbation size with the best results.\n # This is necessary for 32 bit computations\n\n min_max_err = -1.0 # minimum max error\n neon_logger.display('Perturb mag, max grad diff')\n for pert_exp in range(-5, 0):\n # need to generate the scaling and input outside\n # having an issue with the random number generator\n # when these are generated inside the gradient_calc\n # function\n input_shape = (input_size, seq_len * batch_size)\n output_shape = (hidden_size, seq_len * batch_size)\n\n rand_scale = np.random.random(output_shape) * 2.0 - 1.0\n inp = np.random.randn(*input_shape)\n\n pert_mag = 10.0**pert_exp\n (grad_est, deltas) = gradient_calc(seq_len,\n input_size,\n hidden_size,\n batch_size,\n epsilon=pert_mag,\n rand_scale=rand_scale,\n inp_bl=inp)\n dd = np.max(np.abs(grad_est - deltas))\n neon_logger.display('%e, %e' % (pert_mag, dd))\n if min_max_err < 0.0 or dd < min_max_err:\n min_max_err = dd\n # reset the seed so models are same in each run\n # allclose_with_out(grad_est,deltas, rtol=0.0, atol=0.0)\n NervanaObject.be.rng_reset()\n\n # check that best value of worst case error is less than threshold\n neon_logger.display('Worst case error %e with perturbation %e' % (min_max_err, pert_mag))\n neon_logger.display('Threshold %e' % (threshold))\n assert min_max_err < threshold\n\n\ndef gradient_calc(seq_len, input_size, hidden_size, batch_size,\n epsilon=None, rand_scale=None, inp_bl=None):\n NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size\n\n input_shape = (input_size, seq_len * batch_size)\n\n # generate input if one is not given\n if inp_bl is None:\n inp_bl = np.random.randn(*input_shape)\n\n # neon gru instance\n gru = GRU(hidden_size, init=Gaussian(), activation=Tanh(), gate_activation=Logistic())\n inpa = gru.be.array(np.copy(inp_bl))\n\n # run fprop on the baseline input\n gru.configure((input_size, seq_len))\n gru.prev_layer = True\n gru.allocate()\n gru.set_deltas([gru.be.iobuf(gru.in_shape)])\n out_bl = gru.fprop(inpa).get()\n\n # random scaling/hash to generate fake loss\n if rand_scale is None:\n rand_scale = np.random.random(out_bl.shape) * 2.0 - 1.0\n # loss function would be:\n # loss_bl = np.sum(rand_scale * out_bl)\n\n # run back prop with rand_scale as the errors\n # use copy to avoid any interactions\n deltas_neon = gru.bprop(gru.be.array(np.copy(rand_scale))).get()\n\n # add a perturbation to each input element\n grads_est = np.zeros(inpa.shape)\n inp_pert = inp_bl.copy()\n for pert_ind in range(inpa.size):\n save_val = inp_pert.flat[pert_ind]\n\n inp_pert.flat[pert_ind] = save_val + epsilon\n reset_gru(gru)\n gru.allocate()\n out_pos = gru.fprop(gru.be.array(inp_pert)).get()\n\n inp_pert.flat[pert_ind] = save_val - epsilon\n reset_gru(gru)\n gru.allocate()\n out_neg = gru.fprop(gru.be.array(inp_pert)).get()\n\n # calculate the loss with perturbations\n loss_pos = np.sum(rand_scale * out_pos)\n loss_neg = np.sum(rand_scale * out_neg)\n # compute the gradient estimate\n grad = 0.5 / float(epsilon) * (loss_pos - loss_neg)\n\n grads_est.flat[pert_ind] = grad\n\n # reset the perturbed input element\n inp_pert.flat[pert_ind] = save_val\n\n del gru\n return (grads_est, deltas_neon)\n"
]
| [
[
"numpy.random.rand",
"numpy.zeros",
"numpy.sum",
"numpy.copy",
"numpy.random.randn",
"numpy.abs",
"numpy.random.random"
]
]
|
KarizCache/serverless | [
"c5735afee29e104f3909f3b0140e993d461a5420"
]
| [
"benchmark/task-bench/dask/task_bench_core.py"
]
| [
"#!/usr/bin/env python\n#\n# Copyright 2020 Stanford University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import absolute_import, division, print_function\n\nimport cffi\nimport dask\nimport numpy as np\nimport os\nimport subprocess\n\n# Hack: This is in its own module to avoid having this get pickled, as\n# the CFFI handles are (obviously) unpickleable. By default Dask uses\n# cloudpickle to pickle tasks, which appears to do introspection on\n# their ASTs to figure out what globals they capture. Fortunately\n# cloudpickle does not also try to introspect the contents of locally\n# imported modules....\n\nroot_dir = os.path.dirname(os.path.dirname(__file__))\ncore_header = subprocess.check_output(\n [\n \"gcc\", \"-D\", \"__attribute__(x)=\", \"-E\", \"-P\",\n os.path.join(root_dir, \"core/core_c.h\")\n ]).decode(\"utf-8\")\nffi = cffi.FFI()\nffi.cdef(core_header)\nc = ffi.dlopen(\"libcore.so\")\n\n\ndef init_client():\n import argparse\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('-scheduler', required=False)\n parser.add_argument('-name', required=False)\n parser.add_argument('-expect-workers', type=int, default=0)\n args, unknown = parser.parse_known_args()\n\n print(args.scheduler)\n if args.scheduler:\n from dask.distributed import Client\n client = Client(args.scheduler, name=args.name)\n if args.expect_workers > 0:\n while True:\n num_workers = len(client.ncores())\n if num_workers >= args.expect_workers:\n break\n print(\n 'Client waiting for workers (have %s expect %s)' %\n (num_workers, args.expect_workers),\n flush=True)\n import time\n time.sleep(5)\n else:\n client = None\n\n return client\n\n\ndef encode_task_graph(graph):\n return np.frombuffer(\n ffi.buffer(ffi.addressof(graph), ffi.sizeof(graph)), dtype=np.ubyte)\n\n\ndef decode_task_graph(graph_array):\n return ffi.cast(\"task_graph_t *\", graph_array.ctypes.data)[0]\n\n\ndef app_create(args):\n c_args = []\n c_argv = ffi.new(\"char *[]\", len(args) + 1)\n for i, arg in enumerate(args):\n c_args.append(ffi.new(\"char []\", arg.encode('utf-8')))\n c_argv[i] = c_args[-1]\n c_argv[len(args)] = ffi.NULL\n\n app = c.app_create(len(args), c_argv)\n c.app_display(app)\n return app\n\n\ndef app_task_graphs(app):\n result = []\n graphs = c.app_task_graphs(app)\n for i in range(c.task_graph_list_num_task_graphs(graphs)):\n result.append(c.task_graph_list_task_graph(graphs, i))\n\n return result\n\n\ndef task_graph_dependencies(graph, timestep, point):\n last_offset = c.task_graph_offset_at_timestep(graph, timestep - 1)\n last_width = c.task_graph_width_at_timestep(graph, timestep - 1)\n\n if timestep == 0:\n last_offset, last_width = 0, 0\n\n dset = c.task_graph_dependence_set_at_timestep(graph, timestep)\n ilist = c.task_graph_dependencies(graph, dset, point)\n for i in range(0, c.interval_list_num_intervals(ilist)):\n interval = c.interval_list_interval(ilist, i)\n for dep in range(interval.start, interval.end + 1):\n if last_offset <= dep < last_offset + last_width:\n yield 
dep\n\n\nimport math \n\ndef execute_point_impl(graph_array, timestep, point, scratch, *inputs):\n graph = decode_task_graph(graph_array)\n\n print(\"Heeeelllllooo I am calling you\")\n input_ptrs = ffi.new(\n \"char *[]\", [ffi.cast(\"char *\", i.ctypes.data) for i in inputs])\n input_sizes = ffi.new(\"size_t []\", [i.shape[0] for i in inputs])\n \n\n print('------------------------>', type(graph.output_bytes_per_task), int(math.pow(2, point)))\n output_bytes = int(int(graph.output_bytes_per_task)//int(math.pow(2, point)))\n print(output_bytes, type(output_bytes))\n output = np.empty(output_bytes, dtype=np.ubyte)\n output_ptr = ffi.cast(\"char *\", output.ctypes.data)\n\n if scratch is not None:\n scratch_ptr = ffi.cast(\"char *\", scratch.ctypes.data)\n scratch_size = scratch.shape[0]\n else:\n scratch_ptr = ffi.NULL\n scratch_size = 0\n\n c.task_graph_execute_point_scratch(\n graph, timestep, point, output_ptr, output.shape[0], input_ptrs,\n input_sizes, len(inputs), scratch_ptr, scratch_size)\n\n return output\n\n\[email protected](nout=2)\ndef execute_point_scratch(graph_array, timestep, point, scratch, *inputs):\n return execute_point_impl(\n graph_array, timestep, point, scratch, *inputs), scratch\n\n_timestep = -1\n_point = -1\[email protected]\ndef execute_point_no_scratch(graph_array, timestep, point, *inputs, dask_key_name=None):\n return execute_point_impl(graph_array, timestep, point, None, *inputs)\n\n\ndef init_scratch_direct(scratch_bytes):\n scratch = np.empty(scratch_bytes, dtype=np.ubyte)\n scratch_ptr = ffi.cast(\"char *\", scratch.ctypes.data)\n c.task_graph_prepare_scratch(scratch_ptr, scratch_bytes)\n return scratch\n\n\[email protected]\ndef init_scratch_delayed(scratch_bytes):\n return init_scratch_direct(scratch_bytes)\n\n\[email protected]\ndef join(*args):\n pass\n\n\ndef splitter(value, idx):\n return value[idx]\n\n\n# Entry point for direct graph construction\ndef execute_point_direct(graph_array, timestep, point, scratch, *inputs):\n if scratch is not None:\n return execute_point_impl(\n graph_array, timestep, point, scratch, *inputs), scratch\n else:\n return execute_point_impl(graph_array, timestep, point, None, *inputs)\n\n\n# Entry points for dask.delayed\ndef execute_point_delayed(graph_array, timestep, point, scratch, *inputs):\n if scratch is not None:\n return execute_point_scratch(\n graph_array, timestep, point, scratch, *inputs)\n else:\n return execute_point_no_scratch(\n graph_array, timestep, point, *inputs, dask_key_name=f'execute_point_no_scratch-{timestep}-{point}'), None\n"
]
| [
[
"numpy.empty"
]
]
|
jstac/production_chains | [
"8e68ffab1902fa31f0bd839de142bd7cb4361777"
]
| [
"assorted_plots.py"
]
| [
"\"\"\"\nPlots used for the JET revision.\n\nAugust 2014\n\n\"\"\"\nfrom rp import *\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n\n# == Empirical data from Compustat == #\nyear = 2013\nfilename = 'compustat_data/USsales_{0}.csv'.format(year)\ndf = pd.read_csv(filename) \nsales = df['sales'] * (10**6) # unit = US Dollars\nempirical_obs = sorted(sales, reverse=True)\n\n# == Model parameters == #\ndef c(x):\n return np.exp(10 * x**2) - 1\nps = RPline(c=c, delta=1.01)\n\n# == Calculate different measures of firm size == #\nstages = ps.compute_stages()\nrev = ps.p_func(stages)\n\n# == Compute step size and value added of each firm == #\nJ = len(stages)\nva = np.empty(J-1)\nsteps = np.empty(J-1)\nfor i in range(J):\n if i + 1 < J:\n va[i] = ps.p_func(stages[i]) - ps.p_func(stages[i+1])\n steps[i] = stages[i] - stages[i+1]\nempl = c(steps)\n\n# == Select which size measure to use == #\nobs = rev * (empirical_obs[0] / rev[0]) \n# obs = va\n#obs = np.asarray(empirical_obs)\n\n# == Print summary statistics == #\n#print \"no of firms: \", len(obs)\n#print \"mean: \", obs.mean()\n#print \"median: \", np.median(obs)\n#print \"sd: \", obs.std()\nq = obs.mean() + 2 * obs.std()\n#print \"fraction of sample > m + 2s: \", np.mean(obs > q)\n#print \"mean / median: \", np.mean(obs) / np.median(obs)\n\n\n# == Setup for figures == #\n\n# == Zipf plot == #\nif 1: \n fig, ax = plt.subplots(figsize=(10, 6.0))\n z = np.log(obs[:-1])\n mu, sd = z.mean(), z.std()\n Z = mu + sd * np.random.randn(len(obs))\n ln_obs = np.exp(Z)\n ln_obs = np.sort(ln_obs)[::-1]\n ax.set_xlabel('log rank', fontsize=14)\n ax.set_ylabel('size', fontsize=14)\n ax.loglog(np.arange(len(obs)) + 1, obs, 'bo', label=\"observations\")\n ax.loglog(np.arange(len(ln_obs)) + 1, ln_obs, 'rp', alpha=0.3,\n label=\"lognormal approximation\")\n ax.legend(frameon=False)\n\n# == Histogram == #\nif 0:\n fig, axes = plt.subplots(3, 1, figsize=(10, 8))\n ax = axes[0]\n ax.hist(va, bins=26, label=\"value added\")\n ax.set_xlim(0, 1.1 * max(va))\n ax = axes[1]\n ax.hist(empl, bins=26, label=\"employment\")\n ax.set_xlim(0, 1.1 * max(empl))\n ax = axes[2]\n ax.hist(rev, bins=26, label=\"revenue\")\n ax.set_xlim(0, 1.1 * max(rev))\n for ax in axes:\n ax.legend(frameon=False, fontsize=14)\n ax.set_ylabel(\"number of firms\", fontsize=14)\n ax.set_yticks((0, 50, 100, 150))\n ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n\n\nplt.show()\n"
]
| [
[
"numpy.empty",
"numpy.log",
"numpy.exp",
"matplotlib.pyplot.subplots",
"numpy.sort",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
]
|
khangtran2020/contentDP | [
"d0d83c387f2d21fce342d0c445eb06ec928c1414"
]
| [
"main/main_feat_edge_nus.py"
]
| [
"import os\nimport warnings\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport dgl\nimport numpy as np\nfrom tqdm import tqdm\nfrom Utils.DataProcessing import *\nfrom Datasets.FlickrDataset import FlickrNUSDataset\nfrom Models.GCN import GCN\nfrom Trainer.Trainer import Trainer\nimport torch\nimport sys\n\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\nnum_channel = 128\nlearning_rate = 0.001\nepochs = 20000\npatience = 50\nnum_run = 5\nnum_feat = 2048\nnum_class = 1\nnum_batch = 18\nepsilon_edge = sys.argv[1]\n\ndata_file = 'Data/NUS/feats/'\ndata_edge_file = 'Data/NUS/pairs/'\nsave_model_path = '13JAN2022/'\norg_edge_file = 'flickr_nus.pairs'\ngenerate_edge_file = 'flickr_nus_eps_{}.pairs'.format(epsilon_edge)\n\nfeat_folder = [\n 'perturb_eps_0.01_gamma_0.5',\n 'perturb_eps_0.05_gamma_0.7',\n 'perturb_eps_0.1_gamma_0.3',\n 'perturb_eps_0.2_gamma_0.3',\n 'perturb_eps_0.4_gamma_0.9',\n 'perturb_eps_0.6_gamma_0.3',\n 'perturb_eps_0.8_gamma_0.9',\n 'perturb_eps_1.0_gamma_0.3',\n 'perturb_eps_2.0_gamma_0.9',\n]\nfeat_eps = ['0.01', '0.05', '0.1', '0.2', '0.4', '0.6', '0.8', '1.0', '2.0']\n\nall_result = {}\navg_result = {}\ni = 0\nfor folder in tqdm(feat_folder):\n print(\"Running for folder: {}\".format(folder))\n dataset = FlickrNUSDataset(feat_file=None, feat_folder=data_file+folder, num_batch=num_batch, edge_org = data_edge_file+org_edge_file, edge_generated = data_edge_file+generate_edge_file, type_test = 'feat_edge')\n temp_auc = []\n temp_f1 = []\n temp_acc = []\n for run in range(num_run):\n print(\"Run {}\".format(run + 1))\n name_model_to_save = save_model_path + \"NUS_feat_eps_{}_edge_eps_{}_run_{}.pt\".format( feat_eps[i], epsilon_edge, run+1)\n model = GCN(in_feats=dataset.num_feature, h_feats=num_channel, num_classes=dataset.num_classes)\n trainer = Trainer(num_epoch=epochs, learning_rate=learning_rate, patience=patience, model=model, dataset=dataset,\n name_model=name_model_to_save, device=device)\n auc, f1, acc = trainer.train()\n all_result[\"NUS_feat_eps_{}_edge_eps_{}_run_{}\".format(feat_eps[i],epsilon_edge, run+1)] = (auc, f1, acc)\n temp_auc.append(auc)\n temp_f1.append(f1)\n temp_acc.append(acc)\n avg_result[\"NUS_feat_eps_{}_edge_eps_{}\".format(feat_eps[i],epsilon_edge)] = (np.mean(np.array(temp_auc)), np.mean(np.array(temp_f1)), np.mean(np.array(temp_acc)))\n i += 1\n\nprint(\"=============== ALL RESULTS: ===================\")\nfor key in all_result:\n print(key, all_result[key])\n\nprint(\"=============== AVG RESULTS: ===================\")\nfor key in avg_result:\n print(key, avg_result[key])\n\n"
]
| [
[
"numpy.array",
"torch.cuda.is_available"
]
]
|
facebookresearch/ContextualBO | [
"8e029d710ad9a06d4235b4872dab9fbcbe3ad6be"
]
| [
"park_abr/run_park_non_contextual.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\nimport json\n\nimport numpy as np\nfrom ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy\nfrom ax.modelbridge.factory import get_GPEI, get_sobol\nfrom ax.storage.json_store.encoder import object_to_json\nfrom ax.service.ax_client import AxClient\n\nfrom fb_abr_problem import ParkNoncontextualRunner\n\n########\n# Define problem\nABR_CONTEXT_CONFIG_DICT = {\n 'c0': {'name': 'c0', 'delay': 0.09111558887847584},\n 'c1': {'name': 'c1', 'delay': 0.13919983731019495},\n 'c10': {'name': 'c10', 'delay': 0.04709563378153773},\n 'c11': {'name': 'c11', 'delay': 0.09175980911983045},\n 'c2': {'name': 'c2', 'delay': 0.05811786663939401},\n 'c3': {'name': 'c3', 'delay': 0.15680707174733982},\n 'c4': {'name': 'c4', 'delay': 0.21008791350238118},\n 'c5': {'name': 'c5', 'delay': 0.12895778597785987},\n 'c6': {'name': 'c6', 'delay': 0.05922074675831855},\n 'c7': {'name': 'c7', 'delay': 0.0751735817104147},\n 'c8': {'name': 'c8', 'delay': 0.08200189263592551},\n 'c9': {'name': 'c9', 'delay': 0.0962324885998359}\n}\nnum_trials = 75\n\nfor rep in range(25):\n print('====================', rep)\n\n num_contexts = len(ABR_CONTEXT_CONFIG_DICT)\n benchmark_problem = ParkNoncontextualRunner(context_dict=ABR_CONTEXT_CONFIG_DICT)\n\n t1 = time.time()\n\n gs = GenerationStrategy(\n name=\"GPEI\",\n steps=[\n GenerationStep(get_sobol, 8),\n GenerationStep(get_GPEI, -1),\n ],\n )\n\n axc = AxClient(generation_strategy=gs)\n experiment_parameters = benchmark_problem.base_parameters\n axc.create_experiment(\n name=\"cbo_aggregated_reward_experiment\",\n parameters=experiment_parameters,\n objective_name=\"aggregated_reward\",\n minimize=True,\n )\n context_reward_list = []\n\n def evaluation_aggregated_reward(parameters):\n # put parameters into 1-D array\n x = [\n parameters.get(param[\"name\"]) for param in benchmark_problem.base_parameters\n ]\n aggregated_reward, context_reward = benchmark_problem.f(np.array(x))\n return {\"aggregated_reward\": (aggregated_reward, 0.0)}, context_reward\n\n\n for itrial in range(num_trials):\n parameters, trial_index = axc.get_next_trial()\n aggregated_res, context_res = evaluation_aggregated_reward(parameters)\n axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)\n context_res[\"trial_index\"] = itrial\n context_reward_list.append(context_res)\n\n res = json.dumps(\n {\n \"experiment\": object_to_json(axc.experiment),\n \"context_rewards\": context_reward_list,\n }\n )\n\n with open(f'results/non_contextual_park_rep_{rep}.json', \"w\") as fout:\n json.dump(res, fout)\n\n print ('=============', time.time() - t1)\n"
]
| [
[
"numpy.array"
]
]
|
jdvelasq/tech-miner | [
"85735b3b94b9d56784eafce73c7f9bee37d8c6ed"
]
| [
"techminer/result.py"
]
| [
"\"\"\"\nTechMiner.Result\n==================================================================================================\n\n\"\"\"\nimport altair as alt\nimport geopandas\nimport geoplot\nimport itertools\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom techminer.common import *\nfrom collections import OrderedDict \nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom scipy.optimize import minimize\nfrom shapely.geometry import Point, LineString\nfrom sklearn.cluster import KMeans\nfrom matplotlib.patches import Rectangle\nfrom wordcloud import WordCloud, ImageColorGenerator\n\n#----------------------------------------------------------------------------------------------------\ndef _compute_graph_layout(graph):\n\n path_length = nx.shortest_path_length(graph)\n distances = pd.DataFrame(index=graph.nodes(), columns=graph.nodes())\n for row, data in path_length:\n for col, dist in data.items():\n distances.loc[row,col] = dist\n distances = distances.fillna(distances.max().max())\n\n return nx.kamada_kawai_layout(graph, dist=distances.to_dict())\n\n#--------------------------------------------------------------------------------------------------------\nclass Result(pd.DataFrame):\n \"\"\"Class implementing a dataframe with results of analysis.\n \"\"\"\n #----------------------------------------------------------------------------------------------------\n def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False, \n cluster_data=None, call=None):\n \n super().__init__(data, index, columns, dtype, copy)\n self._call = call\n self._cluster_data = None\n self._cluster_data = cluster_data\n\n #----------------------------------------------------------------------------------------------------\n @property\n def _constructor_expanddim(self):\n return self\n\n\n #----------------------------------------------------------------------------------------------\n def _add_count_to_label(self, column):\n\n count = self.groupby(by=column, as_index=True)[self.columns[-2]].sum()\n count = {key : value for key, value in zip(count.index, count.tolist())}\n self[column] = self[column].map(lambda x: cut_text(str(x) + ' [' + str(count[x]) + ']'))\n\n #----------------------------------------------------------------------------------------------\n def altair_barhplot(self, color='Greys'):\n \"\"\"\n\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.documents_by_year().altair_barhplot()\n alt.Chart(...)\n\n .. 
image:: ../figs/altair_barhplot.jpg\n :width: 800px\n :align: center \n \n \"\"\"\n if len(self.columns) != 3:\n Exception('Invalid call for result of function:' + self._call)\n\n columns = self.columns.tolist()\n data = pd.DataFrame(self.copy())\n if data.columns[1] != 'Cited by':\n data[columns[0]] = data[columns[0]].map(str) + ' [' + data[columns[1]].map(str) + ']'\n data[data.columns[0]] = data[data.columns[0]].map(lambda x: cut_text(x))\n if columns[0] == 'Year':\n data = data.sort_values(by=columns[0], ascending=False)\n return alt.Chart(data).mark_bar().encode(\n alt.Y(columns[0] + ':N', sort=alt.EncodingSortField(\n field=columns[1] + ':Q')),\n alt.X(columns[1] + ':Q'),\n alt.Color(columns[1] + ':Q', scale=alt.Scale(scheme=color)))\n\n #----------------------------------------------------------------------------------------------\n def altair_barplot(self):\n \"\"\"Vertical bar plot in Altair.\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.documents_by_year().altair_barplot()\n alt.Chart(...)\n\n .. image:: ../figs/altair_barplot.jpg\n :width: 500px\n :align: center \n \"\"\"\n if len(self.columns) != 3:\n Exception('Invalid call for result of function:' + self._call)\n\n columns = self.columns.tolist()\n data = pd.DataFrame(self.copy())\n data[columns[0]] = data[columns[0]].map(str) + ' [' + data[columns[1]].map(str) + ']'\n data[data.columns[0]] = data[data.columns[0]].map(lambda x: cut_text(x))\n\n return alt.Chart(data).mark_bar().encode(\n alt.X(columns[0] + ':N', sort=alt.EncodingSortField(field=columns[1] + ':Q')),\n alt.Y(columns[1] + ':Q'),\n alt.Color(columns[1] + ':Q', scale=alt.Scale(scheme='greys')))\n\n #----------------------------------------------------------------------------------------------------\n def altair_circle(self, ascending_r=None, ascending_c=None, filename=None, **kwds):\n \"\"\"Altair scatter plot with filled circles for visualizing relationships.\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.auto_corr(\n ... column='Authors',\n ... sep=',',\n ... top_n=30\n ... ).altair_circle()\n alt.Chart(...)\n\n .. image:: ../figs/altair_circle.png\n :width: 800px\n :align: center\n\n \"\"\"\n if len(self.columns) != 4:\n Exception('Invalid call for result of function:' + self._call)\n\n if ascending_r is None or ascending_r is True:\n sort_X = 'ascending'\n else:\n sort_X = 'descending'\n\n if ascending_c is None or ascending_c is True:\n sort_Y = 'ascending'\n else:\n sort_Y = 'descending'\n\n chart = alt.Chart(self).mark_circle().encode(\n alt.X(self.columns[0] + ':N',\n axis=alt.Axis(labelAngle=270), \n sort=sort_X),\n alt.Y(self.columns[1] + ':N',\n sort=sort_Y),\n size=self.columns[2],\n color=self.columns[2])\n\n if filename is not None:\n char.save(filename)\n\n return chart\n\n\n #----------------------------------------------------------------------------------------------------\n def altair_heatmap(self, ascending_r=None, ascending_c=None, filename=None, **kwds):\n \"\"\"Altair Heatmap\n Available cmaps:\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.terms_by_year( \n ... column='Authors', \n ... sep=',',\n ... top_n=20).altair_heatmap()\n alt.Chart(...)\n \n .. 
image:: ../figs/altair_heatmap.jpg\n :width: 600px\n :align: center\n\n \"\"\"\n\n if len(self.columns) != 4:\n Exception('Invalid call for result of function:' + self._call)\n\n ## force the same order of cells in rows and cols ------------------------------------------\n if self._call == 'auto_corr':\n if ascending_r is None and ascending_c is None:\n ascending_r = True\n ascending_c = True\n elif ascending_r is not None and ascending_r != ascending_c:\n ascending_c = ascending_r\n elif ascending_c is not None and ascending_c != ascending_r:\n ascending_r = ascending_c\n else:\n pass\n ## end -------------------------------------------------------------------------------------\n\n _self = self.copy()\n _self[_self.columns[0]] = _self[_self.columns[0]].map(lambda w: cut_text(w))\n _self[_self.columns[1]] = _self[_self.columns[1]].map(lambda w: cut_text(w))\n\n if ascending_r is None or ascending_r is True:\n sort_X = 'ascending'\n else:\n sort_X = 'descending'\n\n if ascending_c is None or ascending_c is True:\n sort_Y = 'ascending'\n else:\n sort_Y = 'descending'\n\n graph = alt.Chart(_self).mark_rect().encode(\n alt.X(_self.columns[0] + ':O', sort=sort_X),\n alt.Y(_self.columns[1] + ':O', sort=sort_Y),\n color=_self.columns[2] + ':Q')\n\n if self._call == 'co_ocurrence':\n text = graph.mark_text(\n align='center',\n baseline='middle',\n dx=5\n ).encode(\n text=_self.columns[2] + ':Q'\n )\n else:\n text = None\n\n plt.tight_layout()\n\n return graph\n\n #----------------------------------------------------------------------------------------------\n def barhplot(self, color='gray', figsize=(12,8)):\n \"\"\"Plots a pandas.DataFrame using Altair.\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.documents_by_year().barhplot()\n\n .. image:: ../figs/barhplot.jpg\n :width: 600px\n :align: center \n \"\"\"\n if len(self.columns) != 3:\n Exception('Invalid call for result of function:' + self._call)\n \n data = pd.DataFrame(self.copy())\n columns = data.columns.tolist()\n if data.columns[1] != 'Cited by':\n data[columns[0]] = data[columns[0]].map(str) + ' [' + data[columns[1]].map(str) + ']'\n data[data.columns[0]] = data[data.columns[0]].map(lambda x: cut_text(x))\n\n if columns[0] == 'Year':\n data = data.sort_values(by=columns[0], ascending=True)\n else:\n data = data.sort_values(by=columns[1], ascending=True)\n \n #plt.figure(figsize=figsize)\n data.plot.barh(columns[0], columns[1], color=color, figsize=figsize)\n plt.gca().xaxis.grid(True)\n \n\n\n\n #----------------------------------------------------------------------------------------------\n def barplot(self, color='gray', figsize=(8,12)):\n \"\"\"Vertical bar plot in matplotlib.\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.documents_by_year().barplot()\n\n .. 
image:: ../figs/barplot.jpg\n :width: 600px\n :align: center \n \n \"\"\"\n if len(self.columns) != 3:\n Exception('Invalid call for result of function:' + self._call)\n\n columns = self.columns.tolist()\n\n plt.figure(figsize=figsize)\n data = pd.DataFrame(self.copy())\n data[columns[0]] = data[columns[0]].map(str) + ' [' + data[columns[1]].map(str) + ']'\n data[data.columns[0]] = data[data.columns[0]].map(lambda x: cut_text(x))\n data.plot.bar(columns[0], columns[1], color=color)\n plt.gca().yaxis.grid(True)\n\n\n #----------------------------------------------------------------------------------------------------\n def chord_diagram(self, figsize=(12, 12), minval=None, R=3, n_bezier=100, dist=0.2):\n \"\"\"Creates a chord diagram for representing clusters.\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.auto_corr(\n ... column='Authors',\n ... sep=',',\n ... top_n=20).chord_diagram()\n >>> plt.savefig('./figs/chord-diagram.jpg')\n \n .. image:: ../figs/chord-diagram.jpg\n :width: 800px\n :align: center\n\n \"\"\"\n\n if self._cluster_data is None:\n Exception('Invalid call for result of function:' + self._call)\n\n chord_diagram(\n self[self.columns[0]].unique(), \n self._cluster_data, \n figsize=figsize, \n minval=minval, \n R=R, \n n_bezier=n_bezier, \n dist=dist)\n\n \n #----------------------------------------------------------------------------------------------------\n def heatmap(self, ascending_r=None, ascending_c=None, figsize=(10, 10), cmap='Blues'):\n \"\"\"Heat map.\n \n\n https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html\n\n 'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',\n 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',\n 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn'\n\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.terms_by_year( \n ... column='Authors', \n ... sep=',',\n ... top_n=20).heatmap(figsize=(8,4))\n >>> plt.savefig('./figs/heatmap.jpg')\n \n .. 
image:: ../figs//heatmap.jpg\n :width: 600px\n :align: center\n\n \"\"\"\n\n if len(self.columns) != 4:\n Exception('Invalid call for result of function:' + self._call)\n\n\n ## force the same order of cells in rows and cols ------------------------------------------\n if self._call == 'auto_corr':\n if ascending_r is None and ascending_c is None:\n ascending_r = True\n ascending_c = True\n elif ascending_r is not None and ascending_r != ascending_c:\n ascending_c = ascending_r\n elif ascending_c is not None and ascending_c != ascending_r:\n ascending_r = ascending_c\n else:\n pass\n ## end -------------------------------------------------------------------------------------\n\n\n\n x = self.tomatrix(ascending_r, ascending_c)\n\n ## rename columns and row index\n x.columns = [cut_text(w) for w in x.columns]\n x.index = [cut_text(w) for w in x.index]\n\n plt.figure(figsize=figsize)\n\n if self._call == 'factor_analysis':\n x = self.tomatrix(ascending_r, ascending_c)\n x = x.transpose()\n ## x = x.apply(lambda w: abs(w))\n plt.pcolor(np.transpose(abs(x.values)), cmap=cmap)\n else:\n plt.pcolor(np.transpose(x.values), cmap=cmap)\n\n #plt.pcolor(np.transpose(x.values), cmap=cmap)\n plt.xticks(np.arange(len(x.index))+0.5, x.index, rotation='vertical')\n plt.yticks(np.arange(len(x.columns))+0.5, x.columns)\n ## plt.gca().set_aspect('equal', 'box')\n plt.gca().invert_yaxis()\n\n ## changes the color of rectangle for autocorrelation heatmaps ---------------------------\n \n # if self._call == 'auto_corr':\n # for idx in np.arange(len(x.index)):\n # plt.gca().add_patch(\n # Rectangle((idx, idx), 1, 1, fill=False, edgecolor='red')\n # )\n\n ## end ------------------------------------------------------------------------------------\n\n\n ## annotation\n for idx_row, row in enumerate(x.index):\n for idx_col, col in enumerate(x.columns):\n\n if self._call in ['auto_corr', 'cross_corr', 'factor_analysis']:\n\n if abs(x.at[row, col]) > x.values.max() / 2.0:\n color = 'white'\n else:\n color = 'black'\n\n plt.text(\n idx_row + 0.5, \n idx_col + 0.5, \n \"{:3.2f}\".format(x.at[row, col]),\n ha=\"center\", \n va=\"center\", \n color=color)\n \n else:\n if x.at[row, col] > 0:\n \n if x.at[row, col] > x.values.max() / 2.0:\n color = 'white'\n else:\n color = 'black'\n\n plt.text(\n idx_row + 0.5, \n idx_col + 0.5, \n int(x.at[row, col]),\n ha=\"center\", \n va=\"center\", \n color=color)\n\n\n plt.tight_layout()\n plt.show()\n\n\n #----------------------------------------------------------------------------------------------------\n def map(self, min_value=None, top_links=None, figsize = (10,10), \n font_size=12, factor=None, size=(25,300)):\n \"\"\"\n Draw an autocorrelation, crosscorrelation or factor map.\n\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.auto_corr(\n ... column='Authors',\n ... sep=',',\n ... top_n=20).map()\n >>> plt.savefig('./figs/autocorr-map.jpg') \n\n .. 
image:: ../figs/autocorr-map.jpg\n :width: 800px\n :align: center \n\n \"\"\"\n\n if self._cluster_data is None:\n Exception('Invalid call for result of function:' + self._call)\n\n ## cluster dataset\n cluster_data = self._cluster_data.copy()\n\n ## figure properties\n plt.figure(figsize=figsize)\n\n ## graph\n graph = nx.Graph()\n\n ## adds nodes to graph\n clusters = list(set(cluster_data.cluster))\n nodes = list(set(self.tomatrix().index))\n\n graph.add_nodes_from(clusters)\n graph.add_nodes_from(nodes)\n\n\n ## adds edges and properties\n weigth = []\n style = []\n value = []\n for _, row in cluster_data.iterrows():\n graph.add_edge(row[1], row[2])\n if row[3] >= 0.75:\n weigth += [4]\n style += ['solid']\n value += [row[3]]\n elif row[3] >= 0.50:\n weigth += [2]\n style += ['solid']\n value += [row[3]]\n elif row[3] >= 0.25:\n weigth += [1]\n style += ['dashed']\n value += [row[3]]\n else:\n weigth += [1]\n style += ['dotted']\n value += [row[3]]\n\n\n edges = pd.DataFrame({\n 'edges' : graph.edges(),\n 'weight' : weigth,\n 'style' : style,\n 'value' : value\n })\n\n edges = edges.sort_values(by='value', ascending=False)\n\n if top_links is not None and top_links < len(edges):\n edges = edges[0:top_links]\n\n if min_value is not None:\n edges = edges[edges['value'] >= min_value]\n\n ## edges from center of cluster to nodes.\n for _, row in cluster_data.iterrows():\n graph.add_edge(row[0], row[1]) \n graph.add_edge(row[0], row[2])\n \n\n ## graph layout\n path_length = nx.shortest_path_length(graph)\n distances = pd.DataFrame(index=graph.nodes(), columns=graph.nodes())\n for row, data in path_length:\n for col, dist in data.items():\n distances.loc[row,col] = dist\n distances = distances.fillna(distances.max().max())\n layout = nx.kamada_kawai_layout(graph, dist=distances.to_dict())\n\n ## nodes drawing\n node_size = [x[(x.find('[')+1):-1] for x in nodes]\n node_size = [float(x) for x in node_size]\n max_node_size = max(node_size)\n min_node_size = min(node_size)\n node_size = [size[0] + x / (max_node_size - min_node_size) * size[1] for x in node_size]\n\n nx.draw_networkx_nodes(\n graph, \n layout, \n nodelist=nodes, \n node_size=node_size,\n node_color='red')\n\n ## edges drawing\n for style in list(set(edges['style'].tolist())):\n\n edges_set = edges[edges['style'] == style]\n\n if len(edges_set) == 0:\n continue\n\n nx.draw_networkx_edges(\n graph, \n layout,\n edgelist=edges_set['edges'].tolist(), \n style=style,\n width=edges_set['weight'].tolist(),\n edge_color='black')\n\n\n ## node labels\n x_left, x_right = plt.xlim()\n y_left, y_right = plt.ylim()\n delta_x = (x_right - x_left) * 0.01\n delta_y = (y_right - y_left) * 0.01\n for node in nodes:\n x_pos, y_pos = layout[node]\n plt.text(\n x_pos + delta_x, \n y_pos + delta_y, \n node, \n size=font_size,\n ha='left',\n va='bottom',\n bbox=dict(\n boxstyle=\"square\",\n ec='lightgray',\n fc='white',\n ))\n\n if factor is not None:\n left, right = plt.xlim()\n width = (right - left) * factor / 2.0\n plt.xlim(left - width, right + width)\n\n plt.axis('off')\n\n\n\n\n #----------------------------------------------------------------------------------------------------\n def ocurrence_map(self, min_value=None, top_links=None, figsize = (10,10), \n font_size=12, factor=None, size=(300,1000)):\n \"\"\"Cluster map for ocurrence and co-ocurrence matrices.\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.co_ocurrence(\n 
... column_r='Authors', \n ... column_c='Authors', \n ... sep_r=',', \n ... sep_c=',',\n ... top_n=10\n ... ).heatmap()\n >>> plt.savefig('./figs/heatmap-ocurrence-map.jpg')\n \n .. image:: ../figs/heatmap-ocurrence-map.jpg\n :width: 600px\n :align: center\n\n >>> rdf.co_ocurrence(\n ... column_r='Authors', \n ... column_c='Authors', \n ... sep_r=',', \n ... sep_c=',',\n ... top_n=10\n ... ).ocurrence_map(\n ... figsize=(11,11),\n ... font_size=10,\n ... factor = 0.1,\n ... size=(300,1000)\n ... )\n >>> plt.savefig('./figs/ocurrence-map.jpg')\n \n .. image:: ../figs/ocurrence-map.jpg\n :width: 600px\n :align: center\n\n \"\"\"\n\n if self._call not in ['ocurrence', 'co_ocurrence']:\n Exception('Invalid call for result of function:' + self._call)\n\n\n ## figure properties\n plt.figure(figsize=figsize)\n\n ## graph\n graph = nx.Graph()\n\n terms_r = list(set(self.tomatrix().index.tolist()))\n terms_c = list(set(self.tomatrix().columns.tolist()))\n\n nodes = list(set(terms_r + terms_c))\n nodes = [cut_text(x) for x in nodes]\n graph.add_nodes_from(nodes)\n\n if sorted(terms_r) != sorted(terms_c):\n\n numnodes = [str(i) for i in range(len(self))]\n graph.add_nodes_from(numnodes)\n\n for idx, row in self.iterrows():\n graph.add_edge(row[0], str(idx))\n graph.add_edge(row[1], str(idx))\n\n labels={str(idx):row[2] for idx, row in self.iterrows()}\n\n else:\n \n mtx = self.tomatrix()\n edges = []\n labels = {}\n\n n = 0\n for idx_r, row in enumerate(mtx.index.tolist()):\n for idx_c, col in enumerate(mtx.columns.tolist()):\n\n if idx_c < idx_r:\n continue\n\n if mtx.at[row, col] > 0:\n edges += [(row, str(n)), (col, str(n))]\n labels[str(n)] = mtx.at[row, col]\n n += 1\n \n\n numnodes = [str(i) for i in range(n)]\n graph.add_nodes_from(numnodes)\n\n for a, b in edges:\n graph.add_edge(a, b)\n \n ## graph layout\n layout = _compute_graph_layout(graph)\n \n ## draw terms nodes\n node_size = [int(n[n.find('[')+1:-1]) for n in nodes]\n node_size = [size[0] + (n - min(node_size)) / (max(node_size) - min(node_size)) * (size[1] - size[0]) for n in node_size]\n nx.draw_networkx_nodes(\n graph, \n layout, \n nodelist=nodes, \n node_size=node_size,\n node_color='red')\n\n x_left, x_right = plt.xlim()\n y_left, y_right = plt.ylim()\n delta_x = (x_right - x_left) * 0.01\n delta_y = (y_right - y_left) * 0.01\n for node in nodes:\n x_pos, y_pos = layout[node]\n plt.text(\n x_pos + delta_x, \n y_pos + delta_y, \n node, \n size=font_size,\n ha='left',\n va='bottom',\n bbox=dict(\n boxstyle=\"square\",\n ec='gray',\n fc='white',\n ))\n\n # nx.draw_networkx_labels(\n # graph,\n # layout,\n # labels={t:t for t in terms},\n # bbox=dict(facecolor='none', edgecolor='lightgray', boxstyle='round'))\n\n ## draw quantity nodes\n node_size = [int(labels[n]) for n in labels.keys()]\n node_size = [size[0] + (n - min(node_size)) / (max(node_size) - min(node_size)) * (size[1] - size[0]) for n in node_size] \n nx.draw_networkx_nodes(\n graph, \n layout, \n nodelist=numnodes, \n node_size=node_size,\n node_color='lightblue')\n\n nx.draw_networkx_labels(\n graph,\n layout,\n labels=labels,\n font_color='black')\n\n ## edges\n nx.draw_networkx_edges(\n graph, \n layout,\n width=1\n )\n plt.axis('off')\n \n\n #----------------------------------------------------------------------------------------------------\n def print_IDs(self):\n \"\"\"Auxiliary function to print IDs of documents. 
\n \"\"\"\n\n if self._call in ['co_ocurrence', 'cross_corr', 'auto_corr']:\n\n for idx, row in self.iterrows():\n if row[-1] is not None:\n print(row[0], ', ', row[1], ' (', len(row[-1]), ')', ' : ', sep='', end='')\n for i in row[-1]:\n print(i, sep='', end='')\n print()\n\n elif self._call == 'terms_by_terms_by_year':\n\n for idx, row in self.iterrows():\n if row[-1] is not None:\n print(row[0], ', ', row[1], ', ', row[2], ' (', len(row[-1]), ')', ' : ', sep='', end='')\n for i in row[-1]:\n print(i, sep='', end='')\n print()\n\n elif self._call == 'factor_analysis':\n pass\n else:\n pass\n\n\n #----------------------------------------------------------------------------------------------------\n def sankey_plot(self, figsize=(7,10), minval=None):\n \"\"\"Cross-relation sankey plot.\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.cross_corr(\n ... column_r='keywords (cleaned)',\n ... sep_r=';',\n ... column_c='Authors',\n ... sep_c=','\n ... ).sankey_plot(minval=0.1)\n >>> plt.savefig('./figs/sankey-plot.jpg')\n \n .. image:: ../figs//sankey-plot.jpg\n :width: 600px\n :align: center\n\n\n \"\"\"\n if self._call != 'cross_corr':\n Exception('Invalid call for result of function:' + self._call)\n\n x = self\n \n llabels = sorted(list(set(x[x.columns[0]])))\n rlabels = sorted(list(set(x[x.columns[1]])))\n\n factorL = max(len(llabels)-1, len(rlabels)-1) / (len(llabels) - 1)\n factorR = max(len(llabels)-1, len(rlabels)-1) / (len(rlabels) - 1)\n\n lpos = {k:v*factorL for v, k in enumerate(llabels)}\n rpos = {k:v*factorR for v, k in enumerate(rlabels)}\n \n fig, ax1 = plt.subplots(figsize=(7, 10))\n ax1.scatter([0] * len(llabels), llabels, color='black', s=50)\n\n for index, r in x.iterrows():\n\n row = r[0]\n col = r[1]\n val = r[2]\n\n if val >= 0.75:\n linewidth = 4\n linestyle = '-' \n elif val >= 0.50:\n linewidth = 2\n linstyle = '-' \n elif val >= 0.25:\n linewidth = 2\n linestyle = '--' \n elif val < 0.25:\n linewidth = 1\n linestyle = ':'\n else: \n linewidth = 0\n linestyle = '-'\n\n if minval is None:\n plt.plot(\n [0, 1], \n [lpos[row], rpos[col]], \n linewidth=linewidth, \n linestyle=linestyle, \n color='black')\n elif abs(val) >= minval :\n plt.plot(\n [0, 1], \n [lpos[row], rpos[col]], \n linewidth=linewidth, \n linestyle=linestyle, \n color='black')\n\n ax2 = ax1.twinx()\n ax2.scatter([1] * len(rlabels), rlabels, color='black', s=50)\n #ax2.set_ylim(0, len(rlabels)-1)\n \n \n for txt in ['bottom', 'top', 'left', 'right']:\n ax1.spines[txt].set_color('white')\n ax2.spines[txt].set_color('white')\n \n ax2.set_xticks([])\n\n plt.tight_layout()\n \n #----------------------------------------------------------------------------------------------\n def seaborn_barhplot(self, color='gray'):\n \"\"\"\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.documents_by_year().seaborn_barhplot()\n \n\n .. 
image:: ../figs/seaborn_barhplot.jpg\n :width: 600px\n :align: center \n\n \"\"\"\n if len(self.columns) != 3:\n Exception('Invalid call for result of function:' + self._call)\n\n columns = self.columns.tolist()\n data = pd.DataFrame(self.copy())\n if data.columns[1] != 'Cited by':\n data[columns[0]] = data[columns[0]].map(str) + ' [' + data[columns[1]].map(str) + ']'\n data[data.columns[0]] = data[data.columns[0]].map(lambda x: cut_text(x))\n\n if columns[0] == 'Year':\n data = data.sort_values(by=columns[0], ascending=False)\n else:\n data = data.sort_values(by=columns[1], ascending=False)\n sns.barplot(\n x=columns[1],\n y=columns[0],\n data=data,\n label=columns[0],\n color=color)\n plt.gca().xaxis.grid(True)\n\n #----------------------------------------------------------------------------------------------\n def seaborn_barplot(self, color='gray'):\n \"\"\"Vertical bar plot in Seaborn.\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.documents_by_year().seaborn_barplot()\n\n .. image:: ../figs/seaborn_barhplot.jpg\n :width: 800px\n :align: center \n \"\"\"\n if len(self.columns) != 3:\n Exception('Invalid call for result of function:' + self._call)\n\n columns = self.columns.tolist()\n data = Result(self.copy())\n data[columns[0]] = data[columns[0]].map(str) + ' [' + data[columns[1]].map(str) + ']'\n data[data.columns[0]] = data[data.columns[0]].map(lambda x: cut_text(x))\n\n columns = data.columns.tolist()\n result = sns.barplot(\n y=columns[1],\n x=columns[0],\n data=data,\n label=columns[0],\n color=color)\n _, labels = plt.xticks()\n result.set_xticklabels(labels, rotation=90)\n plt.gca().yaxis.grid(True)\n\n #----------------------------------------------------------------------------------------------------\n def seaborn_heatmap(self, ascending_r=None, ascending_c=None, filename=None):\n \"\"\"Heat map.\n \n\n https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html\n\n 'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',\n 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',\n 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn'\n\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.terms_by_year( \n ... column='Authors', \n ... sep=',',\n ... top_n=20).seaborn_heatmap()\n >>> plt.savefig('./figs/seaborn_heatmap.jpg')\n \n .. 
image:: ../figs//seaborn_heatmap.jpg\n :width: 600px\n :align: center\n\n \"\"\"\n\n if len(self.columns) != 4:\n Exception('Invalid call for result of function:' + self._call)\n\n ## force the same order of cells in rows and cols ------------------------------------------\n if self._call == 'auto_corr':\n if ascending_r is None and ascending_c is None:\n ascending_r = True\n ascending_c = True\n elif ascending_r is not None and ascending_r != ascending_c:\n ascending_c = ascending_r\n elif ascending_c is not None and ascending_c != ascending_r:\n ascending_r = ascending_c\n else:\n pass\n ## end -------------------------------------------------------------------------------------\n\n\n sns.set()\n _self = self.tomatrix(ascending_r, ascending_c)\n _self = _self.transpose()\n _self.columns = [cut_text(w) for w in _self.columns]\n _self.index = [cut_text(w) for w in _self.index]\n\n sns_plot = sns.heatmap(_self)\n\n if filename is not None:\n sns_plot.savefig(filename)\n\n #return sns_plot\n\n\n #----------------------------------------------------------------------------------------------------\n def seaborn_relplot(self, ascending_r=None, ascending_c=None, filename=None):\n \"\"\"Seaborn relplot plot with filled circles for visualizing relationships.\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.auto_corr(\n ... column='Authors',\n ... sep=',',\n ... top_n=30\n ... ).seaborn_relplot(filename='./figs/seaborn_relplot.png')\n\n .. image:: ../figs//seaborn_relplot.png\n :width: 600px\n :align: center\n \"\"\"\n\n if len(self.columns) != 4:\n Exception('Invalid call for result of function:' + self._call)\n\n sns_plot = sns.relplot(\n x = self.columns[0],\n y = self.columns[1],\n size = self.columns[2],\n alpha = 0.8,\n palette = 'viridis',\n data = self)\n plt.xticks(rotation=90)\n if filename is not None:\n sns_plot.savefig(filename)\n \n\n #----------------------------------------------------------------------------------------------------\n def tomatrix(self, ascending_r=None, ascending_c=None):\n \"\"\"Displays a term by term dataframe as a matrix.\n\n >>> mtx = Result({\n ... 'rows':['r0', 'r1', 'r2', 'r0', 'r1', 'r2'],\n ... 'cols':['c0', 'c1', 'c0', 'c1', 'c0', 'c1'],\n ... 'vals':[ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]\n ... 
})\n >>> mtx\n rows cols vals\n 0 r0 c0 1.0\n 1 r1 c1 2.0\n 2 r2 c0 3.0\n 3 r0 c1 4.0\n 4 r1 c0 5.0\n 5 r2 c1 6.0\n\n >>> mtx.tomatrix() # doctest: +NORMALIZE_WHITESPACE\n c0 c1\n r0 1.0 4.0\n r1 5.0 2.0\n r2 3.0 6.0 \n\n \"\"\"\n\n # if self._call not in [\n # 'coo-matrix',\n # 'cross-matrix',\n # 'auto-matrix']:\n\n # raise Exception('Invalid function call for type: ' + self._call )\n\n\n if self.columns[0] == 'Year':\n year = self.Year.copy()\n dict_year = { x[0:x.find(' [')] : x for x in year}\n year = year.map(lambda x: int(x[0:x.find('[')]))\n year = [str(x) for x in range(min(year), max(year)+1)]\n year = [y + ' [0]' if y not in dict_year.keys() else dict_year[y] for y in year]\n termA_unique = year\n # termA_unique = range(min(self.Year), max(self.Year)+1)\n else:\n termA_unique = self.iloc[:,0].unique()\n \n if self.columns[1] == 'Year':\n year = self.Year.copy()\n dict_year = {x[0:x.find(' [')] : x for x in year}\n year = year.map(lambda x: int(x[0:x.find('[')]))\n year = [str(x) for x in range(min(year), max(year)+1)]\n year = [y + ' [0]' if y not in dict_year.keys() else dict_year[y] for y in year]\n termB_unique = year\n # termB_unique = range(min(self.Year), max(self.Year)+1)\n else:\n termB_unique = self.iloc[:,1].unique()\n \n if ascending_r is not None:\n termA_unique = sorted(termA_unique, reverse = not ascending_r)\n\n if ascending_c is not None:\n termB_unique = sorted(termB_unique, reverse = not ascending_c)\n\n if self._call == 'co_ocurrence':\n result = pd.DataFrame(\n np.full((len(termA_unique), len(termB_unique)), 0)\n )\n\n else:\n result = pd.DataFrame(\n np.zeros((len(termA_unique), len(termB_unique)))\n )\n \n result.columns = termB_unique\n result.index = termA_unique\n\n for index, r in self.iterrows():\n row = r[0]\n col = r[1]\n val = r[2]\n result.loc[row, col] = val\n \n return Result(result, call='Matrix')\n\n #----------------------------------------------------------------------------------------------------\n def transpose(self, *args, **kwargs):\n \"\"\"Transpose results matrix.\n \"\"\"\n return Result(super().transpose(), call=self._call)\n \n\n #----------------------------------------------------------------------------------------------------\n #TODO personalizar valor superior para escalar los pesos de los puentes\n #TODO map\n def network(self, save=False, name='network.png', corr_min=0.7, node_color='lightblue',\n edge_color='lightgrey', edge_color2='lightcoral', node_size=None, fond_size=4,\n figsize = (10,10)):\n \"\"\"\n This function generates network graph for matrix.\n\n Args:\n matrix (pandas.DataFrame): Matrix with variables on indexes and column titles\n save (boolean): If True, the graph will save with the name given\n name (str): Name to save the png file with the image\n corr_min (int): Minimum absolute value for the relationships between variables \n to be shown in the graph. 
\n It is suggested when a correlation matrix is being used\n node_color (str): Color name used to plot nodes\n edge_color (str): Color name used to plot edges with positive weights\n edge_color2 (str): Color name used to plot edges with negative weights\n node_size (int): If None value, the size of the nodes is plotted according\n to the weights of edges that arrive and leave each one of them.\n If numeric value, all nodes will be plotted with this given size\n fond_size (int): Node label fond size\n figsize (float, float): size of figure drawn\n\n Returns:\n None\n \n\n\n \"\"\"\n\n if self._call not in [\n 'co_ocurrence',\n 'cross_corr',\n 'auto_corr',\n 'factor_analysis']:\n\n raise Exception('Invalid function call for type: ' + self._call )\n\n\n if self._call == 'factor_analysis':\n x = self.copy()\n else:\n x = self.tomatrix()\n\n plt.clf()\n plt.figure(figsize=figsize)\n \n #generate network graph\n graph = nx.Graph()\n # add nodes\n rows = x.index\n columns = x.columns\n nodes = list(set(rows.append(columns)))\n\n #add nodes\n graph.add_nodes_from(nodes)\n list_ = list(OrderedDict.fromkeys(itertools.product(rows, columns)))\n if len(rows) == len(columns) and (all(rows.sort_values())==all(columns.sort_values())):\n list_ = list(set(tuple(sorted(t)) for t in list_))\n\n # add edges\n for i in range(len(list_)):\n combinations=list_[i]\n from_node, to_node = combinations[0], combinations[1] \n if from_node != to_node:\n weight = x.loc[from_node, to_node]\n if weight != 0 and abs(weight)>corr_min: \n if weight<0:\n weight=abs(weight)\n edge_colour =edge_color2\n else:\n edge_colour = edge_color\n graph.add_edge(from_node, to_node, weight=weight, color = edge_colour)\n \n #calculate distance between relationated nodes to avoid overlaping\n path_length = nx.shortest_path_length(graph)\n distances = pd.DataFrame(index=graph.nodes(), columns=graph.nodes())\n for row, data in path_length:\n for col, dist in data.items():\n distances.loc[row,col] = dist\n distances = distances.fillna(distances.max().max() )\n\n #layout of graph\n pos = nx.kamada_kawai_layout(graph, dist=distances.to_dict())\n\n #weights and colors of the relationships between nodes for edges thickness \n weights = dict(((u, v), int(d[\"weight\"])) for u, v, d in graph.edges(data=True))\n colors = dict(((u, v), d[\"color\"]) for u, v, d in graph.edges(data=True))\n\n #Edges weights for plot\n max_=max([i for i in weights.values()])\n min_=min([i for i in weights.values()])\n min_range=1\n max_range=5\n if max_<=1:\n width = ([(1+x)*2 for x in weights.values()]) \n else:\n width = ([((((x-min_)/(max_-min_))*(max_range-min_range))+min_range) for x in weights.values()]) \n # width=list(weights.values())\n \n #node sizes\n if not node_size:\n node_sizes = dict(graph.degree())\n node_sizes = ([(x)*10 for key,x in node_sizes.items()]) \n else:\n node_sizes=node_size\n\n #visual graph configuration\n nx.draw(graph, pos,node_size=node_sizes, node_color=node_color, \n edge_color=list(colors.values()), font_size=fond_size,\n with_labels=True, width=width)\n \n #save figure as png\n if save:\n plt.savefig(name, format=\"PNG\", dpi=300, bbox_inches='tight')\n\n plt.tight_layout()\n plt.show()\n return None\n\n\n #----------------------------------------------------------------------------------------------------\n #TODO networkmap validar como pasar lonlat,\n #que pasa si valores negativos???\n #personalizar tamaño de la figura, \n #guardar archivo \n #quitar ejes\n\n def networkmap(matrix, color_edges ='grey', color_node='red',color_map 
= 'white', edge_map = 'lightgrey', node_size =None, edge_weight = None):\n\n \"\"\"\n This function generates network graph over map, for matrix with country relations.\n\n Args:\n matrix (pandas.DataFrame): Matrix with variables on indexes and column titles\n color_edges (str): Color name used to plot edges\n color_node (str): Color name used to plot nodes\n color_map (str): Color name used to plot map countries\n edge_map (str): Color name used to plot contries border\n node_size (int): If None value, the size of the nodes is plotted according\n to the weights of edges that arrive and leave each one of them.\n If numeric value, all nodes will be plotted with this given size\n edge_weight (int): If None value, the weigth of the edges is plotted according\n to matrix values\n If numeric value, all edges will be plotted with this given size\n Returns:\n None\n #\n \"\"\"\n\n #Get longitudes and latituds\n lonlat=pd.read_csv('LonLat.csv',sep=';')\n\n #node's names\n rows=matrix.index\n columns=matrix.columns\n nodes=list(set(rows.append(columns)))\n nodes = [row.replace(' ', '') for row in rows ]\n\n\n #nodes_combinations\n list_ = list(OrderedDict.fromkeys(itertools.product(rows, columns)))\n if len(rows)== len(columns) and (all(rows.sort_values())==all(columns.sort_values())):\n list_=list(set(tuple(sorted(t)) for t in list_))\n \n\n pos=lonlat[lonlat.country.isin(nodes)]\n\n geometry = [Point(xy) for xy in zip(pos['lon'], pos['lat'])]\n\n # Coordinate reference system : WGS84\n crs = {'init': 'epsg:4326'}\n\n # Creating a Geographic data frame from nodes\n gdf = geopandas.GeoDataFrame(pos, crs=crs, geometry=geometry)\n\n #edges\n df=pd.DataFrame({'initial':[],'final':[],'initial_lon': [], 'initial_lat': [],'final_lon': [],'final_lat': [], 'weight': []})\n for i in range(len(list_)):\n combinations=list_[i]\n from_node, to_node = combinations[0],combinations[1] \n if from_node != to_node:\n weight =matrix.loc[from_node,to_node]\n if weight != 0: \n df = df.append({'initial':from_node.replace(' ', ''),'final':to_node.replace(' ', ''),'initial_lon': pos[pos.country==from_node.replace(' ', '')]['lon'].values, 'initial_lat': pos[pos.country==from_node.replace(' ', '')]['lat'].values,'final_lon': pos[pos.country==to_node.replace(' ', '')]['lon'].values,'final_lat': pos[pos.country==to_node.replace(' ', '')]['lat'].values, 'weight': weight}, ignore_index='True')\n\n # Creating a Geographic data frame from edges \n df['orig_coord'] = [Point(xy) for xy in zip(df['initial_lon'], df['initial_lat'])]\n df['dest_coord'] = [Point(xy) for xy in zip(df['final_lon'], df['final_lat'])]\n\n geometry_lines=[LineString(xy) for xy in zip(df.orig_coord,df.dest_coord)]\n gdf_lines=geopandas.GeoDataFrame(df, crs=crs, geometry=geometry_lines)\n\n #base map\n world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))\n\n #nodes size\n if not node_size:\n nodes_freq=list(gdf_lines.initial) + list(gdf_lines.final)\n nodes_freq.sort()\n nodes_size= {x:nodes_freq.count(x) for x in nodes_freq}\n size=nodes_size.values()\n size=[x*5 for x in size]\n else:\n size=node_size\n \n #edges weigth\n if not node_size:\n edges_=list(gdf_lines.weight)\n else:\n edges_= node_size\n #plot graph\n gdf.plot(ax=world.plot(ax=gdf_lines.plot(color=color_edges, markersize=edges_,alpha=0.5),color=color_map, edgecolor= edge_map), color=color_node, markersize=size)\n \n plt.tight_layout()\n plt.show()\n\n return None\n\n #----------------------------------------------------------------------------------------------\n def 
wordcloud(self, figsize=(14, 7), max_font_size=50, max_words=100, \n background_color=\"white\"):\n \"\"\"\n\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> rdf.documents_by_terms('Source title').wordcloud()\n\n .. image:: ../figs/wordcloud.jpg\n :width: 800px\n :align: center \n \"\"\"\n \n if len(self.columns) != 3:\n Exception('Invalid call for result of function:' + self._call)\n\n columns = self.columns.tolist()\n\n words = [row[0] for _, row in self.iterrows() for i in range(row[1])]\n\n wordcloud = WordCloud(\n max_font_size=max_font_size, \n max_words=max_words, \n background_color=background_color).generate(' '.join(words))\n\n plt.figure(figsize=figsize)\n plt.imshow(wordcloud, interpolation=\"bilinear\")\n plt.axis(\"off\")\n plt.show()\n\n #----------------------------------------------------------------------------------------------\n def worldmap(self, figsize=(14, 7)):\n \"\"\"Worldmap plot with the number of documents per country.\n\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from techminer.datasets import load_test_cleaned\n >>> rdf = load_test_cleaned().data\n >>> from techminer.strings import *\n >>> rdf['Country'] = rdf['Affiliations'].map(lambda x: extract_country(x, sep=';'))\n >>> rdf.documents_by_terms('Country', sep=';').head()\n Country Num Documents ID\n 0 China 83 [[*3*], [*4*], [*6*], [*6*], [*7*], [*10*], [*...\n 1 Taiwan 20 [[*14*], [*14*], [*17*], [*17*], [*17*], [*17*...\n 2 United States 17 [[*3*], [*22*], [*23*], [*23*], [*26*], [*26*]...\n 3 United Kingdom 15 [[*5*], [*7*], [*11*], [*11*], [*11*], [*28*],...\n 4 India 15 [[*9*], [*50*], [*51*], [*56*], [*56*], [*57*]...\n >>> rdf.documents_by_terms('Country', sep=';').worldmap()\n\n .. image:: ../figs/worldmap.jpg\n :width: 800px\n :align: center\n \"\"\"\n\n if 'Country' not in list(self.columns):\n raise Exception('No country column found in data')\n\n world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))\n world = world[world.name != \"Antarctica\"]\n world['q'] = 0\n world.index = world.name\n\n rdf = self.copy()\n rdf['Country'] = rdf['Country'].map(\n lambda x: x.replace('United States', 'United States of America')\n )\n\n #rdf['Country'] = [w if w != else for w in rdf['Country']]\n rdf.index = rdf['Country']\n for country in rdf['Country']:\n if country in world.index:\n world.at[country, 'q'] = rdf.loc[country, 'Num Documents']\n _, axx = plt.subplots(1, 1, figsize=figsize)\n divider = make_axes_locatable(axx)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.1)\n world.plot(column='q', legend=True, ax=axx, cax=cax, cmap='Pastel2')\n\n #----------------------------------------------------------------------------------------------------\n\n\n\n"
]
| [
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"numpy.transpose",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
]
|
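The `tomatrix` doctest in the techminer `Result` code above pivots a long-format term-by-term table (`rows`, `cols`, `vals`) into a matrix before any heatmap or network plot is drawn. A minimal standalone sketch of that same pivot step with plain pandas (column names copied from the doctest; this is an illustration, not part of the dataset row itself):

```python
import pandas as pd

# long-format term-by-term data, mirroring the tomatrix() doctest above
df = pd.DataFrame({
    "rows": ["r0", "r1", "r2", "r0", "r1", "r2"],
    "cols": ["c0", "c1", "c0", "c1", "c0", "c1"],
    "vals": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
})

# pivot to a rows-by-cols matrix; absent (row, col) pairs would become NaN, so fill with 0
mtx = df.pivot(index="rows", columns="cols", values="vals").fillna(0.0)

# matches the doctest result: r0 -> [1.0, 4.0], r1 -> [5.0, 2.0], r2 -> [3.0, 6.0]
print(mtx)
```

This only covers the pivot itself; the `Result` class in the row above additionally sorts the axes and pads missing years, which is not reproduced here.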
dagss/numpy_svn | [
"69f75f8167694fc935357f135e63718c1c0e7242"
]
| [
"numpy/lib/tests/test_function_base.py"
]
| [
"import warnings\n\nfrom numpy.testing import *\nimport numpy.lib\nfrom numpy.lib import *\nfrom numpy.core import *\nfrom numpy import matrix, asmatrix\n\nimport numpy as np\n\nclass TestAny(TestCase):\n def test_basic(self):\n y1 = [0, 0, 1, 0]\n y2 = [0, 0, 0, 0]\n y3 = [1, 0, 1, 0]\n assert(any(y1))\n assert(any(y3))\n assert(not any(y2))\n\n def test_nd(self):\n y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]]\n assert(any(y1))\n assert_array_equal(sometrue(y1, axis=0), [1, 1, 0])\n assert_array_equal(sometrue(y1, axis=1), [0, 1, 1])\n\n\nclass TestAll(TestCase):\n def test_basic(self):\n y1 = [0, 1, 1, 0]\n y2 = [0, 0, 0, 0]\n y3 = [1, 1, 1, 1]\n assert(not all(y1))\n assert(all(y3))\n assert(not all(y2))\n assert(all(~array(y2)))\n\n def test_nd(self):\n y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]\n assert(not all(y1))\n assert_array_equal(alltrue(y1, axis=0), [0, 0, 1])\n assert_array_equal(alltrue(y1, axis=1), [0, 0, 1])\n\n\nclass TestAverage(TestCase):\n def test_basic(self):\n y1 = array([1, 2, 3])\n assert(average(y1, axis=0) == 2.)\n y2 = array([1., 2., 3.])\n assert(average(y2, axis=0) == 2.)\n y3 = [0., 0., 0.]\n assert(average(y3, axis=0) == 0.)\n\n y4 = ones((4, 4))\n y4[0, 1] = 0\n y4[1, 0] = 2\n assert_almost_equal(y4.mean(0), average(y4, 0))\n assert_almost_equal(y4.mean(1), average(y4, 1))\n\n y5 = rand(5, 5)\n assert_almost_equal(y5.mean(0), average(y5, 0))\n assert_almost_equal(y5.mean(1), average(y5, 1))\n\n y6 = matrix(rand(5, 5))\n assert_array_equal(y6.mean(0), average(y6, 0))\n\n def test_weights(self):\n y = arange(10)\n w = arange(10)\n actual = average(y, weights=w)\n desired = (arange(10) ** 2).sum()*1. / arange(10).sum()\n assert_almost_equal(actual, desired)\n\n y1 = array([[1, 2, 3], [4, 5, 6]])\n w0 = [1, 2]\n actual = average(y1, weights=w0, axis=0)\n desired = array([3., 4., 5.])\n assert_almost_equal(actual, desired)\n\n w1 = [0, 0, 1]\n actual = average(y1, weights=w1, axis=1)\n desired = array([3., 6.])\n assert_almost_equal(actual, desired)\n\n # This should raise an error. 
Can we test for that ?\n # assert_equal(average(y1, weights=w1), 9./2.)\n\n # 2D Case\n w2 = [[0, 0, 1], [0, 0, 2]]\n desired = array([3., 6.])\n assert_array_equal(average(y1, weights=w2, axis=1), desired)\n assert_equal(average(y1, weights=w2), 5.)\n\n def test_returned(self):\n y = array([[1, 2, 3], [4, 5, 6]])\n\n # No weights\n avg, scl = average(y, returned=True)\n assert_equal(scl, 6.)\n\n avg, scl = average(y, 0, returned=True)\n assert_array_equal(scl, array([2., 2., 2.]))\n\n avg, scl = average(y, 1, returned=True)\n assert_array_equal(scl, array([3., 3.]))\n\n # With weights\n w0 = [1, 2]\n avg, scl = average(y, weights=w0, axis=0, returned=True)\n assert_array_equal(scl, array([3., 3., 3.]))\n\n w1 = [1, 2, 3]\n avg, scl = average(y, weights=w1, axis=1, returned=True)\n assert_array_equal(scl, array([6., 6.]))\n\n w2 = [[0, 0, 1], [1, 2, 3]]\n avg, scl = average(y, weights=w2, axis=1, returned=True)\n assert_array_equal(scl, array([1., 6.]))\n\n\nclass TestSelect(TestCase):\n def _select(self, cond, values, default=0):\n output = []\n for m in range(len(cond)):\n output += [V[m] for V, C in zip(values, cond) if C[m]] or [default]\n return output\n\n def test_basic(self):\n choices = [array([1, 2, 3]),\n array([4, 5, 6]),\n array([7, 8, 9])]\n conditions = [array([0, 0, 0]),\n array([0, 1, 0]),\n array([0, 0, 1])]\n assert_array_equal(select(conditions, choices, default=15),\n self._select(conditions, choices, default=15))\n\n assert_equal(len(choices), 3)\n assert_equal(len(conditions), 3)\n\n\nclass TestInsert(TestCase):\n def test_basic(self):\n a = [1, 2, 3]\n assert_equal(insert(a, 0, 1), [1, 1, 2, 3])\n assert_equal(insert(a, 3, 1), [1, 2, 3, 1])\n assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3])\n\n\nclass TestAmax(TestCase):\n def test_basic(self):\n a = [3, 4, 5, 10, -3, -5, 6.0]\n assert_equal(amax(a), 10.0)\n b = [[3, 6.0, 9.0],\n [4, 10.0, 5.0],\n [8, 3.0, 2.0]]\n assert_equal(amax(b, axis=0), [8.0, 10.0, 9.0])\n assert_equal(amax(b, axis=1), [9.0, 10.0, 8.0])\n\n\nclass TestAmin(TestCase):\n def test_basic(self):\n a = [3, 4, 5, 10, -3, -5, 6.0]\n assert_equal(amin(a), -5.0)\n b = [[3, 6.0, 9.0],\n [4, 10.0, 5.0],\n [8, 3.0, 2.0]]\n assert_equal(amin(b, axis=0), [3.0, 3.0, 2.0])\n assert_equal(amin(b, axis=1), [3.0, 4.0, 2.0])\n\n\nclass TestPtp(TestCase):\n def test_basic(self):\n a = [3, 4, 5, 10, -3, -5, 6.0]\n assert_equal(ptp(a, axis=0), 15.0)\n b = [[3, 6.0, 9.0],\n [4, 10.0, 5.0],\n [8, 3.0, 2.0]]\n assert_equal(ptp(b, axis=0), [5.0, 7.0, 7.0])\n assert_equal(ptp(b, axis= -1), [6.0, 6.0, 6.0])\n\n\nclass TestCumsum(TestCase):\n def test_basic(self):\n ba = [1, 2, 10, 11, 6, 5, 4]\n ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]\n for ctype in [int8, uint8, int16, uint16, int32, uint32,\n float32, float64, complex64, complex128]:\n a = array(ba, ctype)\n a2 = array(ba2, ctype)\n assert_array_equal(cumsum(a, axis=0), array([1, 3, 13, 24, 30, 35, 39], ctype))\n assert_array_equal(cumsum(a2, axis=0), array([[1, 2, 3, 4], [6, 8, 10, 13],\n [16, 11, 14, 18]], ctype))\n assert_array_equal(cumsum(a2, axis=1),\n array([[1, 3, 6, 10],\n [5, 11, 18, 27],\n [10, 13, 17, 22]], ctype))\n\n\nclass TestProd(TestCase):\n def test_basic(self):\n ba = [1, 2, 10, 11, 6, 5, 4]\n ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]\n for ctype in [int16, uint16, int32, uint32,\n float32, float64, complex64, complex128]:\n a = array(ba, ctype)\n a2 = array(ba2, ctype)\n if ctype in ['1', 'b']:\n self.assertRaises(ArithmeticError, prod, a)\n 
self.assertRaises(ArithmeticError, prod, a2, 1)\n self.assertRaises(ArithmeticError, prod, a)\n else:\n assert_equal(prod(a, axis=0), 26400)\n assert_array_equal(prod(a2, axis=0),\n array([50, 36, 84, 180], ctype))\n assert_array_equal(prod(a2, axis= -1), array([24, 1890, 600], ctype))\n\n\nclass TestCumprod(TestCase):\n def test_basic(self):\n ba = [1, 2, 10, 11, 6, 5, 4]\n ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]\n for ctype in [int16, uint16, int32, uint32,\n float32, float64, complex64, complex128]:\n a = array(ba, ctype)\n a2 = array(ba2, ctype)\n if ctype in ['1', 'b']:\n self.assertRaises(ArithmeticError, cumprod, a)\n self.assertRaises(ArithmeticError, cumprod, a2, 1)\n self.assertRaises(ArithmeticError, cumprod, a)\n else:\n assert_array_equal(cumprod(a, axis= -1),\n array([1, 2, 20, 220,\n 1320, 6600, 26400], ctype))\n assert_array_equal(cumprod(a2, axis=0),\n array([[ 1, 2, 3, 4],\n [ 5, 12, 21, 36],\n [50, 36, 84, 180]], ctype))\n assert_array_equal(cumprod(a2, axis= -1),\n array([[ 1, 2, 6, 24],\n [ 5, 30, 210, 1890],\n [10, 30, 120, 600]], ctype))\n\n\nclass TestDiff(TestCase):\n def test_basic(self):\n x = [1, 4, 6, 7, 12]\n out = array([3, 2, 1, 5])\n out2 = array([-1, -1, 4])\n out3 = array([0, 5])\n assert_array_equal(diff(x), out)\n assert_array_equal(diff(x, n=2), out2)\n assert_array_equal(diff(x, n=3), out3)\n\n def test_nd(self):\n x = 20 * rand(10, 20, 30)\n out1 = x[:, :, 1:] - x[:, :, :-1]\n out2 = out1[:, :, 1:] - out1[:, :, :-1]\n out3 = x[1:, :, :] - x[:-1, :, :]\n out4 = out3[1:, :, :] - out3[:-1, :, :]\n assert_array_equal(diff(x), out1)\n assert_array_equal(diff(x, n=2), out2)\n assert_array_equal(diff(x, axis=0), out3)\n assert_array_equal(diff(x, n=2, axis=0), out4)\n\n\nclass TestGradient(TestCase):\n def test_basic(self):\n x = array([[1, 1], [3, 4]])\n dx = [array([[2., 3.], [2., 3.]]),\n array([[0., 0.], [1., 1.]])]\n assert_array_equal(gradient(x), dx)\n\n def test_badargs(self):\n # for 2D array, gradient can take 0,1, or 2 extra args\n x = array([[1, 1], [3, 4]])\n assert_raises(SyntaxError, gradient, x, array([1., 1.]),\n array([1., 1.]), array([1., 1.]))\n\n def test_masked(self):\n # Make sure that gradient supports subclasses like masked arrays\n x = np.ma.array([[1, 1], [3, 4]])\n assert_equal(type(gradient(x)[0]), type(x))\n\n\nclass TestAngle(TestCase):\n def test_basic(self):\n x = [1 + 3j, sqrt(2) / 2.0 + 1j * sqrt(2) / 2, 1, 1j, -1, -1j, 1 - 3j, -1 + 3j]\n y = angle(x)\n yo = [arctan(3.0 / 1.0), arctan(1.0), 0, pi / 2, pi, -pi / 2.0,\n - arctan(3.0 / 1.0), pi - arctan(3.0 / 1.0)]\n z = angle(x, deg=1)\n zo = array(yo) * 180 / pi\n assert_array_almost_equal(y, yo, 11)\n assert_array_almost_equal(z, zo, 11)\n\n\nclass TestTrimZeros(TestCase):\n \"\"\" only testing for integer splits.\n \"\"\"\n def test_basic(self):\n a = array([0, 0, 1, 2, 3, 4, 0])\n res = trim_zeros(a)\n assert_array_equal(res, array([1, 2, 3, 4]))\n\n def test_leading_skip(self):\n a = array([0, 0, 1, 0, 2, 3, 4, 0])\n res = trim_zeros(a)\n assert_array_equal(res, array([1, 0, 2, 3, 4]))\n\n def test_trailing_skip(self):\n a = array([0, 0, 1, 0, 2, 3, 0, 4, 0])\n res = trim_zeros(a)\n assert_array_equal(res, array([1, 0, 2, 3, 0, 4]))\n\n\nclass TestExtins(TestCase):\n def test_basic(self):\n a = array([1, 3, 2, 1, 2, 3, 3])\n b = extract(a > 1, a)\n assert_array_equal(b, [3, 2, 2, 3, 3])\n\n def test_place(self):\n a = array([1, 4, 3, 2, 5, 8, 7])\n place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6])\n assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7])\n\n def 
test_both(self):\n a = rand(10)\n mask = a > 0.5\n ac = a.copy()\n c = extract(mask, a)\n place(a, mask, 0)\n place(a, mask, c)\n assert_array_equal(a, ac)\n\n\nclass TestVectorize(TestCase):\n def test_simple(self):\n def addsubtract(a, b):\n if a > b:\n return a - b\n else:\n return a + b\n f = vectorize(addsubtract)\n r = f([0, 3, 6, 9], [1, 3, 5, 7])\n assert_array_equal(r, [1, 6, 1, 2])\n\n def test_scalar(self):\n def addsubtract(a, b):\n if a > b:\n return a - b\n else:\n return a + b\n f = vectorize(addsubtract)\n r = f([0, 3, 6, 9], 5)\n assert_array_equal(r, [5, 8, 1, 4])\n\n def test_large(self):\n x = linspace(-3, 2, 10000)\n f = vectorize(lambda x: x)\n y = f(x)\n assert_array_equal(y, x)\n\n def test_ufunc(self):\n import math\n f = vectorize(math.cos)\n args = array([0, 0.5*pi, pi, 1.5*pi, 2*pi])\n r1 = f(args)\n r2 = cos(args)\n assert_array_equal(r1, r2)\n\n def test_keywords(self):\n import math\n def foo(a, b=1):\n return a + b\n f = vectorize(foo)\n args = array([1,2,3])\n r1 = f(args)\n r2 = array([2,3,4])\n assert_array_equal(r1, r2)\n r1 = f(args, 2)\n r2 = array([3,4,5])\n assert_array_equal(r1, r2)\n\n def test_keywords_no_func_code(self):\n # This needs to test a function that has keywords but\n # no func_code attribute, since otherwise vectorize will\n # inspect the func_code.\n import random\n try:\n f = vectorize(random.randrange)\n except:\n raise AssertionError()\n\n\nclass TestDigitize(TestCase):\n def test_forward(self):\n x = arange(-6, 5)\n bins = arange(-5, 5)\n assert_array_equal(digitize(x, bins), arange(11))\n\n def test_reverse(self):\n x = arange(5, -6, -1)\n bins = arange(5, -5, -1)\n assert_array_equal(digitize(x, bins), arange(11))\n\n def test_random(self):\n x = rand(10)\n bin = linspace(x.min(), x.max(), 10)\n assert all(digitize(x, bin) != 0)\n\n\nclass TestUnwrap(TestCase):\n def test_simple(self):\n #check that unwrap removes jumps greather that 2*pi\n assert_array_equal(unwrap([1, 1 + 2 * pi]), [1, 1])\n #check that unwrap maintans continuity\n assert(all(diff(unwrap(rand(10) * 100)) < pi))\n\n\nclass TestFilterwindows(TestCase):\n def test_hanning(self):\n #check symmetry\n w = hanning(10)\n assert_array_almost_equal(w, flipud(w), 7)\n #check known value\n assert_almost_equal(sum(w, axis=0), 4.500, 4)\n\n def test_hamming(self):\n #check symmetry\n w = hamming(10)\n assert_array_almost_equal(w, flipud(w), 7)\n #check known value\n assert_almost_equal(sum(w, axis=0), 4.9400, 4)\n\n def test_bartlett(self):\n #check symmetry\n w = bartlett(10)\n assert_array_almost_equal(w, flipud(w), 7)\n #check known value\n assert_almost_equal(sum(w, axis=0), 4.4444, 4)\n\n def test_blackman(self):\n #check symmetry\n w = blackman(10)\n assert_array_almost_equal(w, flipud(w), 7)\n #check known value\n assert_almost_equal(sum(w, axis=0), 3.7800, 4)\n\n\nclass TestTrapz(TestCase):\n def test_simple(self):\n r = trapz(exp(-1.0 / 2 * (arange(-10, 10, .1)) ** 2) / sqrt(2 * pi), dx=0.1)\n #check integral of normal equals 1\n assert_almost_equal(sum(r, axis=0), 1, 7)\n\n def test_ndim(self):\n x = linspace(0, 1, 3)\n y = linspace(0, 2, 8)\n z = linspace(0, 3, 13)\n\n wx = ones_like(x) * (x[1] - x[0])\n wx[0] /= 2\n wx[-1] /= 2\n wy = ones_like(y) * (y[1] - y[0])\n wy[0] /= 2\n wy[-1] /= 2\n wz = ones_like(z) * (z[1] - z[0])\n wz[0] /= 2\n wz[-1] /= 2\n\n q = x[:, None, None] + y[None, :, None] + z[None, None, :]\n\n qx = (q * wx[:, None, None]).sum(axis=0)\n qy = (q * wy[None, :, None]).sum(axis=1)\n qz = (q * wz[None, None, :]).sum(axis=2)\n\n # n-d `x`\n r = 
trapz(q, x=x[:, None, None], axis=0)\n assert_almost_equal(r, qx)\n r = trapz(q, x=y[None, :, None], axis=1)\n assert_almost_equal(r, qy)\n r = trapz(q, x=z[None, None, :], axis=2)\n assert_almost_equal(r, qz)\n\n # 1-d `x`\n r = trapz(q, x=x, axis=0)\n assert_almost_equal(r, qx)\n r = trapz(q, x=y, axis=1)\n assert_almost_equal(r, qy)\n r = trapz(q, x=z, axis=2)\n assert_almost_equal(r, qz)\n\n\nclass TestSinc(TestCase):\n def test_simple(self):\n assert(sinc(0) == 1)\n w = sinc(linspace(-1, 1, 100))\n #check symmetry\n assert_array_almost_equal(w, flipud(w), 7)\n\n def test_array_like(self):\n x = [0, 0.5]\n y1 = sinc(array(x))\n y2 = sinc(list(x))\n y3 = sinc(tuple(x))\n assert_array_equal(y1, y2)\n assert_array_equal(y1, y3)\n\nclass TestHistogram(TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_simple(self):\n n = 100\n v = rand(n)\n (a, b) = histogram(v)\n #check if the sum of the bins equals the number of samples\n assert_equal(sum(a, axis=0), n)\n #check that the bin counts are evenly spaced when the data is from a\n # linear function\n (a, b) = histogram(linspace(0, 10, 100))\n assert_array_equal(a, 10)\n\n def test_one_bin(self):\n # Ticket 632\n hist, edges = histogram([1, 2, 3, 4], [1, 2])\n assert_array_equal(hist, [2, ])\n assert_array_equal(edges, [1, 2])\n\n def test_normed(self):\n # Check that the integral of the density equals 1.\n n = 100\n v = rand(n)\n a, b = histogram(v, normed=True)\n area = sum(a * diff(b))\n assert_almost_equal(area, 1)\n\n # Check with non constant bin width\n v = rand(n) * 10\n bins = [0, 1, 5, 9, 10]\n a, b = histogram(v, bins, normed=True)\n area = sum(a * diff(b))\n assert_almost_equal(area, 1)\n\n def test_outliers(self):\n # Check that outliers are not tallied\n a = arange(10) + .5\n\n # Lower outliers\n h, b = histogram(a, range=[0, 9])\n assert_equal(h.sum(), 9)\n\n # Upper outliers\n h, b = histogram(a, range=[1, 10])\n assert_equal(h.sum(), 9)\n\n # Normalization\n h, b = histogram(a, range=[1, 9], normed=True)\n assert_equal((h * diff(b)).sum(), 1)\n\n # Weights\n w = arange(10) + .5\n h, b = histogram(a, range=[1, 9], weights=w, normed=True)\n assert_equal((h * diff(b)).sum(), 1)\n\n h, b = histogram(a, bins=8, range=[1, 9], weights=w)\n assert_equal(h, w[1:-1])\n\n def test_type(self):\n # Check the type of the returned histogram\n a = arange(10) + .5\n h, b = histogram(a)\n assert(issubdtype(h.dtype, int))\n\n h, b = histogram(a, normed=True)\n assert(issubdtype(h.dtype, float))\n\n h, b = histogram(a, weights=ones(10, int))\n assert(issubdtype(h.dtype, int))\n\n h, b = histogram(a, weights=ones(10, float))\n assert(issubdtype(h.dtype, float))\n\n def test_weights(self):\n v = rand(100)\n w = ones(100) * 5\n a, b = histogram(v)\n na, nb = histogram(v, normed=True)\n wa, wb = histogram(v, weights=w)\n nwa, nwb = histogram(v, weights=w, normed=True)\n assert_array_almost_equal(a * 5, wa)\n assert_array_almost_equal(na, nwa)\n\n # Check weights are properly applied.\n v = linspace(0, 10, 10)\n w = concatenate((zeros(5), ones(5)))\n wa, wb = histogram(v, bins=arange(11), weights=w)\n assert_array_almost_equal(wa, w)\n\n # Check with integer weights\n wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])\n assert_array_equal(wa, [4, 5, 0, 1])\n wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], normed=True)\n assert_array_equal(wa, array([4, 5, 0, 1]) / 10. / 3. 
* 4)\n\n\nclass TestHistogramdd(TestCase):\n def test_simple(self):\n x = array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], \\\n [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])\n H, edges = histogramdd(x, (2, 3, 3), range=[[-1, 1], [0, 3], [0, 3]])\n answer = asarray([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], [[0, 1, 0], [0, 0, 1],\n [0, 0, 1]]])\n assert_array_equal(H, answer)\n # Check normalization\n ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]\n H, edges = histogramdd(x, bins=ed, normed=True)\n assert(all(H == answer / 12.))\n # Check that H has the correct shape.\n H, edges = histogramdd(x, (2, 3, 4), range=[[-1, 1], [0, 3], [0, 4]],\n normed=True)\n answer = asarray([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], [[0, 1, 0, 0],\n [0, 0, 1, 0], [0, 0, 1, 0]]])\n assert_array_almost_equal(H, answer / 6., 4)\n # Check that a sequence of arrays is accepted and H has the correct\n # shape.\n z = [squeeze(y) for y in split(x, 3, axis=1)]\n H, edges = histogramdd(z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])\n answer = asarray([[[0, 0], [0, 0], [0, 0]],\n [[0, 1], [0, 0], [1, 0]],\n [[0, 1], [0, 0], [0, 0]],\n [[0, 0], [0, 0], [0, 0]]])\n assert_array_equal(H, answer)\n\n Z = zeros((5, 5, 5))\n Z[range(5), range(5), range(5)] = 1.\n H, edges = histogramdd([arange(5), arange(5), arange(5)], 5)\n assert_array_equal(H, Z)\n\n def test_shape_3d(self):\n # All possible permutations for bins of different lengths in 3D.\n bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),\n (4, 5, 6))\n r = rand(10, 3)\n for b in bins:\n H, edges = histogramdd(r, b)\n assert(H.shape == b)\n\n def test_shape_4d(self):\n # All possible permutations for bins of different lengths in 4D.\n bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),\n (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),\n (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),\n (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),\n (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),\n (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))\n\n r = rand(10, 4)\n for b in bins:\n H, edges = histogramdd(r, b)\n assert(H.shape == b)\n\n def test_weights(self):\n v = rand(100, 2)\n hist, edges = histogramdd(v)\n n_hist, edges = histogramdd(v, normed=True)\n w_hist, edges = histogramdd(v, weights=ones(100))\n assert_array_equal(w_hist, hist)\n w_hist, edges = histogramdd(v, weights=ones(100) * 2, normed=True)\n assert_array_equal(w_hist, n_hist)\n w_hist, edges = histogramdd(v, weights=ones(100, int) * 2)\n assert_array_equal(w_hist, 2 * hist)\n\n def test_identical_samples(self):\n x = zeros((10, 2), int)\n hist, edges = histogramdd(x, bins=2)\n assert_array_equal(edges[0], array([-0.5, 0. 
, 0.5]))\n\n\nclass TestUnique(TestCase):\n def test_simple(self):\n x = array([4, 3, 2, 1, 1, 2, 3, 4, 0])\n assert(all(unique(x) == [0, 1, 2, 3, 4]))\n assert(unique(array([1, 1, 1, 1, 1])) == array([1]))\n x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham']\n assert(all(unique(x) == ['bar', 'foo', 'ham', 'widget']))\n x = array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j])\n assert(all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))\n\n\nclass TestCheckFinite(TestCase):\n def test_simple(self):\n a = [1, 2, 3]\n b = [1, 2, inf]\n c = [1, 2, nan]\n numpy.lib.asarray_chkfinite(a)\n assert_raises(ValueError, numpy.lib.asarray_chkfinite, b)\n assert_raises(ValueError, numpy.lib.asarray_chkfinite, c)\n\n\nclass TestNaNFuncts(TestCase):\n def setUp(self):\n self.A = array([[[ nan, 0.01319214, 0.01620964],\n [ 0.11704017, nan, 0.75157887],\n [ 0.28333658, 0.1630199 , nan ]],\n [[ 0.59541557, nan, 0.37910852],\n [ nan, 0.87964135, nan ],\n [ 0.70543747, nan, 0.34306596]],\n [[ 0.72687499, 0.91084584, nan ],\n [ 0.84386844, 0.38944762, 0.23913896],\n [ nan, 0.37068164, 0.33850425]]])\n\n def test_nansum(self):\n assert_almost_equal(nansum(self.A), 8.0664079100000006)\n assert_almost_equal(nansum(self.A, 0),\n array([[ 1.32229056, 0.92403798, 0.39531816],\n [ 0.96090861, 1.26908897, 0.99071783],\n [ 0.98877405, 0.53370154, 0.68157021]]))\n assert_almost_equal(nansum(self.A, 1),\n array([[ 0.40037675, 0.17621204, 0.76778851],\n [ 1.30085304, 0.87964135, 0.72217448],\n [ 1.57074343, 1.6709751 , 0.57764321]]))\n assert_almost_equal(nansum(self.A, 2),\n array([[ 0.02940178, 0.86861904, 0.44635648],\n [ 0.97452409, 0.87964135, 1.04850343],\n [ 1.63772083, 1.47245502, 0.70918589]]))\n\n def test_nanmin(self):\n assert_almost_equal(nanmin(self.A), 0.01319214)\n assert_almost_equal(nanmin(self.A, 0),\n array([[ 0.59541557, 0.01319214, 0.01620964],\n [ 0.11704017, 0.38944762, 0.23913896],\n [ 0.28333658, 0.1630199 , 0.33850425]]))\n assert_almost_equal(nanmin(self.A, 1),\n array([[ 0.11704017, 0.01319214, 0.01620964],\n [ 0.59541557, 0.87964135, 0.34306596],\n [ 0.72687499, 0.37068164, 0.23913896]]))\n assert_almost_equal(nanmin(self.A, 2),\n array([[ 0.01319214, 0.11704017, 0.1630199 ],\n [ 0.37910852, 0.87964135, 0.34306596],\n [ 0.72687499, 0.23913896, 0.33850425]]))\n assert nanmin([nan, nan]) is nan\n\n def test_nanargmin(self):\n assert_almost_equal(nanargmin(self.A), 1)\n assert_almost_equal(nanargmin(self.A, 0),\n array([[1, 0, 0],\n [0, 2, 2],\n [0, 0, 2]]))\n assert_almost_equal(nanargmin(self.A, 1),\n array([[1, 0, 0],\n [0, 1, 2],\n [0, 2, 1]]))\n assert_almost_equal(nanargmin(self.A, 2),\n array([[1, 0, 1],\n [2, 1, 2],\n [0, 2, 2]]))\n\n def test_nanmax(self):\n assert_almost_equal(nanmax(self.A), 0.91084584000000002)\n assert_almost_equal(nanmax(self.A, 0),\n array([[ 0.72687499, 0.91084584, 0.37910852],\n [ 0.84386844, 0.87964135, 0.75157887],\n [ 0.70543747, 0.37068164, 0.34306596]]))\n assert_almost_equal(nanmax(self.A, 1),\n array([[ 0.28333658, 0.1630199 , 0.75157887],\n [ 0.70543747, 0.87964135, 0.37910852],\n [ 0.84386844, 0.91084584, 0.33850425]]))\n assert_almost_equal(nanmax(self.A, 2),\n array([[ 0.01620964, 0.75157887, 0.28333658],\n [ 0.59541557, 0.87964135, 0.70543747],\n [ 0.91084584, 0.84386844, 0.37068164]]))\n\n def test_nanmin_allnan_on_axis(self):\n assert_array_equal(isnan(nanmin([[nan] * 2] * 3, axis=1)),\n [True, True, True])\n\n def test_nanmin_masked(self):\n a = np.ma.fix_invalid([[2, 1, 3, nan], [5, 2, 3, nan]])\n ctrl_mask = a._mask.copy()\n test = np.nanmin(a, axis=1)\n 
assert_equal(test, [1, 2])\n assert_equal(a._mask, ctrl_mask)\n assert_equal(np.isinf(a), np.zeros((2, 4), dtype=bool))\n\n\nclass TestNanFunctsIntTypes(TestCase):\n\n int_types = (int8, int16, int32, int64, uint8, uint16, uint32, uint64)\n\n def setUp(self, *args, **kwargs):\n self.A = array([127, 39, 93, 87, 46])\n\n def integer_arrays(self):\n for dtype in self.int_types:\n yield self.A.astype(dtype)\n\n def test_nanmin(self):\n min_value = min(self.A)\n for A in self.integer_arrays():\n assert_equal(nanmin(A), min_value)\n\n def test_nanmax(self):\n max_value = max(self.A)\n for A in self.integer_arrays():\n assert_equal(nanmax(A), max_value)\n\n def test_nanargmin(self):\n min_arg = argmin(self.A)\n for A in self.integer_arrays():\n assert_equal(nanargmin(A), min_arg)\n\n def test_nanargmax(self):\n max_arg = argmax(self.A)\n for A in self.integer_arrays():\n assert_equal(nanargmax(A), max_arg)\n\n\nclass TestCorrCoef(TestCase):\n A = array([[ 0.15391142, 0.18045767, 0.14197213],\n [ 0.70461506, 0.96474128, 0.27906989],\n [ 0.9297531 , 0.32296769, 0.19267156]])\n B = array([[ 0.10377691, 0.5417086 , 0.49807457],\n [ 0.82872117, 0.77801674, 0.39226705],\n [ 0.9314666 , 0.66800209, 0.03538394]])\n res1 = array([[ 1. , 0.9379533 , -0.04931983],\n [ 0.9379533 , 1. , 0.30007991],\n [-0.04931983, 0.30007991, 1. ]])\n res2 = array([[ 1. , 0.9379533 , -0.04931983,\n 0.30151751, 0.66318558, 0.51532523],\n [ 0.9379533 , 1. , 0.30007991,\n - 0.04781421, 0.88157256, 0.78052386],\n [-0.04931983, 0.30007991, 1. ,\n - 0.96717111, 0.71483595, 0.83053601],\n [ 0.30151751, -0.04781421, -0.96717111,\n 1. , -0.51366032, -0.66173113],\n [ 0.66318558, 0.88157256, 0.71483595,\n - 0.51366032, 1. , 0.98317823],\n [ 0.51532523, 0.78052386, 0.83053601,\n - 0.66173113, 0.98317823, 1. ]])\n\n def test_simple(self):\n assert_almost_equal(corrcoef(self.A), self.res1)\n assert_almost_equal(corrcoef(self.A, self.B), self.res2)\n\n def test_ddof(self):\n assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1)\n assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2)\n\n\nclass Test_i0(TestCase):\n def test_simple(self):\n assert_almost_equal(i0(0.5), array(1.0634833707413234))\n A = array([ 0.49842636, 0.6969809 , 0.22011976, 0.0155549])\n assert_almost_equal(i0(A),\n array([ 1.06307822, 1.12518299, 1.01214991, 1.00006049]))\n B = array([[ 0.827002 , 0.99959078],\n [ 0.89694769, 0.39298162],\n [ 0.37954418, 0.05206293],\n [ 0.36465447, 0.72446427],\n [ 0.48164949, 0.50324519]])\n assert_almost_equal(i0(B),\n array([[ 1.17843223, 1.26583466],\n [ 1.21147086, 1.0389829 ],\n [ 1.03633899, 1.00067775],\n [ 1.03352052, 1.13557954],\n [ 1.0588429 , 1.06432317]]))\n\n\nclass TestKaiser(TestCase):\n def test_simple(self):\n assert_almost_equal(kaiser(0, 1.0), array([]))\n assert isfinite(kaiser(1, 1.0))\n assert_almost_equal(kaiser(2, 1.0), array([ 0.78984831, 0.78984831]))\n assert_almost_equal(kaiser(5, 1.0),\n array([ 0.78984831, 0.94503323, 1. ,\n 0.94503323, 0.78984831]))\n assert_almost_equal(kaiser(5, 1.56789),\n array([ 0.58285404, 0.88409679, 1. 
,\n 0.88409679, 0.58285404]))\n\n def test_int_beta(self):\n kaiser(3, 4)\n\n\nclass TestMsort(TestCase):\n def test_simple(self):\n A = array([[ 0.44567325, 0.79115165, 0.5490053 ],\n [ 0.36844147, 0.37325583, 0.96098397],\n [ 0.64864341, 0.52929049, 0.39172155]])\n assert_almost_equal(msort(A),\n array([[ 0.36844147, 0.37325583, 0.39172155],\n [ 0.44567325, 0.52929049, 0.5490053 ],\n [ 0.64864341, 0.79115165, 0.96098397]]))\n\n\nclass TestMeshgrid(TestCase):\n def test_simple(self):\n [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])\n assert all(X == array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]))\n assert all(Y == array([[4, 4, 4],\n [5, 5, 5],\n [6, 6, 6],\n [7, 7, 7]]))\n\n\nclass TestPiecewise(TestCase):\n def test_simple(self):\n # Condition is single bool list\n x = piecewise([0, 0], [True, False], [1])\n assert_array_equal(x, [1, 0])\n\n # List of conditions: single bool list\n x = piecewise([0, 0], [[True, False]], [1])\n assert_array_equal(x, [1, 0])\n\n # Conditions is single bool array\n x = piecewise([0, 0], array([True, False]), [1])\n assert_array_equal(x, [1, 0])\n\n # Condition is single int array\n x = piecewise([0, 0], array([1, 0]), [1])\n assert_array_equal(x, [1, 0])\n\n # List of conditions: int array\n x = piecewise([0, 0], [array([1, 0])], [1])\n assert_array_equal(x, [1, 0])\n\n\n x = piecewise([0, 0], [[False, True]], [lambda x:-1])\n assert_array_equal(x, [0, -1])\n\n x = piecewise([1, 2], [[True, False], [False, True]], [3, 4])\n assert_array_equal(x, [3, 4])\n\n def test_default(self):\n # No value specified for x[1], should be 0\n x = piecewise([1, 2], [True, False], [2])\n assert_array_equal(x, [2, 0])\n\n # Should set x[1] to 3\n x = piecewise([1, 2], [True, False], [2, 3])\n assert_array_equal(x, [2, 3])\n\n def test_0d(self):\n x = array(3)\n y = piecewise(x, x > 3, [4, 0])\n assert y.ndim == 0\n assert y == 0\n\n\nclass TestBincount(TestCase):\n def test_simple(self):\n y = np.bincount(np.arange(4))\n assert_array_equal(y, np.ones(4))\n\n def test_simple2(self):\n y = np.bincount(np.array([1, 5, 2, 4, 1]))\n assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1]))\n\n def test_simple_weight(self):\n x = np.arange(4)\n w = np.array([0.2, 0.3, 0.5, 0.1])\n y = np.bincount(x, w)\n assert_array_equal(y, w)\n\n def test_simple_weight2(self):\n x = np.array([1, 2, 4, 5, 2])\n w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])\n y = np.bincount(x, w)\n assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1]))\n\n\nclass TestInterp(TestCase):\n def test_exceptions(self):\n assert_raises(ValueError, interp, 0, [], [])\n assert_raises(ValueError, interp, 0, [0], [1, 2])\n\n def test_basic(self):\n x = np.linspace(0, 1, 5)\n y = np.linspace(0, 1, 5)\n x0 = np.linspace(0, 1, 50)\n assert_almost_equal(np.interp(x0, x, y), x0)\n\n def test_right_left_behavior(self):\n assert_equal(interp([-1, 0, 1], [0], [1]), [1,1,1])\n assert_equal(interp([-1, 0, 1], [0], [1], left=0), [0,1,1])\n assert_equal(interp([-1, 0, 1], [0], [1], right=0), [1,1,0])\n assert_equal(interp([-1, 0, 1], [0], [1], left=0, right=0), [0,1,0])\n\n def test_scalar_interpolation_point(self):\n x = np.linspace(0, 1, 5)\n y = np.linspace(0, 1, 5)\n x0 = 0\n assert_almost_equal(np.interp(x0, x, y), x0)\n x0 = .3\n assert_almost_equal(np.interp(x0, x, y), x0)\n x0 = np.float32(.3)\n assert_almost_equal(np.interp(x0, x, y), x0)\n x0 = np.float64(.3)\n assert_almost_equal(np.interp(x0, x, y), x0)\n\n def test_zero_dimensional_interpolation_point(self):\n x = np.linspace(0, 1, 5)\n y = np.linspace(0, 1, 5)\n x0 = 
np.array(.3)\n assert_almost_equal(np.interp(x0, x, y), x0)\n x0 = np.array(.3, dtype=object)\n assert_almost_equal(np.interp(x0, x, y), .3)\n\n\ndef compare_results(res, desired):\n for i in range(len(desired)):\n assert_array_equal(res[i], desired[i])\n\n\ndef test_percentile_list():\n assert_equal(np.percentile([1, 2, 3], 0), 1)\n\ndef test_percentile_out():\n x = np.array([1, 2, 3])\n y = np.zeros((3,))\n p = (1, 2, 3)\n np.percentile(x, p, out=y)\n assert_equal(y, np.percentile(x, p))\n\n x = np.array([[1, 2, 3],\n [4, 5, 6]])\n\n y = np.zeros((3, 3))\n np.percentile(x, p, axis=0, out=y)\n assert_equal(y, np.percentile(x, p, axis=0))\n\n y = np.zeros((3, 2))\n np.percentile(x, p, axis=1, out=y)\n assert_equal(y, np.percentile(x, p, axis=1))\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n"
]
| [
[
"numpy.bincount",
"numpy.isinf",
"numpy.array",
"numpy.ma.fix_invalid",
"numpy.zeros",
"numpy.percentile",
"numpy.ones",
"numpy.interp",
"numpy.nanmin",
"numpy.float64",
"numpy.float32",
"numpy.ma.array",
"numpy.arange",
"numpy.linspace"
]
]
|
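Illustrative sketch only, not part of the dataset row above: a minimal, self-contained exercise of a few of the numpy calls this test file is recorded as using (bincount, interp, percentile with an out array, ma.fix_invalid), run on toy inputs.

```python
# Toy exercise of some of the APIs listed for the record above; values are made up.
import numpy as np

counts = np.bincount(np.array([1, 5, 2, 4, 1]))                   # -> [0, 2, 1, 0, 1, 1]
weighted = np.bincount(np.arange(4), np.array([0.2, 0.3, 0.5, 0.1]))  # weights returned as-is

x = np.linspace(0, 1, 5)
assert np.allclose(np.interp(0.3, x, x), 0.3)                      # scalar interpolation point

grid = np.array([[1, 2, 3], [4, 5, 6]])
out = np.zeros((3, 3))
np.percentile(grid, (25, 50, 75), axis=0, out=out)                 # results written into `out`

masked = np.ma.fix_invalid(np.array([1.0, np.inf, np.nan]))        # inf/nan entries become masked
print(counts, weighted, out, masked, sep="\n")
```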
eWaterCycle/streamingDataAssimilation | [
"00e05cbd45588439503c961e2f7c915baf30d8e6"
]
| [
"EnKF.py"
]
| [
"#! /usr/bin/env python\nimport numpy as np\nimport math\n\n\ndef EnKF(ensembleForecast, observationEnsemble, transformation):\n #use notation from Evenson 2003.\n \n p = 0.99 # % of Eigenvectors to include in svd method to ensure stability.\n\n A=ensembleForecast\n D=observationEnsemble\n H=transformation\n N=np.shape(A)[1]\n\n Apr = A @ (np.eye(N)-(1/N)*np.ones(N))\n Dpr = D - H(A)\n gamma=D @ (np.eye(N)-(1/N)*np.ones(N))\n\n\n #now calculate [U,S,V]=SVD(H*Apr+gamma)\n Utemp,Sigma,vH = np.linalg.svd(H(Apr)+gamma)\n #only keep p % of singular values, discard minimum of 1.\n pN = max(1, math.floor(N * (1-p)))\n\n U=np.zeros([np.shape(D)[0],N])\n\n U[0:(np.shape(Utemp)[0]),0:min((np.shape(Utemp)[1]-pN-1),np.shape(U)[1]-pN-1)]=Utemp[:,0:min((np.shape(Utemp)[1]-pN-1),np.shape(U)[1]-pN-1)]\n\n Sigma=np.diag(Sigma) @ np.diag(Sigma).T\n Sigma = np.power(np.diag(Sigma),-1)\n Sigma[-pN:] = 0\n LambdaInv = np.zeros([N,N])\n LambdaInv[0:np.shape(Sigma)[0],0:np.shape(Sigma)[0]]=np.diag(Sigma)\n\n X1=LambdaInv @ U.T\n X2=X1 @ Dpr\n X3=U @ X2\n X4=H(Apr).T @ X3\n ensembleAnalyses = A + Apr @ X4\n return ensembleAnalyses\n"
]
| [
[
"numpy.zeros",
"numpy.ones",
"numpy.shape",
"numpy.eye",
"numpy.diag"
]
]
|
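Illustrative sketch only, with toy sizes: the ensemble-anomaly (centering) step that EnKF.py builds from np.eye, np.ones and a matrix product, plus the two directions of np.diag it relies on.

```python
# Centering an ensemble matrix A, as in the EnKF record above (toy numbers).
import numpy as np

N = 4                                            # ensemble size (toy value)
A = np.arange(12, dtype=float).reshape(3, N)     # 3 state variables, N members

# I_N - (1/N) * 1 1^T removes the ensemble mean from every row of A.
centering = np.eye(N) - np.ones((N, N)) / N
anomalies = A @ centering
assert np.allclose(anomalies.mean(axis=1), 0.0)  # each row is now mean-free

# np.diag is used both ways in the filter: vector -> diagonal matrix, matrix -> diagonal.
sigma = np.diag(np.array([3.0, 2.0, 1.0]))
print(np.diag(sigma))                            # -> [3. 2. 1.]
```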
mmore500/tag-olympics | [
"23f89aa278b2866220696eddb89ecaa860e2d778"
]
| [
"script/LowGraphAnalysisStatsPlot.py"
]
| [
"import matplotlib\nmatplotlib.use('Agg')\nimport pandas as pd\nimport seaborn as sns\nimport sys\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nfrom slugify import slugify\n\nfrom keyname import keyname as kn\nfrom fileshash import fileshash as fsh\n\n# open-type fonts\nmatplotlib.rcParams['pdf.fonttype'] = 42\n\ndataframe_filename = sys.argv[2]\n\ndf_key = pd.read_csv(sys.argv[1])\n\ndf_data = pd.read_csv(dataframe_filename)\n\nkey = {\n row['Metric'] : {\n col : row[col]\n for col, val in row.iteritems() if col != 'Metric'\n }\n for idx, row in df_key.iterrows()\n}\n\ndf_data['Dimension'] = df_data.apply(\n lambda x: key[x['Metric']]['Dimension'],\n axis=1\n)\n\ndf_data['Dimension Type'] = df_data.apply(\n lambda x: key[x['Metric']]['Dimension Type'],\n axis=1\n)\n\ndf_data['Inverse'] = df_data.apply(\n lambda x: key[x['Metric']]['Inverse'],\n axis=1\n)\n\ndf_data['Type'] = df_data.apply(\n lambda x: (\n (\"Inverse \" if x['Inverse'] else \"Direct\") + \" / \" + x['Dimension Type']\n ),\n axis=1\n)\n\ndf_data['Metric'] = df_data.apply(\n lambda x: (\n ('Sliding ' if key[x['Metric']]['Sliding'] else '')\n + key[x['Metric']]['Base Metric']\n ),\n axis=1\n)\n\nfor col in (\n col for col in list(df_data)\n if col not in [\n \"Weight\", \"Metric\", \"Sample\", \"Dimension\",\n \"Inverse\", \"Dimension Type\", \"Type\"\n ]):\n\n g = sns.FacetGrid(\n df_data,\n col='Metric',\n row='Type',\n hue='Inverse',\n margin_titles=True\n ).set(xlim=(-1, 2))\n g.map(sns.barplot, \"Dimension\", col)\n\n outfile = kn.pack({\n 'title' : kn.unpack(dataframe_filename)['title'],\n 'bitweight' : kn.unpack(dataframe_filename)['bitweight'],\n 'seed' : kn.unpack(dataframe_filename)['seed'],\n 'stat' : slugify(col),\n '_data_hathash_hash' : fsh.FilesHash().hash_files([dataframe_filename]),\n '_script_fullcat_hash' : fsh.FilesHash(\n file_parcel=\"full_parcel\",\n files_join=\"cat_join\"\n ).hash_files([sys.argv[0]]),\n # '_source_hash' :kn.unpack(dataframe_filename)['_source_hash'],\n 'ext' : '.pdf'\n })\n plt.savefig(\n outfile,\n transparent=True,\n bbox_inches='tight',\n pad_inches=0\n )\n print(\"output saved to\", outfile)\n"
]
| [
[
"matplotlib.use",
"matplotlib.pyplot.savefig",
"pandas.read_csv"
]
]
|
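Illustrative sketch only: the headless-plotting pattern used by the script above (matplotlib.use before pyplot is imported, then savefig), with a made-up CSV and output filename instead of the script's command-line arguments.

```python
# Headless rendering + CSV input, mirroring the APIs listed for the record above.
import io
import matplotlib
matplotlib.use("Agg")                       # select a non-interactive backend first
from matplotlib import pyplot as plt
import pandas as pd

csv_text = "Metric,Dimension,Value\nA,1,0.3\nA,2,0.7\nB,1,0.5\n"
df = pd.read_csv(io.StringIO(csv_text))     # stand-in for pd.read_csv(sys.argv[1])

df.groupby("Dimension")["Value"].mean().plot(kind="bar")
plt.savefig("example.pdf", transparent=True, bbox_inches="tight", pad_inches=0)
```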
Fuann/TENET | [
"357ceacd7ef820932e4208c14d2d998d537010c8"
]
| [
"sptk/compute_similar_score.py"
]
| [
"#!/usr/bin/env python\n\n# wujian@2018\n\"\"\"\nCompute score for speaker varification tasks\n\"\"\"\n\nimport argparse\nimport numpy as np\n\nfrom libs.data_handler import NumpyReader, ScriptReader, parse_scps\nfrom libs.utils import get_logger\n\nlogger = get_logger(__name__)\n\n\ndef run(args):\n utt2spk = parse_scps(args.utt2spk)\n\n def Reader(scp, t):\n return NumpyReader(scp) if t == \"numpy\" else ScriptReader(scp,\n matrix=False)\n\n spks_reader = Reader(args.spks_scp, args.type)\n spks_keys, spks_embs = [], []\n for spkid, spkvec in spks_reader:\n spks_keys.append(spkid)\n spks_embs.append(spkvec)\n spks_mat = np.stack(spks_embs)\n if args.normalize:\n spks_mat = np.linalg.norm(spks_mat, axis=1, ord=2, keepdims=True)\n logger.info(\"Load {:d} speakers from enrollment embeddings\".format(\n len(spks_keys)))\n\n eval_reader = Reader(args.eval_scp, args.type)\n for uttid, uttvec in eval_reader:\n spkid = utt2spk[uttid]\n if args.normalize:\n uttvec = uttvec / np.linalg.norm(uttvec)\n if spkid not in spks_keys:\n raise RuntimeError(\n \"Seems speaker {} do not exist in enrollment set\".format(\n spkid))\n # using dot product, because embeddings has been normalized\n # 1 x N\n score_mat = uttvec @ np.transpose(spks_mat)\n for index, cmpid in enumerate(spks_keys):\n print(\"{:.2f} {}\".format(\n score_mat[index], \"target\" if cmpid == spkid else \"nontarget\"))\n logger.info(\"Compute scores for {:d} utterances done\".format(\n len(eval_reader)))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Command to compute scores between candidate embeddings \"\n \"and registered ones, output results to stdout, which could \"\n \"be used to compute eer using compute-eer in kaldi.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"spks_scp\",\n type=str,\n help=\"Embedding rspecifier computed \"\n \"from enrollment utterances\")\n parser.add_argument(\"eval_scp\",\n type=str,\n help=\"Embedding rspecifier to evaluate perfermance\")\n parser.add_argument(\"--utt2spk\",\n type=str,\n required=True,\n help=\"Rspecifier for utterance to speaker map\")\n parser.add_argument(\"--vector-type\",\n dest=\"type\",\n type=str,\n choices=[\"kaldi\", \"numpy\"],\n default=\"kaldi\",\n help=\"Storage format for embeddings\")\n parser.add_argument(\"--normalize\",\n action=\"store_true\",\n help=\"If true, normalize embeddings \"\n \"before compute dot product\")\n args = parser.parse_args()\n run(args)"
]
| [
[
"numpy.stack",
"numpy.transpose",
"numpy.linalg.norm"
]
]
|
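Illustrative sketch only, with toy embeddings: cosine-style scoring between one utterance vector and a small enrollment matrix using np.stack, np.linalg.norm and np.transpose. The normalisation shown here is the division form; the recorded script's --normalize branch assigns the norms themselves back to spks_mat.

```python
# Score one utterance embedding against enrolled speaker embeddings (toy data).
import numpy as np

spk_embs = [np.array([1.0, 0.0, 0.0]),
            np.array([0.0, 2.0, 0.0]),
            np.array([0.0, 0.0, 0.5])]
spks_mat = np.stack(spk_embs)                                          # (num_spks, dim)
spks_mat = spks_mat / np.linalg.norm(spks_mat, axis=1, keepdims=True)  # L2-normalize rows

uttvec = np.array([0.9, 0.1, 0.0])
uttvec = uttvec / np.linalg.norm(uttvec)

scores = uttvec @ np.transpose(spks_mat)    # one dot product per enrolled speaker
print(scores)                               # highest score = closest speaker
```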
LukasSchaefer/lb-foraging | [
"11e9b2346713695192d247b97094ad5f1e7dd480"
]
| [
"lbforaging.py"
]
| [
"import argparse\nimport logging\nimport random\nimport time\nimport gym\nimport numpy as np\nimport lbforaging\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _game_loop(env, render):\n \"\"\"\n \"\"\"\n obs = env.reset()\n done = False\n\n if render:\n env.render()\n time.sleep(0.5)\n\n while not done:\n\n actions = env.action_space.sample()\n\n nobs, nreward, ndone, _ = env.step(actions)\n if sum(nreward) > 0:\n print(nreward)\n\n if render:\n env.render()\n time.sleep(0.5)\n\n done = np.all(ndone)\n # print(env.players[0].score, env.players[1].score)\n\n\ndef main(game_count=1, render=False):\n env = gym.make(\"Foraging-8x8-2p-2f-v2\")\n obs = env.reset()\n\n for episode in range(game_count):\n _game_loop(env, render)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Play the level foraging game.\")\n\n parser.add_argument(\"--render\", action=\"store_true\")\n parser.add_argument(\n \"--times\", type=int, default=1, help=\"How many times to run the game\"\n )\n\n args = parser.parse_args()\n main(args.times, args.render)\n"
]
| [
[
"numpy.all"
]
]
|
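Illustrative sketch only: the per-step bookkeeping from the game loop above, with hand-written reward/done vectors standing in for env.step output.

```python
# Termination and reward check as in the lbforaging loop (toy values).
import numpy as np

nreward = [0.0, 0.5]
ndone = [True, False]

if sum(nreward) > 0:        # at least one agent was rewarded this step
    print(nreward)

done = np.all(ndone)        # episode ends only when every agent reports done
print(done)                 # False here -> keep stepping
```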
jimwwalker/perfrunner | [
"fda19ce9d860f8548b3feeb761b9a70598500cce"
]
| [
"spring/querygen.py"
]
| [
"from itertools import cycle\nfrom typing import List, Tuple\n\nfrom couchbase.n1ql import N1QLQuery\nfrom couchbase.views.params import ViewQuery\nfrom numpy import random\n\n\nclass ViewQueryGen:\n\n PARAMS = {\n 'limit': 30,\n 'stale': 'update_after',\n }\n\n QUERIES_PER_VIEW = {\n 'id_by_city': 9,\n 'name_and_email_by_category_and_coins': 6,\n 'id_by_realm_and_coins': 5,\n 'name_and_email_by_city': 9,\n 'name_by_category_and_coins': 6,\n 'experts_id_by_realm_and_coins': 5,\n 'id_by_realm': 9,\n 'achievements_by_category_and_coins': 6,\n 'name_and_email_by_realm_and_coins': 5,\n 'experts_coins_by_name': 9,\n }\n\n def __init__(self, ddocs: dict, params: dict):\n self.params = dict(self.PARAMS, **params)\n\n self.view_sequence = []\n for ddoc_name, ddoc in ddocs.items():\n for view_name in ddoc['views']:\n self.view_sequence += \\\n [(ddoc_name, view_name)] * self.QUERIES_PER_VIEW[view_name]\n random.shuffle(self.view_sequence)\n self.view_sequence = cycle(self.view_sequence)\n\n @staticmethod\n def generate_params(category: str,\n city: str,\n realm: str,\n name: str,\n coins: float,\n **kwargs) -> dict:\n return {\n 'id_by_city': {\n 'key': city,\n },\n 'name_and_email_by_city': {\n 'key': city,\n },\n 'id_by_realm': {\n 'startkey': realm,\n },\n 'experts_coins_by_name': {\n 'startkey': name,\n 'descending': True,\n },\n 'name_by_category_and_coins': {\n 'startkey': [category, 0],\n 'endkey': [category, coins],\n },\n 'name_and_email_by_category_and_coins': {\n 'startkey': [category, 0],\n 'endkey': [category, coins],\n },\n 'achievements_by_category_and_coins': {\n 'startkey': [category, 0],\n 'endkey': [category, coins],\n },\n 'id_by_realm_and_coins': {\n 'startkey': [realm, coins],\n 'endkey': [realm, 10000],\n },\n 'name_and_email_by_realm_and_coins': {\n 'startkey': [realm, coins],\n 'endkey': [realm, 10000],\n },\n 'experts_id_by_realm_and_coins': {\n 'startkey': [realm, coins],\n 'endkey': [realm, 10000],\n },\n }\n\n def next(self, doc: dict) -> Tuple[str, str, ViewQuery]:\n ddoc_name, view_name = next(self.view_sequence)\n params = self.generate_params(**doc)[view_name]\n params = dict(self.params, **params)\n return ddoc_name, view_name, ViewQuery(**params)\n\n\nclass ViewQueryGenByType:\n\n PARAMS = {\n 'limit': 20,\n 'stale': 'update_after',\n }\n\n DDOC_NAME = 'ddoc'\n\n VIEWS_PER_TYPE = {\n 'basic': (\n 'name_and_street_by_city',\n 'name_and_email_by_county',\n 'achievements_by_realm',\n ),\n 'range': (\n 'name_by_coins',\n 'email_by_achievement_and_category',\n 'street_by_year_and_coins',\n ),\n 'group_by': (\n 'coins_stats_by_state_and_year',\n 'coins_stats_by_gmtime_and_year',\n 'coins_stats_by_full_state_and_year',\n ),\n 'multi_emits': (\n 'name_and_email_and_street_and_achievements_and_coins_by_city',\n 'street_and_name_and_email_and_achievement_and_coins_by_county',\n 'category_name_and_email_and_street_and_gmtime_and_year_by_country',\n ),\n 'compute': (\n 'calc_by_city',\n 'calc_by_county',\n 'calc_by_realm',\n ),\n 'body': (\n 'body_by_city',\n 'body_by_realm',\n 'body_by_country',\n ),\n 'distinct': (\n 'distinct_states',\n 'distinct_full_states',\n 'distinct_years',\n ),\n }\n\n def __init__(self, index_type: str, params: dict):\n self.params = dict(self.PARAMS, **params)\n\n self.view_sequence = cycle(self.VIEWS_PER_TYPE[index_type])\n\n @staticmethod\n def generate_params(city: dict,\n county: dict,\n country: dict,\n realm: dict,\n state: dict,\n full_state: dict,\n coins: dict,\n category: str,\n year: int,\n achievements: List[int],\n gmtime: 
Tuple[int],\n **kwargs) -> dict:\n return {\n 'name_and_street_by_city': {\n 'key': city['f']['f'],\n },\n 'name_and_email_by_county': {\n 'key': county['f']['f'],\n },\n 'achievements_by_realm': {\n 'key': realm['f'],\n },\n 'name_by_coins': {\n 'startkey': coins['f'] * 0.5,\n 'endkey': coins['f'],\n },\n 'email_by_achievement_and_category': {\n 'startkey': [0, category],\n 'endkey': [achievements[0], category],\n },\n 'street_by_year_and_coins': {\n 'startkey': [year, coins['f']],\n 'endkey': [year, 655.35],\n },\n 'coins_stats_by_state_and_year': {\n 'key': [state['f'], year],\n 'group': 'true'\n },\n 'coins_stats_by_gmtime_and_year': {\n 'key': [gmtime, year],\n 'group_level': 2\n },\n 'coins_stats_by_full_state_and_year': {\n 'key': [full_state['f'], year],\n 'group': 'true'\n },\n 'name_and_email_and_street_and_achievements_and_coins_by_city': {\n 'key': city['f']['f'],\n },\n 'street_and_name_and_email_and_achievement_and_coins_by_county': {\n 'key': county['f']['f'],\n },\n 'category_name_and_email_and_street_and_gmtime_and_year_by_country': {\n 'key': country['f'],\n },\n 'calc_by_city': {\n 'key': city['f']['f'],\n },\n 'calc_by_county': {\n 'key': county['f']['f'],\n },\n 'calc_by_realm': {\n 'key': realm['f'],\n },\n 'body_by_city': {\n 'key': city['f']['f'],\n },\n 'body_by_realm': {\n 'key': realm['f'],\n },\n 'body_by_country': {\n 'key': country['f'],\n },\n }\n\n def next(self, doc: dict) -> Tuple[str, str, ViewQuery]:\n view_name = next(self.view_sequence)\n params = self.generate_params(**doc)[view_name]\n params = dict(self.params, **params)\n return self.DDOC_NAME, view_name, ViewQuery(**params)\n\n\nclass N1QLQueryGen:\n\n def __init__(self, queries: List[dict]):\n queries = [\n (query['statement'], query['args'], query.get('scan_consistency'), query.get('ad_hoc'))\n for query in queries\n ]\n self.queries = cycle(queries)\n\n def generate_query(self):\n return\n\n def next(self, key: str, doc: dict) -> N1QLQuery:\n statement, args, scan_consistency, ad_hoc = next(self.queries)\n if 'key' in args:\n args = [key]\n else:\n args = args.format(**doc)\n args = eval(args)\n\n n1ql_query = N1QLQuery(statement, *args)\n n1ql_query.cross_bucket = True\n n1ql_query.adhoc = bool(ad_hoc)\n n1ql_query.consistency = scan_consistency or 'not_bounded'\n\n return n1ql_query\n"
]
| [
[
"numpy.random.shuffle"
]
]
|
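Illustrative sketch only: the weighted-then-shuffled round-robin that ViewQueryGen builds with numpy's random.shuffle and itertools.cycle; the view names and weights here are made up.

```python
# Build a weighted sequence, shuffle it in place, then cycle over it forever.
from itertools import cycle
from numpy import random

queries_per_view = {"id_by_city": 3, "id_by_realm": 1}   # hypothetical weights
sequence = []
for view, weight in queries_per_view.items():
    sequence += [view] * weight          # repeat each view by its weight

random.shuffle(sequence)                 # numpy.random.shuffle, shuffles the list in place
views = cycle(sequence)                  # endless round-robin over the shuffled list
print([next(views) for _ in range(6)])
```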
darrenr10/web-scraping-challenge | [
"ae95871ed081465ac5202aefbac5088f09ab4a12"
]
| [
"scrape_mars.py"
]
| [
"from bs4 import BeautifulSoup as bs\nimport requests\nimport pymongo\nfrom splinter import Browser\nfrom flask import Flask, render_template, redirect\nfrom flask_pymongo import PyMongo\nimport pandas as pd\n\ndef br_browser():\n executable_path = {'executable_path': 'chromedriver.exe'}\n return Browser('chrome', **executable_path, headless=False)\n\ndef scrape():\n browser = br_browser()\n mars_list = {}\n\n #mars news scrape\n browser.visit('https://mars.nasa.gov/news/')\n html = browser.html\n news_soup = bs(html, 'html.parser')\n news_title = browser.find_by_css('div.content_title a')[0].text\n news_p = news_soup.find_all('div', class_='article_teaser_body')[0].text\n\n browser.visit('https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html')\n browser.links.find_by_partial_text('FULL IMAGE').click()\n image = browser.find_by_css('img.fancybox-image')['src']\n image_url = image\n\n br_facts_url = 'https://space-facts.com/mars/'\n tables = pd.read_html(br_facts_url)\n big_red_df = tables[2]\n big_red_df.columns = ['Description','Mars']\n big_red_df.set_index('Description', inplace=True)\n html_tbl = big_red_df.to_html()\n html_tbl.replace('\\n','')\n\n #Mars Hemisphere name and image scrape\n main_url = 'https://astrogeology.usgs.gov'\n hemispheres = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(hemispheres)\n hemispheres_html = browser.html\n h_soup = bs(hemispheres_html, 'html.parser')\n all_hemp= h_soup.find('div',class_ = 'collapsible results')\n big_red_hemp = all_hemp.find_all('div',class_='item')\n hemi_images = []\n\n#for loop for hemi data\nfor d in big_red_hemp:\n #title\n hemisphere = d.find('div', class_ =\"description\")\n title = hemisphere.h3.text\n \n #Image link\n hemp_url = hemisphere.a[\"href\"]\n browser.visit(main_url + hemisphere_url)\n \n i_html = browser.html\n i_soup = bs(i_html, 'html.parser')\n \n link = i_soup.find('div', class_ ='downloads')\n image_url = link.find('li').a['href']\n \n "
]
| [
[
"pandas.read_html"
]
]
|
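Illustrative sketch only: the table-scraping step from the script above, run on an inline HTML snippet instead of the live space-facts.com page (pandas.read_html needs an HTML parser backend such as lxml installed).

```python
# Parse an HTML table into a DataFrame, then reshape it as the scrape script does.
import io
import pandas as pd

html = """<table>
  <tr><td>Equatorial Diameter:</td><td>6,792 km</td></tr>
  <tr><td>Polar Diameter:</td><td>6,752 km</td></tr>
</table>"""

tables = pd.read_html(io.StringIO(html))       # returns a list of DataFrames
facts = tables[0]
facts.columns = ["Description", "Mars"]
facts.set_index("Description", inplace=True)
html_tbl = facts.to_html().replace("\n", "")   # note: str.replace returns a new string
print(html_tbl)
```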
osanseviero/allennlp | [
"bffdbfd1a6da648145ee62556a1c01ba022eb0e4"
]
| [
"allennlp/common/file_utils.py"
]
| [
"\"\"\"\nUtilities for working with the local dataset cache.\n\"\"\"\nimport weakref\nfrom contextlib import contextmanager\nimport glob\nimport io\nimport os\nimport logging\nimport tempfile\nimport json\nfrom abc import ABC\nfrom collections import defaultdict\nfrom dataclasses import dataclass, asdict\nfrom datetime import timedelta\nfrom fnmatch import fnmatch\nfrom os import PathLike\nfrom urllib.parse import urlparse\nfrom pathlib import Path\nfrom typing import (\n Optional,\n Tuple,\n Union,\n IO,\n Callable,\n Set,\n List,\n Iterator,\n Iterable,\n Dict,\n NamedTuple,\n MutableMapping,\n)\nfrom hashlib import sha256\nfrom functools import wraps\nfrom weakref import WeakValueDictionary\nfrom zipfile import ZipFile, is_zipfile\nimport tarfile\nimport shutil\nimport pickle\nimport time\nimport warnings\n\nimport boto3\nimport botocore\nimport torch\nfrom filelock import FileLock as _FileLock\nfrom google.cloud import storage\nfrom google.api_core.exceptions import NotFound\nimport numpy as np\nfrom overrides import overrides\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\nimport lmdb\nfrom torch import Tensor\nimport huggingface_hub as hf_hub\n\nfrom allennlp.version import VERSION\nfrom allennlp.common.tqdm import Tqdm\n\nlogger = logging.getLogger(__name__)\n\nCACHE_ROOT = Path(os.getenv(\"ALLENNLP_CACHE_ROOT\", Path.home() / \".allennlp\"))\nCACHE_DIRECTORY = str(CACHE_ROOT / \"cache\")\nDEPRECATED_CACHE_DIRECTORY = str(CACHE_ROOT / \"datasets\")\n\n# This variable was deprecated in 0.7.2 since we use a single folder for caching\n# all types of files (datasets, models, etc.)\nDATASET_CACHE = CACHE_DIRECTORY\n\n# Warn if the user is still using the deprecated cache directory.\nif os.path.exists(DEPRECATED_CACHE_DIRECTORY):\n logger.warning(\n f\"Deprecated cache directory found ({DEPRECATED_CACHE_DIRECTORY}). \"\n f\"Please remove this directory from your system to free up space.\"\n )\n\n\nclass FileLock(_FileLock):\n \"\"\"\n This is just a subclass of the `FileLock` class from the `filelock` library, except that\n it adds an additional argument to the `__init__` method: `read_only_ok`.\n\n By default this flag is `False`, which an exception will be thrown when a lock\n can't be acquired due to lack of write permissions.\n But if this flag is set to `True`, a warning will be emitted instead of an error when\n the lock already exists but the lock can't be acquired because write access is blocked.\n \"\"\"\n\n def __init__(\n self, lock_file: Union[str, PathLike], timeout=-1, read_only_ok: bool = False\n ) -> None:\n super().__init__(str(lock_file), timeout=timeout)\n self._read_only_ok = read_only_ok\n\n @overrides\n def acquire(self, timeout=None, poll_interval=0.05):\n try:\n super().acquire(timeout=timeout, poll_intervall=poll_interval)\n except OSError as err:\n # OSError could be a lot of different things, but what we're looking\n # for in particular are permission errors, such as:\n # - errno 1 - EPERM - \"Operation not permitted\"\n # - errno 13 - EACCES - \"Permission denied\"\n # - errno 30 - EROFS - \"Read-only file system\"\n if err.errno not in (1, 13, 30):\n raise\n\n if os.path.isfile(self._lock_file) and self._read_only_ok:\n warnings.warn(\n f\"Lacking permissions required to obtain lock '{self._lock_file}'. 
\"\n \"Race conditions are possible if other processes are writing to the same resource.\",\n UserWarning,\n )\n else:\n raise\n\n\ndef _resource_to_filename(resource: str, etag: str = None) -> str:\n \"\"\"\n Convert a `resource` into a hashed filename in a repeatable way.\n If `etag` is specified, append its hash to the resources's, delimited\n by a period.\n \"\"\"\n resource_bytes = resource.encode(\"utf-8\")\n resource_hash = sha256(resource_bytes)\n filename = resource_hash.hexdigest()\n\n if etag:\n etag_bytes = etag.encode(\"utf-8\")\n etag_hash = sha256(etag_bytes)\n filename += \".\" + etag_hash.hexdigest()\n\n return filename\n\n\ndef filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:\n \"\"\"\n Return the url and etag (which may be `None`) stored for `filename`.\n Raise `FileNotFoundError` if `filename` or its stored metadata do not exist.\n \"\"\"\n if cache_dir is None:\n cache_dir = CACHE_DIRECTORY\n\n cache_path = os.path.join(cache_dir, filename)\n if not os.path.exists(cache_path):\n raise FileNotFoundError(\"file {} not found\".format(cache_path))\n\n meta_path = cache_path + \".json\"\n if not os.path.exists(meta_path):\n raise FileNotFoundError(\"file {} not found\".format(meta_path))\n\n with open(meta_path) as meta_file:\n metadata = json.load(meta_file)\n url = metadata[\"url\"]\n etag = metadata[\"etag\"]\n\n return url, etag\n\n\ndef check_tarfile(tar_file: tarfile.TarFile):\n \"\"\"Tar files can contain files outside of the extraction directory, or symlinks that point\n outside the extraction directory. We also don't want any block devices fifos, or other\n weird file types extracted. This checks for those issues and throws an exception if there\n is a problem.\"\"\"\n base_path = os.path.join(\"tmp\", \"pathtest\")\n base_path = os.path.normpath(base_path)\n\n def normalize_path(path: str) -> str:\n path = path.rstrip(\"/\")\n path = path.replace(\"/\", os.sep)\n path = os.path.join(base_path, path)\n path = os.path.normpath(path)\n return path\n\n for tarinfo in tar_file:\n if not (\n tarinfo.isreg()\n or tarinfo.isdir()\n or tarinfo.isfile()\n or tarinfo.islnk()\n or tarinfo.issym()\n ):\n raise ValueError(\n f\"Tar file {str(tar_file.name)} contains invalid member {tarinfo.name}.\"\n )\n\n target_path = normalize_path(tarinfo.name)\n if os.path.commonprefix([base_path, target_path]) != base_path:\n raise ValueError(\n f\"Tar file {str(tar_file.name)} is trying to create a file outside of its extraction directory.\"\n )\n\n if tarinfo.islnk() or tarinfo.issym():\n target_path = normalize_path(tarinfo.linkname)\n if os.path.commonprefix([base_path, target_path]) != base_path:\n raise ValueError(\n f\"Tar file {str(tar_file.name)} is trying to link to a file \"\n \"outside of its extraction directory.\"\n )\n\n\ndef cached_path(\n url_or_filename: Union[str, PathLike],\n cache_dir: Union[str, Path] = None,\n extract_archive: bool = False,\n force_extract: bool = False,\n) -> str:\n \"\"\"\n Given something that might be a URL or local path, determine which.\n If it's a remote resource, download the file and cache it, and\n then return the path to the cached file. 
If it's already a local path,\n make sure the file exists and return the path.\n\n For URLs, \"http://\", \"https://\", \"s3://\", \"gs://\", and \"hf://\" are all supported.\n The latter corresponds to the HuggingFace Hub.\n\n For example, to download the PyTorch weights for the model `epwalsh/bert-xsmall-dummy`\n on HuggingFace, you could do:\n\n ```python\n cached_path(\"hf://epwalsh/bert-xsmall-dummy/pytorch_model.bin\")\n ```\n\n For paths or URLs that point to a tarfile or zipfile, you can also add a path\n to a specific file to the `url_or_filename` preceeded by a \"!\", and the archive will\n be automatically extracted (provided you set `extract_archive` to `True`),\n returning the local path to the specific file. For example:\n\n ```python\n cached_path(\"model.tar.gz!weights.th\", extract_archive=True)\n ```\n\n # Parameters\n\n url_or_filename : `Union[str, Path]`\n A URL or path to parse and possibly download.\n\n cache_dir : `Union[str, Path]`, optional (default = `None`)\n The directory to cache downloads.\n\n extract_archive : `bool`, optional (default = `False`)\n If `True`, then zip or tar.gz archives will be automatically extracted.\n In which case the directory is returned.\n\n force_extract : `bool`, optional (default = `False`)\n If `True` and the file is an archive file, it will be extracted regardless\n of whether or not the extracted directory already exists.\n\n !!! Warning\n Use this flag with caution! This can lead to race conditions if used\n from multiple processes on the same file.\n \"\"\"\n if cache_dir is None:\n cache_dir = CACHE_DIRECTORY\n\n cache_dir = os.path.expanduser(cache_dir)\n os.makedirs(cache_dir, exist_ok=True)\n\n if not isinstance(url_or_filename, str):\n url_or_filename = str(url_or_filename)\n\n file_path: str\n extraction_path: Optional[str] = None\n\n # If we're using the /a/b/foo.zip!c/d/file.txt syntax, handle it here.\n exclamation_index = url_or_filename.find(\"!\")\n if extract_archive and exclamation_index >= 0:\n archive_path = url_or_filename[:exclamation_index]\n file_name = url_or_filename[exclamation_index + 1 :]\n\n # Call 'cached_path' recursively now to get the local path to the archive itself.\n cached_archive_path = cached_path(archive_path, cache_dir, True, force_extract)\n if not os.path.isdir(cached_archive_path):\n raise ValueError(\n f\"{url_or_filename} uses the ! 
syntax, but does not specify an archive file.\"\n )\n\n # Now return the full path to the desired file within the extracted archive,\n # provided it exists.\n file_path = os.path.join(cached_archive_path, file_name)\n if not os.path.exists(file_path):\n raise FileNotFoundError(f\"file {file_name} not found within {archive_path}\")\n\n return file_path\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in (\"http\", \"https\", \"s3\", \"hf\", \"gs\"):\n # URL, so get it from the cache (downloading if necessary)\n file_path = get_from_cache(url_or_filename, cache_dir)\n\n if extract_archive and (is_zipfile(file_path) or tarfile.is_tarfile(file_path)):\n # This is the path the file should be extracted to.\n # For example ~/.allennlp/cache/234234.21341 -> ~/.allennlp/cache/234234.21341-extracted\n extraction_path = file_path + \"-extracted\"\n\n else:\n url_or_filename = os.path.expanduser(url_or_filename)\n\n if os.path.exists(url_or_filename):\n # File, and it exists.\n file_path = url_or_filename\n # Normalize the path.\n url_or_filename = os.path.abspath(url_or_filename)\n\n if (\n extract_archive\n and os.path.isfile(file_path)\n and (is_zipfile(file_path) or tarfile.is_tarfile(file_path))\n ):\n # We'll use a unique directory within the cache to root to extract the archive to.\n # The name of the directory is a hash of the resource file path and it's modification\n # time. That way, if the file changes, we'll know when to extract it again.\n extraction_name = (\n _resource_to_filename(url_or_filename, str(os.path.getmtime(file_path)))\n + \"-extracted\"\n )\n extraction_path = os.path.join(cache_dir, extraction_name)\n\n elif parsed.scheme == \"\":\n # File, but it doesn't exist.\n raise FileNotFoundError(f\"file {url_or_filename} not found\")\n\n else:\n # Something unknown\n raise ValueError(f\"unable to parse {url_or_filename} as a URL or as a local path\")\n\n if extraction_path is not None:\n # If the extracted directory already exists (and is non-empty), then no\n # need to create a lock file and extract again unless `force_extract=True`.\n if os.path.isdir(extraction_path) and os.listdir(extraction_path) and not force_extract:\n return extraction_path\n\n # Extract it.\n with FileLock(extraction_path + \".lock\"):\n # Check again if the directory exists now that we've acquired the lock.\n if os.path.isdir(extraction_path) and os.listdir(extraction_path):\n if force_extract:\n logger.warning(\n \"Extraction directory for %s (%s) already exists, \"\n \"overwriting it since 'force_extract' is 'True'\",\n url_or_filename,\n extraction_path,\n )\n else:\n return extraction_path\n\n logger.info(\"Extracting %s to %s\", url_or_filename, extraction_path)\n shutil.rmtree(extraction_path, ignore_errors=True)\n\n # We extract first to a temporary directory in case something goes wrong\n # during the extraction process so we don't end up with a corrupted cache.\n tmp_extraction_dir = tempfile.mkdtemp(dir=os.path.split(extraction_path)[0])\n try:\n if is_zipfile(file_path):\n with ZipFile(file_path, \"r\") as zip_file:\n zip_file.extractall(tmp_extraction_dir)\n zip_file.close()\n else:\n tar_file = tarfile.open(file_path)\n check_tarfile(tar_file)\n tar_file.extractall(tmp_extraction_dir)\n tar_file.close()\n # Extraction was successful, rename temp directory to final\n # cache directory and dump the meta data.\n os.replace(tmp_extraction_dir, extraction_path)\n meta = _Meta(\n resource=url_or_filename,\n cached_path=extraction_path,\n creation_time=time.time(),\n 
extraction_dir=True,\n size=_get_resource_size(extraction_path),\n )\n meta.to_file()\n finally:\n shutil.rmtree(tmp_extraction_dir, ignore_errors=True)\n\n return extraction_path\n\n return file_path\n\n\ndef is_url_or_existing_file(url_or_filename: Union[str, Path, None]) -> bool:\n \"\"\"\n Given something that might be a URL (or might be a local path),\n determine check if it's url or an existing file path.\n \"\"\"\n if url_or_filename is None:\n return False\n url_or_filename = os.path.expanduser(str(url_or_filename))\n parsed = urlparse(url_or_filename)\n return parsed.scheme in (\"http\", \"https\", \"s3\", \"gs\") or os.path.exists(url_or_filename)\n\n\ndef _split_s3_path(url: str) -> Tuple[str, str]:\n return _split_cloud_path(url, \"s3\")\n\n\ndef _split_gcs_path(url: str) -> Tuple[str, str]:\n return _split_cloud_path(url, \"gs\")\n\n\ndef _split_cloud_path(url: str, provider: str) -> Tuple[str, str]:\n \"\"\"Split a full s3 path into the bucket name and path.\"\"\"\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad {} path {}\".format(provider, url))\n bucket_name = parsed.netloc\n provider_path = parsed.path\n # Remove '/' at beginning of path.\n if provider_path.startswith(\"/\"):\n provider_path = provider_path[1:]\n return bucket_name, provider_path\n\n\ndef _s3_request(func: Callable):\n \"\"\"\n Wrapper function for s3 requests in order to create more helpful error\n messages.\n \"\"\"\n\n @wraps(func)\n def wrapper(url: str, *args, **kwargs):\n try:\n return func(url, *args, **kwargs)\n except botocore.exceptions.ClientError as exc:\n if int(exc.response[\"Error\"][\"Code\"]) == 404:\n raise FileNotFoundError(\"file {} not found\".format(url))\n else:\n raise\n\n return wrapper\n\n\ndef _get_s3_resource():\n session = boto3.session.Session()\n if session.get_credentials() is None:\n # Use unsigned requests.\n s3_resource = session.resource(\n \"s3\", config=botocore.client.Config(signature_version=botocore.UNSIGNED)\n )\n else:\n s3_resource = session.resource(\"s3\")\n return s3_resource\n\n\n@_s3_request\ndef _s3_etag(url: str) -> Optional[str]:\n \"\"\"Check ETag on S3 object.\"\"\"\n s3_resource = _get_s3_resource()\n bucket_name, s3_path = _split_s3_path(url)\n s3_object = s3_resource.Object(bucket_name, s3_path)\n return s3_object.e_tag\n\n\n@_s3_request\ndef _s3_get(url: str, temp_file: IO) -> None:\n \"\"\"Pull a file directly from S3.\"\"\"\n s3_resource = _get_s3_resource()\n bucket_name, s3_path = _split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)\n\n\ndef _gcs_request(func: Callable):\n \"\"\"\n Wrapper function for gcs requests in order to create more helpful error\n messages.\n \"\"\"\n\n @wraps(func)\n def wrapper(url: str, *args, **kwargs):\n try:\n return func(url, *args, **kwargs)\n except NotFound:\n raise FileNotFoundError(\"file {} not found\".format(url))\n\n return wrapper\n\n\ndef _get_gcs_client():\n storage_client = storage.Client()\n return storage_client\n\n\ndef _get_gcs_blob(url: str) -> storage.blob.Blob:\n gcs_resource = _get_gcs_client()\n bucket_name, gcs_path = _split_gcs_path(url)\n bucket = gcs_resource.bucket(bucket_name)\n blob = bucket.blob(gcs_path)\n return blob\n\n\n@_gcs_request\ndef _gcs_md5(url: str) -> Optional[str]:\n \"\"\"Get GCS object's md5.\"\"\"\n blob = _get_gcs_blob(url)\n return blob.md5_hash\n\n\n@_gcs_request\ndef _gcs_get(url: str, temp_filename: str) -> None:\n \"\"\"Pull a file directly from GCS.\"\"\"\n blob = 
_get_gcs_blob(url)\n blob.download_to_filename(temp_filename)\n\n\ndef _session_with_backoff() -> requests.Session:\n \"\"\"\n We ran into an issue where http requests to s3 were timing out,\n possibly because we were making too many requests too quickly.\n This helper function returns a requests session that has retry-with-backoff\n built in. See\n <https://stackoverflow.com/questions/23267409/how-to-implement-retry-mechanism-into-python-requests-library>.\n \"\"\"\n session = requests.Session()\n retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])\n session.mount(\"http://\", HTTPAdapter(max_retries=retries))\n session.mount(\"https://\", HTTPAdapter(max_retries=retries))\n\n return session\n\n\ndef _http_etag(url: str) -> Optional[str]:\n with _session_with_backoff() as session:\n response = session.head(url, allow_redirects=True)\n if response.status_code != 200:\n raise OSError(\n \"HEAD request failed for url {} with status code {}\".format(url, response.status_code)\n )\n return response.headers.get(\"ETag\")\n\n\ndef _http_get(url: str, temp_file: IO) -> None:\n with _session_with_backoff() as session:\n req = session.get(url, stream=True)\n req.raise_for_status()\n content_length = req.headers.get(\"Content-Length\")\n total = int(content_length) if content_length is not None else None\n progress = Tqdm.tqdm(unit=\"B\", total=total, desc=\"downloading\")\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n progress.update(len(chunk))\n temp_file.write(chunk)\n progress.close()\n\n\ndef _find_latest_cached(url: str, cache_dir: Union[str, Path]) -> Optional[str]:\n filename = _resource_to_filename(url)\n cache_path = os.path.join(cache_dir, filename)\n candidates: List[Tuple[str, float]] = []\n for path in glob.glob(cache_path + \"*\"):\n if path.endswith(\".json\") or path.endswith(\"-extracted\") or path.endswith(\".lock\"):\n continue\n mtime = os.path.getmtime(path)\n candidates.append((path, mtime))\n # Sort candidates by modification time, newest first.\n candidates.sort(key=lambda x: x[1], reverse=True)\n if candidates:\n return candidates[0][0]\n return None\n\n\ndef _serialize(data):\n buffer = pickle.dumps(data, protocol=-1)\n return np.frombuffer(buffer, dtype=np.uint8)\n\n\n_active_tensor_caches: MutableMapping[int, \"TensorCache\"] = weakref.WeakValueDictionary()\n\n\ndef _unique_file_id(path: Union[str, PathLike]) -> int:\n result = os.stat(path).st_ino\n assert result != 0\n return result\n\n\nclass TensorCache(MutableMapping[str, Tensor], ABC):\n \"\"\"\n This is a key-value store, mapping strings to tensors. The data is kept on disk,\n making this class useful as a cache for storing tensors.\n\n `TensorCache` is also safe to access from multiple processes at the same time, so\n you can use it in distributed training situations, or from multiple training\n runs at the same time.\n \"\"\"\n\n def __new__(cls, filename: Union[str, PathLike], *, read_only: bool = False, **kwargs):\n # This mechanism makes sure we re-use open lmdb file handles. Lmdb has a problem when the same file is\n # opened by the same process multiple times. 
This is our workaround.\n filename = str(filename)\n try:\n result = _active_tensor_caches.get(_unique_file_id(filename))\n except FileNotFoundError:\n result = None\n if result is None:\n result = super(TensorCache, cls).__new__(\n cls, filename, read_only=read_only, **kwargs\n ) # type: ignore\n return result\n\n def __init__(\n self,\n filename: Union[str, PathLike],\n *,\n map_size: int = 1024 * 1024 * 1024 * 1024,\n read_only: bool = False,\n ) -> None:\n \"\"\"\n Creates a `TensorCache` by either opening an existing one on disk, or creating\n a new one. Its interface is almost exactly like a Python dictionary, where the\n keys are strings and the values are `torch.Tensor`.\n\n Parameters\n ----------\n filename: `str`\n Path to the location of the cache\n map_size: `int`, optional, defaults to 1TB\n This is the maximum size the cache will ever grow to. On reasonable operating\n systems, there is no penalty to making this a large value.\n `TensorCache` uses a memory-mapped file to store the data. When the file is\n first opened, we have to give the maximum size it can ever grow to. This is\n that number. Reasonable operating systems don't actually allocate that space\n until it is really needed.\n \"\"\"\n self.lmdb_env: lmdb.Environment\n if hasattr(self, \"lmdb_env\"):\n # We're being initialized again after a cache hit in _active_tensor_caches, thanks\n # to __new__. In this case, we may have to upgrade to read/write, but other than\n # that we are good to go.\n if read_only:\n return\n if not self.read_only:\n return\n\n # Upgrade a read-only lmdb env to a read/write lmdb env.\n filename = self.lmdb_env.path()\n old_info = self.lmdb_env.info()\n\n self.lmdb_env.close()\n self.lmdb_env = lmdb.open(\n filename,\n map_size=old_info[\"map_size\"],\n subdir=False,\n metasync=False,\n sync=True,\n readahead=False,\n meminit=False,\n readonly=False,\n lock=True,\n )\n else:\n filename = str(filename)\n\n cpu_count = os.cpu_count() or 1\n if os.path.exists(filename):\n if os.path.isfile(filename):\n # If the file is not writable, set read_only to True, but issue a warning.\n if not os.access(filename, os.W_OK):\n if not read_only:\n warnings.warn(\n f\"File '{filename}' is read-only, so cache will be read-only\",\n UserWarning,\n )\n read_only = True\n else:\n # If it's not a file, raise an error.\n raise ValueError(\"Expect a file, found a directory instead\")\n\n use_lock = True\n if read_only:\n # Check if the lock file is writable. If it's not, then we won't be able to use the lock.\n\n # This is always how lmdb names the lock file.\n lock_filename = filename + \"-lock\"\n if os.path.isfile(lock_filename):\n use_lock = os.access(lock_filename, os.W_OK)\n else:\n # If the lock file doesn't exist yet, then the directory needs to be writable in\n # order to create and use the lock file.\n use_lock = os.access(os.path.dirname(lock_filename), os.W_OK)\n\n if not use_lock:\n warnings.warn(\n f\"Lacking permissions to use lock file on cache '{filename}'.\\nUse at your own risk!\",\n UserWarning,\n )\n\n self.lmdb_env = lmdb.open(\n filename,\n subdir=False,\n map_size=map_size,\n max_readers=cpu_count * 4,\n max_spare_txns=cpu_count * 4,\n metasync=False,\n sync=True,\n readahead=False,\n meminit=False,\n readonly=read_only,\n lock=use_lock,\n )\n _active_tensor_caches[_unique_file_id(filename)] = self\n\n # We have another cache here that makes sure we return the same object for the same key. 
Without it,\n # you would get a different tensor, using different memory, every time you call __getitem__(), even\n # if you call it with the same key.\n # The downside is that we can't keep self.cache_cache up to date when multiple processes modify the\n # cache at the same time. We can guarantee though that it is up to date as long as processes either\n # write new values, or read existing ones.\n self.cache_cache: MutableMapping[str, Tensor] = WeakValueDictionary()\n\n @property\n def read_only(self) -> bool:\n return self.lmdb_env.flags()[\"readonly\"]\n\n def __contains__(self, key: object):\n if not isinstance(key, str):\n return False\n if key in self.cache_cache:\n return True\n encoded_key = key.encode()\n with self.lmdb_env.begin(write=False) as txn:\n result = txn.get(encoded_key)\n return result is not None\n\n def __getitem__(self, key: str):\n try:\n return self.cache_cache[key]\n except KeyError:\n encoded_key = key.encode()\n with self.lmdb_env.begin(write=False) as txn:\n buffer = txn.get(encoded_key)\n if buffer is None:\n raise KeyError()\n tensor = torch.load(io.BytesIO(buffer), map_location=\"cpu\")\n self.cache_cache[key] = tensor\n return tensor\n\n def __setitem__(self, key: str, tensor: torch.Tensor):\n if self.read_only:\n raise ValueError(\"cannot write to a read-only cache\")\n\n tensor = tensor.cpu()\n encoded_key = key.encode()\n buffer = io.BytesIO()\n if tensor.storage().size() != np.prod(tensor.size()):\n tensor = tensor.clone()\n assert tensor.storage().size() == np.prod(tensor.size())\n torch.save(tensor.detach(), buffer, pickle_protocol=pickle.HIGHEST_PROTOCOL)\n with self.lmdb_env.begin(write=True) as txn:\n txn.put(encoded_key, buffer.getbuffer())\n\n self.cache_cache[key] = tensor\n\n def __delitem__(self, key: str):\n if self.read_only:\n raise ValueError(\"cannot write to a read-only cache\")\n\n encoded_key = key.encode()\n with self.lmdb_env.begin(write=True) as txn:\n txn.delete(encoded_key)\n\n try:\n del self.cache_cache[key]\n except KeyError:\n pass\n\n def __del__(self):\n if self.lmdb_env is not None:\n self.lmdb_env.close()\n self.lmdb_env = None\n\n def __len__(self):\n return self.lmdb_env.stat()[\"entries\"]\n\n def __iter__(self):\n # It is not hard to implement this, but we have not needed it so far.\n raise NotImplementedError()\n\n\nclass CacheFile:\n \"\"\"\n This is a context manager that makes robust caching easier.\n\n On `__enter__`, an IO handle to a temporarily file is returned, which can\n be treated as if it's the actual cache file.\n\n On `__exit__`, the temporarily file is renamed to the cache file. 
If anything\n goes wrong while writing to the temporary file, it will be removed.\n \"\"\"\n\n def __init__(\n self, cache_filename: Union[PathLike, str], mode: str = \"w+b\", suffix: str = \".tmp\"\n ) -> None:\n self.cache_filename = (\n cache_filename if isinstance(cache_filename, Path) else Path(cache_filename)\n )\n self.cache_directory = os.path.dirname(self.cache_filename)\n self.mode = mode\n self.temp_file = tempfile.NamedTemporaryFile(\n self.mode, dir=self.cache_directory, delete=False, suffix=suffix\n )\n\n def __enter__(self):\n return self.temp_file\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.temp_file.close()\n if exc_value is None:\n # Success.\n logger.debug(\n \"Renaming temp file %s to cache at %s\", self.temp_file.name, self.cache_filename\n )\n # Rename the temp file to the actual cache filename.\n os.replace(self.temp_file.name, self.cache_filename)\n return True\n # Something went wrong, remove the temp file.\n logger.debug(\"removing temp file %s\", self.temp_file.name)\n os.remove(self.temp_file.name)\n return False\n\n\nclass LocalCacheResource:\n \"\"\"\n This is a context manager that can be used to fetch and cache arbitrary resources locally\n using the same mechanisms that `cached_path` uses for remote resources.\n\n It can be used, for example, when you want to cache the result of an expensive computation.\n\n # Examples\n\n ```python\n with LocalCacheResource(\"long-computation\", \"v1\") as cache:\n if cache.cached():\n with cache.reader() as f:\n # read from cache\n else:\n with cache.writer() as f:\n # do the computation\n # ...\n # write to cache\n ```\n \"\"\"\n\n def __init__(self, resource_name: str, version: str, cache_dir: str = CACHE_DIRECTORY) -> None:\n self.resource_name = resource_name\n self.version = version\n self.cache_dir = cache_dir\n self.path = os.path.join(self.cache_dir, _resource_to_filename(resource_name, version))\n self.file_lock = FileLock(self.path + \".lock\")\n\n def cached(self) -> bool:\n return os.path.exists(self.path)\n\n @contextmanager\n def writer(self, mode=\"w\"):\n if self.cached():\n raise ValueError(\n f\"local cache of {self.resource_name} (version '{self.version}') already exists!\"\n )\n\n with CacheFile(self.path, mode=mode) as f:\n yield f\n\n meta = _Meta(\n resource=self.resource_name,\n cached_path=self.path,\n creation_time=time.time(),\n etag=self.version,\n size=_get_resource_size(self.path),\n )\n meta.to_file()\n\n @contextmanager\n def reader(self, mode=\"r\"):\n if not self.cached():\n raise ValueError(\n f\"local cache of {self.resource_name} (version '{self.version}') does not exist yet!\"\n )\n\n with open(self.path, mode) as f:\n yield f\n\n def __enter__(self):\n self.file_lock.acquire()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.file_lock.release()\n if exc_value is None:\n return True\n return False\n\n\n@dataclass\nclass _Meta:\n \"\"\"\n Any resource that is downloaded to - or extracted in - the cache directory will\n have a meta JSON file written next to it, which corresponds to an instance\n of this class.\n\n In older versions of AllenNLP, this meta document just had two fields: 'url' and\n 'etag'. 
The 'url' field is now the more general 'resource' field, but these old\n meta files are still compatible when a `_Meta` is instantiated with the `.from_path()`\n class method.\n \"\"\"\n\n resource: str\n \"\"\"\n URL or normalized path to the resource.\n \"\"\"\n\n cached_path: str\n \"\"\"\n Path to the corresponding cached version of the resource.\n \"\"\"\n\n creation_time: float\n \"\"\"\n The unix timestamp of when the corresponding resource was cached or extracted.\n \"\"\"\n\n size: int = 0\n \"\"\"\n The size of the corresponding resource, in bytes.\n \"\"\"\n\n etag: Optional[str] = None\n \"\"\"\n Optional ETag associated with the current cached version of the resource.\n \"\"\"\n\n extraction_dir: bool = False\n \"\"\"\n Does this meta corresponded to an extraction directory?\n \"\"\"\n\n def to_file(self) -> None:\n with open(self.cached_path + \".json\", \"w\") as meta_file:\n json.dump(asdict(self), meta_file)\n\n @classmethod\n def from_path(cls, path: Union[str, Path]) -> \"_Meta\":\n path = str(path)\n with open(path) as meta_file:\n data = json.load(meta_file)\n # For backwards compat:\n if \"resource\" not in data:\n data[\"resource\"] = data.pop(\"url\")\n if \"creation_time\" not in data:\n data[\"creation_time\"] = os.path.getmtime(path[:-5])\n if \"extraction_dir\" not in data and path.endswith(\"-extracted.json\"):\n data[\"extraction_dir\"] = True\n if \"cached_path\" not in data:\n data[\"cached_path\"] = path[:-5]\n if \"size\" not in data:\n data[\"size\"] = _get_resource_size(data[\"cached_path\"])\n return cls(**data)\n\n\ndef _hf_hub_download(\n url, model_identifier: str, filename: Optional[str], cache_dir: Union[str, Path]\n) -> str:\n revision: Optional[str]\n if \"@\" in model_identifier:\n repo_id = model_identifier.split(\"@\")[0]\n revision = model_identifier.split(\"@\")[1]\n else:\n repo_id = model_identifier\n revision = None\n\n if filename is not None:\n hub_url = hf_hub.hf_hub_url(repo_id=repo_id, filename=filename, revision=revision)\n cache_path = str(\n hf_hub.cached_download(\n url=hub_url,\n library_name=\"allennlp\",\n library_version=VERSION,\n cache_dir=cache_dir,\n )\n )\n # HF writes it's own meta '.json' file which uses the same format we used to use and still\n # support, but is missing some fields that we like to have.\n # So we overwrite it when it we can.\n with FileLock(cache_path + \".lock\", read_only_ok=True):\n meta = _Meta.from_path(cache_path + \".json\")\n # The file HF writes will have 'resource' set to the 'http' URL corresponding to the 'hf://' URL,\n # but we want 'resource' to be the original 'hf://' URL.\n if meta.resource != url:\n meta.resource = url\n meta.to_file()\n else:\n cache_path = str(hf_hub.snapshot_download(repo_id, revision=revision, cache_dir=cache_dir))\n # Need to write the meta file for snapshot downloads if it doesn't exist.\n with FileLock(cache_path + \".lock\", read_only_ok=True):\n if not os.path.exists(cache_path + \".json\"):\n meta = _Meta(\n resource=url,\n cached_path=cache_path,\n creation_time=time.time(),\n extraction_dir=True,\n size=_get_resource_size(cache_path),\n )\n meta.to_file()\n return cache_path\n\n\n# TODO(joelgrus): do we want to do checksums or anything like that?\ndef get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:\n \"\"\"\n Given a URL, look for the corresponding dataset in the local cache.\n If it's not there, download it. 
Then return the path to the cached file.\n \"\"\"\n if cache_dir is None:\n cache_dir = CACHE_DIRECTORY\n\n if url.startswith(\"hf://\"):\n # Remove the 'hf://' prefix\n identifier = url[5:]\n\n if identifier.count(\"/\") > 1:\n filename = \"/\".join(identifier.split(\"/\")[2:])\n model_identifier = \"/\".join(identifier.split(\"/\")[:2])\n return _hf_hub_download(url, model_identifier, filename, cache_dir)\n elif identifier.count(\"/\") == 1:\n # 'hf://' URLs like 'hf://xxxx/yyyy' are potentially ambiguous,\n # because this could refer to either:\n # 1. the file 'yyyy' in the 'xxxx' repository, or\n # 2. the repo 'yyyy' under the user/org name 'xxxx'.\n # We default to (1), but if we get a 404 error then we try (2).\n try:\n model_identifier, filename = identifier.split(\"/\")\n return _hf_hub_download(url, model_identifier, filename, cache_dir)\n except requests.exceptions.HTTPError as exc:\n if exc.response.status_code == 404:\n return _hf_hub_download(url, identifier, None, cache_dir)\n raise\n else:\n return _hf_hub_download(url, identifier, None, cache_dir)\n\n # Get eTag to add to filename, if it exists.\n try:\n if url.startswith(\"s3://\"):\n etag = _s3_etag(url)\n elif url.startswith(\"gs://\"):\n etag = _gcs_md5(url)\n else:\n etag = _http_etag(url)\n except (requests.exceptions.ConnectionError, botocore.exceptions.EndpointConnectionError):\n # We might be offline, in which case we don't want to throw an error\n # just yet. Instead, we'll try to use the latest cached version of the\n # target resource, if it exists. We'll only throw an exception if we\n # haven't cached the resource at all yet.\n logger.warning(\n \"Connection error occurred while trying to fetch ETag for %s. \"\n \"Will attempt to use latest cached version of resource\",\n url,\n )\n latest_cached = _find_latest_cached(url, cache_dir)\n if latest_cached:\n logger.info(\n \"ETag request failed with connection error, using latest cached \"\n \"version of %s: %s\",\n url,\n latest_cached,\n )\n return latest_cached\n else:\n logger.error(\n \"Connection failed while trying to fetch ETag, \"\n \"and no cached version of %s could be found\",\n url,\n )\n raise\n except OSError:\n # OSError may be triggered if we were unable to fetch the eTag.\n # If this is the case, try to proceed without eTag check.\n etag = None\n\n filename = _resource_to_filename(url, etag)\n\n # Get cache path to put the file.\n cache_path = os.path.join(cache_dir, filename)\n\n # Multiple processes may be trying to cache the same file at once, so we need\n # to be a little careful to avoid race conditions. 
We do this using a lock file.\n # Only one process can own this lock file at a time, and a process will block\n # on the call to `lock.acquire()` until the process currently holding the lock\n # releases it.\n logger.debug(\"waiting to acquire lock on %s\", cache_path)\n with FileLock(cache_path + \".lock\", read_only_ok=True):\n if os.path.exists(cache_path):\n logger.info(\"cache of %s is up-to-date\", url)\n else:\n with CacheFile(cache_path) as cache_file:\n logger.info(\"%s not found in cache, downloading to %s\", url, cache_path)\n\n # GET file object\n if url.startswith(\"s3://\"):\n _s3_get(url, cache_file)\n elif url.startswith(\"gs://\"):\n _gcs_get(url, cache_file.name)\n else:\n _http_get(url, cache_file)\n\n logger.debug(\"creating metadata file for %s\", cache_path)\n meta = _Meta(\n resource=url,\n cached_path=cache_path,\n creation_time=time.time(),\n etag=etag,\n size=_get_resource_size(cache_path),\n )\n meta.to_file()\n\n return cache_path\n\n\ndef read_set_from_file(filename: str) -> Set[str]:\n \"\"\"\n Extract a de-duped collection (set) of text from a file.\n Expected file format is one item per line.\n \"\"\"\n collection = set()\n with open(filename, \"r\") as file_:\n for line in file_:\n collection.add(line.rstrip())\n return collection\n\n\ndef get_file_extension(path: str, dot=True, lower: bool = True):\n ext = os.path.splitext(path)[1]\n ext = ext if dot else ext[1:]\n return ext.lower() if lower else ext\n\n\ndef open_compressed(\n filename: Union[str, PathLike], mode: str = \"rt\", encoding: Optional[str] = \"UTF-8\", **kwargs\n):\n if not isinstance(filename, str):\n filename = str(filename)\n open_fn: Callable = open\n\n if filename.endswith(\".gz\"):\n import gzip\n\n open_fn = gzip.open\n elif filename.endswith(\".bz2\"):\n import bz2\n\n open_fn = bz2.open\n return open_fn(cached_path(filename), mode=mode, encoding=encoding, **kwargs)\n\n\ndef text_lines_from_file(filename: Union[str, PathLike], strip_lines: bool = True) -> Iterator[str]:\n with open_compressed(filename, \"rt\", encoding=\"UTF-8\", errors=\"replace\") as p:\n if strip_lines:\n for line in p:\n yield line.strip()\n else:\n yield from p\n\n\ndef json_lines_from_file(filename: Union[str, PathLike]) -> Iterable[Union[list, dict]]:\n return (json.loads(line) for line in text_lines_from_file(filename))\n\n\ndef _get_resource_size(path: str) -> int:\n \"\"\"\n Get the size of a file or directory.\n \"\"\"\n if os.path.isfile(path):\n return os.path.getsize(path)\n inodes: Set[int] = set()\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n # skip if it is symbolic link or the same as a file we've already accounted\n # for (this could happen with hard links).\n inode = os.stat(fp).st_ino\n if not os.path.islink(fp) and inode not in inodes:\n inodes.add(inode)\n total_size += os.path.getsize(fp)\n return total_size\n\n\nclass _CacheEntry(NamedTuple):\n regular_files: List[_Meta]\n extraction_dirs: List[_Meta]\n\n\ndef _find_entries(\n patterns: List[str] = None,\n cache_dir: Union[str, Path] = None,\n) -> Tuple[int, Dict[str, _CacheEntry]]:\n \"\"\"\n Find all cache entries, filtering ones that don't match any of the glob patterns given.\n\n Returns the total size of the matching entries and mapping or resource name to meta data.\n\n The values in the returned mapping are tuples because we seperate meta entries that\n correspond to extraction directories vs regular cache entries.\n \"\"\"\n cache_dir = 
os.path.expanduser(cache_dir or CACHE_DIRECTORY)\n\n total_size: int = 0\n cache_entries: Dict[str, _CacheEntry] = defaultdict(lambda: _CacheEntry([], []))\n for meta_path in glob.glob(str(cache_dir) + \"/*.json\"):\n meta = _Meta.from_path(meta_path)\n if patterns and not any(fnmatch(meta.resource, p) for p in patterns):\n continue\n if meta.extraction_dir:\n cache_entries[meta.resource].extraction_dirs.append(meta)\n else:\n cache_entries[meta.resource].regular_files.append(meta)\n total_size += meta.size\n\n # Sort entries for each resource by creation time, newest first.\n for entry in cache_entries.values():\n entry.regular_files.sort(key=lambda meta: meta.creation_time, reverse=True)\n entry.extraction_dirs.sort(key=lambda meta: meta.creation_time, reverse=True)\n\n return total_size, cache_entries\n\n\ndef remove_cache_entries(patterns: List[str], cache_dir: Union[str, Path] = None) -> int:\n \"\"\"\n Remove cache entries matching the given patterns.\n\n Returns the total reclaimed space in bytes.\n \"\"\"\n total_size, cache_entries = _find_entries(patterns=patterns, cache_dir=cache_dir)\n for resource, entry in cache_entries.items():\n for meta in entry.regular_files:\n logger.info(\"Removing cached version of %s at %s\", resource, meta.cached_path)\n os.remove(meta.cached_path)\n if os.path.exists(meta.cached_path + \".lock\"):\n os.remove(meta.cached_path + \".lock\")\n os.remove(meta.cached_path + \".json\")\n for meta in entry.extraction_dirs:\n logger.info(\"Removing extracted version of %s at %s\", resource, meta.cached_path)\n shutil.rmtree(meta.cached_path)\n if os.path.exists(meta.cached_path + \".lock\"):\n os.remove(meta.cached_path + \".lock\")\n os.remove(meta.cached_path + \".json\")\n return total_size\n\n\ndef inspect_cache(patterns: List[str] = None, cache_dir: Union[str, Path] = None):\n \"\"\"\n Print out useful information about the cache directory.\n \"\"\"\n from allennlp.common.util import format_timedelta, format_size\n\n cache_dir = os.path.expanduser(cache_dir or CACHE_DIRECTORY)\n\n # Gather cache entries by resource.\n total_size, cache_entries = _find_entries(patterns=patterns, cache_dir=cache_dir)\n\n if patterns:\n print(f\"Cached resources matching {patterns}:\")\n else:\n print(\"Cached resources:\")\n\n for resource, entry in sorted(\n cache_entries.items(),\n # Sort by creation time, latest first.\n key=lambda x: max(\n 0 if not x[1][0] else x[1][0][0].creation_time,\n 0 if not x[1][1] else x[1][1][0].creation_time,\n ),\n reverse=True,\n ):\n print(\"\\n-\", resource)\n if entry.regular_files:\n td = timedelta(seconds=time.time() - entry.regular_files[0].creation_time)\n n_versions = len(entry.regular_files)\n size = entry.regular_files[0].size\n print(\n f\" {n_versions} {'versions' if n_versions > 1 else 'version'} cached, \"\n f\"latest {format_size(size)} from {format_timedelta(td)} ago\"\n )\n if entry.extraction_dirs:\n td = timedelta(seconds=time.time() - entry.extraction_dirs[0].creation_time)\n n_versions = len(entry.extraction_dirs)\n size = entry.extraction_dirs[0].size\n print(\n f\" {n_versions} {'versions' if n_versions > 1 else 'version'} extracted, \"\n f\"latest {format_size(size)} from {format_timedelta(td)} ago\"\n )\n print(f\"\\nTotal size: {format_size(total_size)}\")\n"
]
| [
[
"numpy.frombuffer"
]
]
|
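The `_find_entries` helper in the row above filters cached resources with shell-style glob patterns via `fnmatch` before grouping them by resource name. A minimal sketch of that filtering step, using made-up resource URLs and patterns (none of these names come from the file itself):

```python
from fnmatch import fnmatch

# Hypothetical resource names and glob patterns, purely for illustration.
resources = [
    "https://example.com/model.tar.gz",
    "s3://my-bucket/weights.th",
    "gs://other-bucket/vocab.txt",
]
patterns = ["*.tar.gz", "s3://*"]

# Keep a resource if it matches at least one pattern, as _find_entries does.
matching = [r for r in resources if any(fnmatch(r, p) for p in patterns)]
print(matching)  # ['https://example.com/model.tar.gz', 's3://my-bucket/weights.th']
```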
wzj52501/Music-and-Mathematics-Term-Project | [
"59229c3abbff0d8458c9130a136ac95815dd29d4"
]
| [
"eval.py"
]
| [
"import numpy as np\nfrom scipy import stats\n\nnote_status=['A3', 'A-3', 'A--3', 'A#3', 'A##3', 'B3', 'B-3', 'B--3', 'B#3', 'B##3', 'C3', 'C-3', 'C--3', 'C#3', 'C##3', 'D3', 'D-3', 'D--3', 'D#3', 'D##3', 'E3', 'E-3', 'E--3', 'E#3', 'E##3', 'F3', 'F-3', 'F--3', 'F#3', 'F##3', 'G3', 'G-3', 'G--3', 'G#3', 'G##3', 'A4', 'A-4', 'A--4', 'A#4', 'A##4', 'B4', 'B-4', 'B--4', 'B#4', 'B##4', 'C4', 'C-4', 'C--4', 'C#4', 'C##4', 'D4', 'D-4', 'D--4', 'D#4', 'D##4', 'E4', 'E-4', 'E--4', 'E#4', 'E##4', 'F4', 'F-4', 'F--4', 'F#4', 'F##4', 'G4', 'G-4', 'G--4', 'G#4', 'G##4', 'A5', 'A-5', 'A--5', 'A#5', 'A##5', 'B5', 'B-5', 'B--5', 'B#5', 'B##5', 'C5', 'C-5', 'C--5', 'C#5', 'C##5', 'D5', 'D-5', 'D--5', 'D#5', 'D##5', 'E5', 'E-5', 'E--5', 'E#5', 'E##5', 'F5', 'F-5', 'F--5', 'F#5', 'F##5', 'G5', 'G-5', 'G--5', 'G#5', 'G##5']\ninterval_status = [\"whole\", \"half\", \"quarter\", \"eighth\", \"16th\", \"32nd\", \"64th\"]\n\ndef srocc(output, target):\n return stats.spearmanr(output, target)[0]\n\ndef evaluate(note_gen, interval_gen, note_ori, interval_ori):\n n,m=len(note_gen),len(note_ori)\n x=[note_status.index(note_gen[i])*6+interval_status.index(interval_gen[i]) for i in range(n)]\n y=[note_status.index(note_ori[i])*6+interval_status.index(interval_ori[i]) for i in range(m)]\n\n score=[srocc(x[i:i+m],y) for i in range(n-m+1)]\n score.sort(reverse=True)\n\n result=0.0\n k=m\n for i in range(k):\n result+=score[i]\n\n cnt=0\n for i in range(n-1):\n flag=1\n for j in range(i+1,n-1):\n if(x[i]==x[j] and x[i+1]==x[j+1]):\n flag=0\n if(flag):\n cnt+=1\n for i in range(n-2):\n flag=1\n for j in range(i+1,n-2):\n if(x[i]==x[j] and x[i+2]==x[j+2]):\n flag=0\n if(flag):\n cnt+=1\n\n sum=1\n for i in range(n):\n for j in range(i+1,n):\n flag=1\n for k in range(j-i):\n if(j+k>=n):\n break\n if(not x[i+k]==x[j+k]):\n flag=0\n break\n if(flag):\n sum+=j-i\n return result*cnt/n/sum\n\n\ndef evaluate2(note_gen, interval_gen, note_ori, interval_ori, note_ori2, interval_ori2):\n n,m,m2=len(note_gen),len(note_ori),len(note_ori2)\n x=[note_status.index(note_gen[i])*6+interval_status.index(interval_gen[i]) for i in range(n)]\n y=[note_status.index(note_ori[i])*6+interval_status.index(interval_ori[i]) for i in range(m)]\n z=[note_status.index(note_ori2[i])*6+interval_status.index(interval_ori2[i]) for i in range(m2)]\n if(m<m2):\n score=[-233]*(n-m+1)\n else:\n score=[-233]*(n-m2+1)\n for i in range(n-m+1):\n score[i]=srocc(x[i:i+m],y)\n for i in range(n-m2+1):\n val=srocc(x[i:i+m2],z)\n if(val>score[i]):\n score[i]=val\n \n score.sort(reverse=True)\n\n result=0.0\n k=m+m2\n for i in range(k):\n result+=score[i]\n\n cnt=0\n for i in range(n-1):\n flag=1\n for j in range(i+1,n-1):\n if(x[i]==x[j] and x[i+1]==x[j+1]):\n flag=0\n if(flag):\n cnt+=1\n for i in range(n-2):\n flag=1\n for j in range(i+1,n-2):\n if(x[i]==x[j] and x[i+2]==x[j+2]):\n flag=0\n if(flag):\n cnt+=1\n\n sum=1\n for i in range(n):\n for j in range(i+1,n):\n flag=1\n for k in range(j-i):\n if(j+k>=n):\n break\n if(not x[i+k]==x[j+k]):\n flag=0\n break\n if(flag):\n sum+=j-i\n return result*cnt/n/sum\n\nif __name__ == '__main__':\n note_list1 = ['G4','B-4','A4','B-4', 'G4','D4','A4','F#4', 'D4','G4','E-4','C4','A3', 'D4','B-3', 'G3', 'C4', 'A3', 'D4', 'B-3','A3','G3']\n interval_list1 = ['quarter','eighth','eighth','quarter','eighth','eighth','quarter','eighth','eighth','half', 'quarter','eighth','eighth','quarter','eighth','eighth','eighth','eighth','quarter','quarter','eighth','eighth']\n note_list2 = ['G4','B-4','A4','B-4', 'G4','D4','A4']\n interval_list2 = 
['quarter','eighth','eighth','quarter','eighth','eighth','quarter']\n\n print(evaluate(note_list1,interval_list1,note_list2,interval_list2))\n \n"
]
| [
[
"scipy.stats.spearmanr"
]
]
|
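The `evaluate()` functions in the row above score a generated melody by sliding a reference motif along it and taking Spearman rank correlations (`scipy.stats.spearmanr`) of integer-encoded (note, interval) pairs. A minimal sketch of that matching step; the note and interval vocabularies here are toy stand-ins, not the lists used in `eval.py`:

```python
from scipy import stats

def encode(notes, intervals, note_vocab, interval_vocab):
    # Collapse each (note, interval) pair into a single integer code.
    return [note_vocab.index(n) * len(interval_vocab) + interval_vocab.index(i)
            for n, i in zip(notes, intervals)]

note_vocab = ["C4", "D4", "E4", "F4", "G4", "A4", "B4"]      # toy vocabulary
interval_vocab = ["whole", "half", "quarter", "eighth"]      # toy vocabulary

x = encode(["C4", "E4", "G4", "E4", "C4", "D4"], ["quarter"] * 6, note_vocab, interval_vocab)
y = encode(["C4", "E4", "G4"], ["quarter"] * 3, note_vocab, interval_vocab)

# Spearman rho of every window of x against the reference y, as in srocc().
scores = [stats.spearmanr(x[i:i + len(y)], y)[0] for i in range(len(x) - len(y) + 1)]
print(max(scores))  # best alignment of the reference motif inside the generation
```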
nicproulx/mne-python | [
"4b0f362df04b21249faf6c9a2495c71c337a14a8"
]
| [
"mne/preprocessing/xdawn.py"
]
| [
"# Authors: Alexandre Barachant <[email protected]>\n# Asish Panda <[email protected]>\n# Jean-Remi King <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport numpy as np\nimport copy as cp\nfrom scipy import linalg\nfrom .ica import _get_fast_dot\nfrom .. import EvokedArray, Evoked\nfrom ..cov import Covariance, _regularized_covariance\nfrom ..decoding import TransformerMixin, BaseEstimator\nfrom ..epochs import BaseEpochs, EpochsArray\nfrom ..io import BaseRaw\nfrom ..io.pick import _pick_data_channels\nfrom ..utils import logger\nfrom ..externals.six import iteritems, itervalues\n\n\ndef _construct_signal_from_epochs(epochs, events, sfreq, tmin):\n \"\"\"Reconstruct pseudo continuous signal from epochs.\"\"\"\n n_epochs, n_channels, n_times = epochs.shape\n tmax = tmin + n_times / float(sfreq)\n start = (np.min(events[:, 0]) + int(tmin * sfreq))\n stop = (np.max(events[:, 0]) + int(tmax * sfreq) + 1)\n\n n_samples = stop - start\n n_epochs, n_channels, n_times = epochs.shape\n events_pos = events[:, 0] - events[0, 0]\n\n raw = np.zeros((n_channels, n_samples))\n for idx in range(n_epochs):\n onset = events_pos[idx]\n offset = onset + n_times\n raw[:, onset:offset] = epochs[idx]\n\n return raw\n\n\ndef _least_square_evoked(epochs_data, events, tmin, sfreq):\n \"\"\"Least square estimation of evoked response from epochs data.\n\n Parameters\n ----------\n epochs_data : array, shape (n_channels, n_times)\n The epochs data to estimate evoked.\n events : array, shape (n_events, 3)\n The events typically returned by the read_events function.\n If some events don't match the events of interest as specified\n by event_id, they will be ignored.\n tmin : float\n Start time before event.\n sfreq : float\n Sampling frequency.\n\n Returns\n -------\n evokeds : array, shape (n_class, n_components, n_times)\n An concatenated array of evoked data for each event type.\n toeplitz : array, shape (n_class * n_components, n_channels)\n An concatenated array of toeplitz matrix for each event type.\n \"\"\"\n n_epochs, n_channels, n_times = epochs_data.shape\n tmax = tmin + n_times / float(sfreq)\n\n # Deal with shuffled epochs\n events = events.copy()\n events[:, 0] -= events[0, 0] + int(tmin * sfreq)\n\n # Contruct raw signal\n raw = _construct_signal_from_epochs(epochs_data, events, sfreq, tmin)\n\n # Compute the independent evoked responses per condition, while correcting\n # for event overlaps.\n n_min, n_max = int(tmin * sfreq), int(tmax * sfreq)\n window = n_max - n_min\n n_samples = raw.shape[1]\n toeplitz = list()\n classes = np.unique(events[:, 2])\n for ii, this_class in enumerate(classes):\n # select events by type\n sel = events[:, 2] == this_class\n\n # build toeplitz matrix\n trig = np.zeros((n_samples, 1))\n ix_trig = (events[sel, 0]) + n_min\n trig[ix_trig] = 1\n toeplitz.append(linalg.toeplitz(trig[0:window], trig))\n\n # Concatenate toeplitz\n toeplitz = np.array(toeplitz)\n X = np.concatenate(toeplitz)\n\n # least square estimation\n predictor = np.dot(linalg.pinv(np.dot(X, X.T)), X)\n evokeds = np.dot(predictor, raw.T)\n evokeds = np.transpose(np.vsplit(evokeds, len(classes)), (0, 2, 1))\n return evokeds, toeplitz\n\n\ndef _fit_xdawn(epochs_data, y, n_components, reg=None, signal_cov=None,\n events=None, tmin=0., sfreq=1.):\n \"\"\"Fit filters and coefs using Xdawn Algorithm.\n\n Xdawn is a spatial filtering method designed to improve the signal\n to signal + noise ratio (SSNR) of the event related responses. 
Xdawn was\n originally designed for P300 evoked potential by enhancing the target\n response with respect to the non-target response. This implementation is a\n generalization to any type of event related response.\n\n Parameters\n ----------\n epochs_data : array, shape (n_epochs, n_channels, n_times)\n The epochs data.\n y : array, shape (n_epochs)\n The epochs class.\n n_components : int (default 2)\n The number of components to decompose the signals signals.\n reg : float | str | None (default None)\n If not None, allow regularization for covariance estimation\n if float, shrinkage covariance is used (0 <= shrinkage <= 1).\n if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')\n or Oracle Approximating Shrinkage ('oas').\n signal_cov : None | Covariance | array, shape (n_channels, n_channels)\n The signal covariance used for whitening of the data.\n if None, the covariance is estimated from the epochs signal.\n events : array, shape (n_epochs, 3)\n The epochs events, used to correct for epochs overlap.\n tmin : float\n Epochs starting time. Only used if events is passed to correct for\n epochs overlap.\n sfreq : float\n Sampling frequency. Only used if events is passed to correct for\n epochs overlap.\n\n Returns\n -------\n filters : array, shape (n_channels, n_channels)\n The Xdawn components used to decompose the data for each event type.\n patterns : array, shape (n_channels, n_channels)\n The Xdawn patterns used to restore the signals for each event type.\n evokeds : array, shape (n_class, n_components, n_times)\n The independent evoked responses per condition.\n \"\"\"\n n_epochs, n_channels, n_times = epochs_data.shape\n\n classes = np.unique(y)\n\n # Retrieve or compute whitening covariance\n if signal_cov is None:\n signal_cov = _regularized_covariance(np.hstack(epochs_data), reg)\n elif isinstance(signal_cov, Covariance):\n signal_cov = signal_cov.data\n if not isinstance(signal_cov, np.ndarray) or (\n not np.array_equal(signal_cov.shape,\n np.tile(epochs_data.shape[1], 2))):\n raise ValueError('signal_cov must be None, a covariance instance, '\n 'or an array of shape (n_chans, n_chans)')\n\n # Get prototype events\n if events is not None:\n evokeds, toeplitzs = _least_square_evoked(\n epochs_data, events, tmin, sfreq)\n else:\n evokeds, toeplitzs = list(), list()\n for c in classes:\n # Prototyped response for each class\n evokeds.append(np.mean(epochs_data[y == c, :, :], axis=0))\n toeplitzs.append(1.)\n\n filters = list()\n patterns = list()\n for evo, toeplitz in zip(evokeds, toeplitzs):\n # Estimate covariance matrix of the prototype response\n evo = np.dot(evo, toeplitz)\n evo_cov = np.matrix(_regularized_covariance(evo, reg))\n\n # Fit spatial filters\n evals, evecs = linalg.eigh(evo_cov, signal_cov)\n evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors\n evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)\n _patterns = np.linalg.pinv(evecs.T)\n filters.append(evecs[:, :n_components].T)\n patterns.append(_patterns[:, :n_components].T)\n\n filters = np.concatenate(filters, axis=0)\n patterns = np.concatenate(patterns, axis=0)\n evokeds = np.array(evokeds)\n return filters, patterns, evokeds\n\n\nclass _XdawnTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Implementation of the Xdawn Algorithm compatible with scikit-learn.\n\n Xdawn is a spatial filtering method designed to improve the signal\n to signal + noise ratio (SSNR) of the event related responses. 
Xdawn was\n originally designed for P300 evoked potential by enhancing the target\n response with respect to the non-target response. This implementation is a\n generalization to any type of event related response.\n\n .. note:: _XdawnTransformer does not correct for epochs overlap. To correct\n overlaps see ``Xdawn``.\n\n Parameters\n ----------\n n_components : int (default 2)\n The number of components to decompose the signals.\n reg : float | str | None (default None)\n If not None, allow regularization for covariance estimation\n if float, shrinkage covariance is used (0 <= shrinkage <= 1).\n if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')\n or Oracle Approximating Shrinkage ('oas').\n signal_cov : None | Covariance | array, shape (n_channels, n_channels)\n The signal covariance used for whitening of the data.\n if None, the covariance is estimated from the epochs signal.\n\n Attributes\n ----------\n classes_ : array, shape (n_classes)\n The event indices of the classes.\n filters_ : array, shape (n_channels, n_channels)\n The Xdawn components used to decompose the data for each event type.\n patterns_ : array, shape (n_channels, n_channels)\n The Xdawn patterns used to restore the signals for each event type.\n \"\"\"\n\n def __init__(self, n_components=2, reg=None, signal_cov=None):\n \"\"\"Init.\"\"\"\n self.n_components = n_components\n self.signal_cov = signal_cov\n self.reg = reg\n\n def fit(self, X, y=None):\n \"\"\"Fit Xdawn spatial filters.\n\n Parameters\n ----------\n X : array, shape (n_epochs, n_channels, n_samples)\n The target data.\n y : array, shape (n_epochs,) | None\n The target labels. If None, Xdawn fit on the average evoked.\n\n Returns\n -------\n self : Xdawn instance\n The Xdawn instance.\n \"\"\"\n X, y = self._check_Xy(X, y)\n\n # Main function\n self.classes_ = np.unique(y)\n self.filters_, self.patterns_, _ = _fit_xdawn(\n X, y, n_components=self.n_components, reg=self.reg,\n signal_cov=self.signal_cov)\n return self\n\n def transform(self, X):\n \"\"\"Transform data with spatial filters.\n\n Parameters\n ----------\n X : array, shape (n_epochs, n_channels, n_samples)\n The target data.\n\n Returns\n -------\n X : array, shape (n_epochs, n_components * n_classes, n_samples)\n The transformed data.\n \"\"\"\n X, _ = self._check_Xy(X)\n\n # Check size\n if self.filters_.shape[1] != X.shape[1]:\n raise ValueError('X must have %i channels, got %i instead.' % (\n self.filters_.shape[1], X.shape[1]))\n\n # Transform\n X = np.dot(self.filters_, X)\n X = X.transpose((1, 0, 2))\n return X\n\n def inverse_transform(self, X):\n \"\"\"Remove selected components from the signal.\n\n Given the unmixing matrix, transform data, zero out components,\n and inverse transform the data. 
This procedure will reconstruct\n the signals from which the dynamics described by the excluded\n components is subtracted.\n\n Parameters\n ----------\n X : array, shape (n_epochs, n_components * n_classes, n_times)\n The transformed data.\n\n Returns\n -------\n X : array, shape (n_epochs, n_channels * n_classes, n_times)\n The inverse transform data.\n \"\"\"\n # Check size\n X, _ = self._check_Xy(X)\n n_components, n_channels = self.patterns_.shape\n n_epochs, n_comp, n_times = X.shape\n if n_comp != (self.n_components * len(self.classes_)):\n raise ValueError('X must have %i components, got %i instead' % (\n self.n_components * len(self.classes_), n_comp))\n\n # Transform\n fast_dot = _get_fast_dot()\n return fast_dot(self.patterns_.T, X).transpose(1, 0, 2)\n\n def _check_Xy(self, X, y=None):\n \"\"\"Check X and y types and dimensions.\"\"\"\n # Check data\n if not isinstance(X, np.ndarray) or X.ndim != 3:\n raise ValueError('X must be an array of shape (n_epochs, '\n 'n_channels, n_samples).')\n if y is None:\n y = np.ones(len(X))\n y = np.asarray(y)\n if len(X) != len(y):\n raise ValueError('X and y must have the same length')\n return X, y\n\n\nclass Xdawn(_XdawnTransformer):\n \"\"\"Implementation of the Xdawn Algorithm.\n\n Xdawn [1]_ [2]_ is a spatial filtering method designed to improve the\n signal to signal + noise ratio (SSNR) of the ERP responses. Xdawn was\n originally designed for P300 evoked potential by enhancing the target\n response with respect to the non-target response. This implementation\n is a generalization to any type of ERP.\n\n Parameters\n ----------\n n_components : int (default 2)\n The number of components to decompose the signals.\n signal_cov : None | Covariance | ndarray, shape (n_channels, n_channels)\n (default None). The signal covariance used for whitening of the data.\n if None, the covariance is estimated from the epochs signal.\n correct_overlap : 'auto' or bool (default 'auto')\n Compute the independent evoked responses per condition, while\n correcting for event overlaps if any. If 'auto', then\n overlapp_correction = True if the events do overlap.\n reg : float | str | None (default None)\n if not None, allow regularization for covariance estimation\n if float, shrinkage covariance is used (0 <= shrinkage <= 1).\n if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')\n or Oracle Approximating Shrinkage ('oas').\n\n Attributes\n ----------\n ``filters_`` : dict of ndarray\n If fit, the Xdawn components used to decompose the data for each event\n type, else empty.\n ``patterns_`` : dict of ndarray\n If fit, the Xdawn patterns used to restore the signals for each event\n type, else empty.\n ``evokeds_`` : dict of evoked instance\n If fit, the evoked response for each event type.\n ``event_id_`` : dict of event id\n The event id.\n ``correct_overlap_``: bool\n Whether overlap correction was applied.\n\n Notes\n -----\n .. versionadded:: 0.10\n\n See Also\n --------\n mne.decoding.CSP\n\n References\n ----------\n .. [1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN\n algorithm to enhance evoked potentials: application to\n brain-computer interface. Biomedical Engineering, IEEE Transactions\n on, 56(8), 2035-2043.\n\n .. [2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J.\n (2011, August). Theoretical analysis of xDAWN algorithm:\n application to an efficient sensor selection in a P300 BCI. In\n Signal Processing Conference, 2011 19th European (pp. 
1382-1386).\n IEEE.\n \"\"\"\n\n def __init__(self, n_components=2, signal_cov=None, correct_overlap='auto',\n reg=None):\n \"\"\"Init.\"\"\"\n super(Xdawn, self).__init__(n_components=n_components,\n signal_cov=signal_cov, reg=reg)\n if correct_overlap not in ['auto', True, False]:\n raise ValueError('correct_overlap must be a bool or \"auto\"')\n self.correct_overlap = correct_overlap\n\n def fit(self, epochs, y=None):\n \"\"\"Fit Xdawn from epochs.\n\n Parameters\n ----------\n epochs : Epochs object\n An instance of Epoch on which Xdawn filters will be fitted.\n y : ndarray | None (default None)\n If None, used epochs.events[:, 2].\n\n Returns\n -------\n self : Xdawn instance\n The Xdawn instance.\n \"\"\"\n # Check data\n if not isinstance(epochs, BaseEpochs):\n raise ValueError('epochs must be an Epochs object.')\n X = epochs.get_data()\n X = X[:, _pick_data_channels(epochs.info), :]\n y = epochs.events[:, 2] if y is None else y\n self.event_id_ = epochs.event_id\n\n # Check that no baseline was applied with correct overlap\n correct_overlap = self.correct_overlap\n if correct_overlap == 'auto':\n # Events are overlapped if the minimal inter-stimulus\n # interval is smaller than the time window.\n isi = np.diff(np.sort(epochs.events[:, 0]))\n window = int((epochs.tmax - epochs.tmin) * epochs.info['sfreq'])\n correct_overlap = isi.min() < window\n\n if epochs.baseline and correct_overlap:\n raise ValueError('Cannot apply correct_overlap if epochs'\n ' were baselined.')\n\n events, tmin, sfreq = None, 0., 1.\n if correct_overlap:\n events = epochs.events\n tmin = epochs.tmin\n sfreq = epochs.info['sfreq']\n self.correct_overlap_ = correct_overlap\n\n # Note: In this original version of Xdawn we compute and keep all\n # components. The selection comes at transform().\n n_components = X.shape[1]\n\n # Main fitting function\n filters, patterns, evokeds = _fit_xdawn(\n X, y, n_components=n_components, reg=self.reg,\n signal_cov=self.signal_cov, events=events, tmin=tmin, sfreq=sfreq)\n\n # Re-order filters and patterns according to event_id\n filters = filters.reshape(-1, n_components, filters.shape[-1])\n patterns = patterns.reshape(-1, n_components, patterns.shape[-1])\n self.filters_, self.patterns_, self.evokeds_ = dict(), dict(), dict()\n idx = np.argsort([value for _, value in iteritems(epochs.event_id)])\n for eid, this_filter, this_pattern, this_evo in zip(\n epochs.event_id, filters[idx], patterns[idx], evokeds[idx]):\n self.filters_[eid] = this_filter.T\n self.patterns_[eid] = this_pattern.T\n n_events = len(epochs[eid])\n evoked = EvokedArray(this_evo, epochs.info, tmin=epochs.tmin,\n comment=eid, nave=n_events)\n self.evokeds_[eid] = evoked\n return self\n\n def transform(self, epochs):\n \"\"\"Apply Xdawn dim reduction.\n\n Parameters\n ----------\n epochs : Epochs | ndarray, shape (n_epochs, n_channels, n_times)\n Data on which Xdawn filters will be applied.\n\n Returns\n -------\n X : ndarray, shape (n_epochs, n_components * n_event_types, n_times)\n Spatially filtered signals.\n \"\"\"\n if isinstance(epochs, BaseEpochs):\n X = epochs.get_data()\n elif isinstance(epochs, np.ndarray):\n X = epochs\n else:\n raise ValueError('Data input must be of Epoch type or numpy array')\n\n filters = [filt[:self.n_components]\n for filt in itervalues(self.filters_)]\n filters = np.concatenate(filters, axis=0)\n X = np.dot(filters, X)\n return X.transpose((1, 0, 2))\n\n def apply(self, inst, event_id=None, include=None, exclude=None):\n \"\"\"Remove selected components from the 
signal.\n\n Given the unmixing matrix, transform data,\n zero out components, and inverse transform the data.\n This procedure will reconstruct the signals from which\n the dynamics described by the excluded components is subtracted.\n\n Parameters\n ----------\n inst : instance of Raw | Epochs | Evoked\n The data to be processed.\n event_id : dict | list of str | None (default None)\n The kind of event to apply. if None, a dict of inst will be return\n one for each type of event xdawn has been fitted.\n include : array_like of int | None (default None)\n The indices referring to columns in the ummixing matrix. The\n components to be kept. If None, the first n_components (as defined\n in the Xdawn constructor) will be kept.\n exclude : array_like of int | None (default None)\n The indices referring to columns in the ummixing matrix. The\n components to be zeroed out. If None, all the components except the\n first n_components will be exclude.\n\n Returns\n -------\n out : dict of instance\n A dict of instance (from the same type as inst input) for each\n event type in event_id.\n \"\"\"\n if event_id is None:\n event_id = self.event_id_\n\n if not isinstance(inst, (BaseRaw, BaseEpochs, Evoked)):\n raise ValueError('Data input must be Raw, Epochs or Evoked type')\n picks = _pick_data_channels(inst.info)\n\n # Define the components to keep\n default_exclude = list(range(self.n_components, len(inst.ch_names)))\n if exclude is None:\n exclude = default_exclude\n else:\n exclude = list(set(list(default_exclude) + list(exclude)))\n\n if isinstance(inst, BaseRaw):\n out = self._apply_raw(raw=inst, include=include, exclude=exclude,\n event_id=event_id, picks=picks)\n elif isinstance(inst, BaseEpochs):\n out = self._apply_epochs(epochs=inst, include=include, picks=picks,\n exclude=exclude, event_id=event_id)\n elif isinstance(inst, Evoked):\n out = self._apply_evoked(evoked=inst, include=include, picks=picks,\n exclude=exclude, event_id=event_id)\n return out\n\n def _apply_raw(self, raw, include, exclude, event_id, picks):\n \"\"\"Aux method.\"\"\"\n if not raw.preload:\n raise ValueError('Raw data must be preloaded to apply Xdawn')\n\n raws = dict()\n for eid in event_id:\n data = raw[picks, :][0]\n\n data = self._pick_sources(data, include, exclude, eid)\n\n raw_r = raw.copy()\n\n raw_r[picks, :] = data\n raws[eid] = raw_r\n return raws\n\n def _apply_epochs(self, epochs, include, exclude, event_id, picks):\n \"\"\"Aux method.\"\"\"\n if not epochs.preload:\n raise ValueError('Epochs must be preloaded to apply Xdawn')\n\n # special case where epochs come picked but fit was 'unpicked'.\n epochs_dict = dict()\n data = np.hstack(epochs.get_data()[:, picks])\n\n for eid in event_id:\n\n data_r = self._pick_sources(data, include, exclude, eid)\n data_r = np.array(np.split(data_r, len(epochs.events), 1))\n info_r = cp.deepcopy(epochs.info)\n epochs_r = EpochsArray(data=data_r, info=info_r,\n events=epochs.events, tmin=epochs.tmin,\n event_id=epochs.event_id, verbose=False)\n epochs_r.preload = True\n epochs_dict[eid] = epochs_r\n\n return epochs_dict\n\n def _apply_evoked(self, evoked, include, exclude, event_id, picks):\n \"\"\"Aux method.\"\"\"\n data = evoked.data[picks]\n evokeds = dict()\n\n for eid in event_id:\n\n data_r = self._pick_sources(data, include, exclude, eid)\n evokeds[eid] = evoked.copy()\n\n # restore evoked\n evokeds[eid].data[picks] = data_r\n\n return evokeds\n\n def _pick_sources(self, data, include, exclude, eid):\n \"\"\"Aux method.\"\"\"\n fast_dot = _get_fast_dot()\n\n 
logger.info('Transforming to Xdawn space')\n\n # Apply unmixing\n sources = fast_dot(self.filters_[eid].T, data)\n\n if include not in (None, list()):\n mask = np.ones(len(sources), dtype=np.bool)\n mask[np.unique(include)] = False\n sources[mask] = 0.\n logger.info('Zeroing out %i Xdawn components' % mask.sum())\n elif exclude not in (None, list()):\n exclude_ = np.unique(exclude)\n sources[exclude_] = 0.\n logger.info('Zeroing out %i Xdawn components' % len(exclude_))\n logger.info('Inverse transforming to sensor space')\n data = fast_dot(self.patterns_[eid], sources)\n\n return data\n\n def inverse_transform(self):\n \"\"\"Not implemented, see Xdawn.apply() instead.\"\"\"\n # Exists because of _XdawnTransformer\n raise NotImplementedError('See Xdawn.apply()')\n"
]
| [
[
"numpy.concatenate",
"numpy.max",
"numpy.array",
"numpy.dot",
"scipy.linalg.toeplitz",
"numpy.asarray",
"numpy.zeros",
"scipy.linalg.eigh",
"numpy.linalg.pinv",
"numpy.tile",
"numpy.min",
"numpy.mean",
"numpy.apply_along_axis",
"numpy.sort",
"numpy.argsort",
"numpy.hstack",
"numpy.unique"
]
]
|
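`_fit_xdawn` in the row above obtains its spatial filters from a generalized eigendecomposition of the evoked covariance against the signal covariance (`scipy.linalg.eigh(evo_cov, signal_cov)`), keeping the leading eigenvectors. A minimal sketch of that step on synthetic data; the random arrays stand in for real epochs and evoked responses:

```python
import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
n_channels, n_components = 8, 2

signal = rng.standard_normal((n_channels, 500))      # stand-in continuous signal
evoked = rng.standard_normal((n_channels, 100))      # stand-in prototype response

signal_cov = np.cov(signal)
evo_cov = np.cov(evoked)

evals, evecs = linalg.eigh(evo_cov, signal_cov)      # generalized eigendecomposition
evecs = evecs[:, np.argsort(evals)[::-1]]            # sort by decreasing eigenvalue
evecs /= np.linalg.norm(evecs, axis=0)               # unit-norm columns
filters = evecs[:, :n_components].T                  # (n_components, n_channels)
patterns = np.linalg.pinv(evecs.T)[:, :n_components].T

sources = filters @ signal                           # spatially filtered signal
print(filters.shape, sources.shape)                  # (2, 8) (2, 500)
```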
vnshanmukh/nowcasting_dataset | [
"168305ba62abb035c4dbb2cf2089722b952e7311"
]
| [
"nowcasting_dataset/time.py"
]
| [
"\"\"\" Time functions \"\"\"\nimport logging\nimport random\nimport warnings\nfrom typing import Dict, Iterable, List, Tuple\n\nimport numpy as np\nimport pandas as pd\nimport pvlib\n\nfrom nowcasting_dataset import geospatial, utils\n\nlogger = logging.getLogger(__name__)\n\n\nFIVE_MINUTES = pd.Timedelta(\"5 minutes\")\nTHIRTY_MINUTES = pd.Timedelta(\"30 minutes\")\n\n\ndef select_daylight_datetimes(\n datetimes: pd.DatetimeIndex, locations: Iterable[Tuple[float, float]], ghi_threshold: float = 10\n) -> pd.DatetimeIndex:\n \"\"\"\n Select only the day time datetimes\n\n Args:\n datetimes: DatetimeIndex to filter.\n locations: List of Tuples of x, y coordinates in OSGB projection.\n For example, use the four corners of the satellite imagery.\n ghi_threshold: Global horizontal irradiance threshold.\n (Watts per square meter?)\n\n Returns: datetimes for which the global horizontal irradiance (GHI) is above ghi_threshold\n across all locations.\n\n \"\"\"\n ghi_for_all_locations = []\n for x, y in locations:\n lat, lon = geospatial.osgb_to_lat_lon(x, y)\n location = pvlib.location.Location(latitude=lat, longitude=lon)\n with warnings.catch_warnings():\n # PyTables triggers a DeprecationWarning in Numpy >= 1.20:\n # \"tables/array.py:241: DeprecationWarning: `np.object` is a\n # deprecated alias for the builtin `object`.\"\n # See https://github.com/PyTables/PyTables/issues/898\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n clearsky = location.get_clearsky(datetimes)\n ghi = clearsky[\"ghi\"]\n ghi_for_all_locations.append(ghi)\n\n ghi_for_all_locations = pd.concat(ghi_for_all_locations, axis=\"columns\")\n max_ghi = ghi_for_all_locations.max(axis=\"columns\")\n mask = max_ghi > ghi_threshold\n return datetimes[mask]\n\n\ndef single_period_to_datetime_index(period: pd.Series, freq: str) -> pd.DatetimeIndex:\n \"\"\"Return a DatetimeIndex from period['start_dt'] to period['end_dt'] at frequency freq.\n\n Before computing the date_range, this function first takes the ceiling of the\n start_dt (at frequency `freq`); and takes the floor of end_dt. For example,\n if `freq` is '5 minutes' and `start_dt` is 12:03, then the ceiling of `start_dt`\n will be 12:05. This is done so that all the returned datetimes are aligned to `freq`\n (e.g. 
if `freq` is '5 minutes' then every returned datetime will be at 00, 05, ..., 55\n minutes past the hour.\n \"\"\"\n start_dt = period[\"start_dt\"].ceil(freq)\n end_dt = period[\"end_dt\"].floor(freq)\n return pd.date_range(start_dt, end_dt, freq=freq)\n\n\ndef time_periods_to_datetime_index(time_periods: pd.DataFrame, freq: str) -> pd.DatetimeIndex:\n \"\"\"Convert a DataFrame of time periods into a DatetimeIndex at a particular frequency.\n\n See the docstring of intersection_of_2_dataframes_of_periods() for more details.\n \"\"\"\n assert len(time_periods) > 0\n dt_index = single_period_to_datetime_index(time_periods.iloc[0], freq=freq)\n for _, time_period in time_periods.iloc[1:].iterrows():\n new_dt_index = single_period_to_datetime_index(time_period, freq=freq)\n dt_index = dt_index.union(new_dt_index)\n return dt_index\n\n\ndef intersection_of_multiple_dataframes_of_periods(\n time_periods: list[pd.DataFrame],\n) -> pd.DataFrame:\n \"\"\"Finds the intersection of a list of time periods.\n\n See the docstring of intersection_of_2_dataframes_of_periods() for more details.\n \"\"\"\n assert len(time_periods) > 0\n if len(time_periods) == 1:\n return time_periods[0]\n intersection = intersection_of_2_dataframes_of_periods(time_periods[0], time_periods[1])\n for time_period in time_periods[2:]:\n intersection = intersection_of_2_dataframes_of_periods(intersection, time_period)\n return intersection\n\n\ndef intersection_of_2_dataframes_of_periods(a: pd.DataFrame, b: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Finds the intersection of two pd.DataFrames of time periods.\n\n Each row of each pd.DataFrame represents a single time period. Each pd.DataFrame has\n two columns: `start_dt` and `end_dt` (where 'dt' is short for 'datetime').\n\n A typical use-case is that each pd.DataFrame represents all the time periods where\n a `DataSource` has contiguous, valid data.\n\n Here's a graphical example of two pd.DataFrames of time periods and their intersection:\n\n ----------------------> TIME ->---------------------\n a: |-----| |----| |----------| |-----------|\n b: |--------| |----| |---|\n intersection: |--| |-| |--| |---|\n\n Args:\n a, b: pd.DataFrame where each row represents a time period. 
The pd.DataFrame has\n two columns: start_dt and end_dt.\n\n Returns:\n Sorted list of intersecting time periods represented as a pd.DataFrame with two columns:\n start_dt and end_dt.\n \"\"\"\n if a.empty or b.empty:\n return pd.DataFrame(columns=[\"start_dt\", \"end_dt\"])\n\n all_intersecting_periods = []\n for a_period in a.itertuples():\n # Five ways in which two periods may overlap:\n # a: |----| or |---| or |---| or |--| or |-|\n # b: |--| |---| |---| |------| |-|\n # In all five, `a` must always start before `b` ends,\n # and `a` must always end after `b` starts:\n overlapping_periods = b[(a_period.start_dt < b.end_dt) & (a_period.end_dt > b.start_dt)]\n\n # There are two ways in which two periods may *not* overlap:\n # a: |---| or |---|\n # b: |---| |---|\n # `overlapping` will not include periods which do *not* overlap.\n\n # Now find the intersection of each period in `overlapping_periods` with\n # the period from `a` that starts at `a_start_dt` and ends at `a_end_dt`.\n # We do this by clipping each row of `overlapping_periods`\n # to start no earlier than `a_start_dt`, and end no later than `a_end_dt`.\n\n # First, make a copy, so we don't clip the underlying data in `b`.\n intersecting_periods = overlapping_periods.copy()\n intersecting_periods.start_dt.clip(lower=a_period.start_dt, inplace=True)\n intersecting_periods.end_dt.clip(upper=a_period.end_dt, inplace=True)\n\n all_intersecting_periods.append(intersecting_periods)\n\n all_intersecting_periods = pd.concat(all_intersecting_periods)\n return all_intersecting_periods.sort_values(by=\"start_dt\").reset_index(drop=True)\n\n\ndef get_contiguous_time_periods(\n datetimes: pd.DatetimeIndex,\n min_seq_length: int,\n max_gap_duration: pd.Timedelta = THIRTY_MINUTES,\n) -> pd.DataFrame:\n \"\"\"Returns a pd.DataFrame where each row records the boundary of a contiguous time periods.\n\n Args:\n datetimes: The pd.DatetimeIndex of the timeseries. Must be sorted.\n min_seq_length: Sequences of min_seq_length or shorter will be discarded. Typically, this\n would be set to the `total_seq_length` of each machine learning example.\n max_gap_duration: If any pair of consecutive `datetimes` is more than `max_gap_duration`\n apart, then this pair of `datetimes` will be considered a \"gap\" between two contiguous\n sequences. Typically, `max_gap_duration` would be set to the sample period of\n the timeseries.\n\n Returns:\n pd.DataFrame where each row represents a single time period. The pd.DataFrame\n has two columns: `start_dt` and `end_dt` (where 'dt' is short for 'datetime').\n \"\"\"\n # Sanity checks.\n assert len(datetimes) > 0\n assert min_seq_length > 1\n assert datetimes.is_monotonic_increasing\n assert datetimes.is_unique\n\n # Find indices of gaps larger than max_gap:\n gap_mask = np.diff(datetimes) > max_gap_duration\n gap_indices = np.argwhere(gap_mask)[:, 0]\n\n # gap_indicies are the indices into dt_index for the timestep immediately\n # *before* the gap. e.g. if the datetimes at 12:00, 12:05, 18:00, 18:05\n # then gap_indicies will be [1]. 
So we add 1 to gap_indices to get\n # segment_boundaries, an index into dt_index which identifies the _start_\n # of each segment.\n segment_boundaries = gap_indices + 1\n\n # Capture the last segment of dt_index.\n segment_boundaries = np.concatenate((segment_boundaries, [len(datetimes)]))\n\n periods: List[Dict[str, pd.Timestamp]] = []\n start_i = 0\n for next_start_i in segment_boundaries:\n n_timesteps = next_start_i - start_i\n if n_timesteps > min_seq_length:\n end_i = next_start_i - 1\n period = {\"start_dt\": datetimes[start_i], \"end_dt\": datetimes[end_i]}\n periods.append(period)\n start_i = next_start_i\n\n assert len(periods) > 0\n\n return pd.DataFrame(periods)\n\n\ndef make_random_time_vectors(batch_size, seq_length_5_minutes, seq_length_30_minutes):\n \"\"\"\n Make random time vectors\n\n 1. t0_dt, Get random datetimes from 2019\n 2. Exapnd t0_dt to make 5 and 30 mins sequences\n\n Args:\n batch_size: the batch size\n seq_length_5_minutes: the length of the sequence in 5 mins deltas\n seq_length_30_minutes: the length of the sequence in 30 mins deltas\n\n Returns:\n - t0_dt: [batch_size] random init datetimes\n - time_5: [batch_size, seq_length_5_minutes] random sequence of datetimes, with\n 5 mins deltas.\n t0_dt is in the middle of the sequence\n - time_30: [batch_size, seq_length_30_minutes] random sequence of datetimes, with\n 30 mins deltas.\n t0_dt is in the middle of the sequence\n \"\"\"\n delta_5 = pd.Timedelta(minutes=5)\n delta_30 = pd.Timedelta(minutes=30)\n\n data_range = pd.date_range(\"2019-01-01\", \"2021-01-01\", freq=\"5T\")\n t0_dt = pd.Series(random.choices(data_range, k=batch_size))\n time_5 = (\n pd.DataFrame([t0_dt + i * delta_5 for i in range(seq_length_5_minutes)])\n - int(seq_length_5_minutes / 2) * delta_5\n )\n time_30 = (\n pd.DataFrame([t0_dt + i * delta_30 for i in range(seq_length_30_minutes)])\n - int(seq_length_30_minutes / 2) * delta_5\n )\n\n t0_dt = utils.to_numpy(t0_dt).astype(np.int32)\n time_5 = utils.to_numpy(time_5.T).astype(np.int32)\n time_30 = utils.to_numpy(time_30.T).astype(np.int32)\n\n return t0_dt, time_5, time_30\n"
]
| [
[
"pandas.Timedelta",
"pandas.DataFrame",
"pandas.date_range",
"numpy.diff",
"pandas.concat",
"numpy.argwhere"
]
]
|
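`get_contiguous_time_periods()` in the row above splits a `DatetimeIndex` into contiguous segments by locating gaps with `np.diff` and `np.argwhere`. A minimal sketch of that gap-splitting idea on a handful of toy timestamps:

```python
import numpy as np
import pandas as pd

datetimes = pd.DatetimeIndex(
    ["2021-01-01 12:00", "2021-01-01 12:05", "2021-01-01 12:10",
     "2021-01-01 18:00", "2021-01-01 18:05"]
)
max_gap = pd.Timedelta("30 minutes")

gap_mask = np.diff(datetimes) > max_gap            # True where a gap starts
gap_indices = np.argwhere(gap_mask)[:, 0]          # index just *before* each gap
segment_starts = np.concatenate(([0], gap_indices + 1, [len(datetimes)]))

periods = [
    {"start_dt": datetimes[a], "end_dt": datetimes[b - 1]}
    for a, b in zip(segment_starts[:-1], segment_starts[1:])
]
print(pd.DataFrame(periods))  # two contiguous periods: 12:00-12:10 and 18:00-18:05
```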
vredzhepov/wfdb-python | [
"6be3066213eb87dc646ca61f23d1745adde86482"
]
| [
"wfdb/io/tff.py"
]
| [
"\"\"\"\nModule for reading ME6000 .tff format files.\n\nhttp://www.biomation.com/kin/me6000.htm\n\n\"\"\"\nimport datetime\nimport os\nimport struct\n\nimport numpy as np\n\n\ndef rdtff(file_name, cut_end=False):\n \"\"\"\n Read values from a tff file.\n\n Parameters\n ----------\n file_name : str\n Name of the .tff file to read.\n cut_end : bool, optional\n If True, cuts out the last sample for all channels. This is for\n reading files which appear to terminate with the incorrect\n number of samples (ie. sample not present for all channels).\n\n Returns\n -------\n signal : ndarray\n A 2d numpy array storing the physical signals from the record.\n fields : dict\n A dictionary containing several key attributes of the read record.\n markers : ndarray\n A 1d numpy array storing the marker locations.\n triggers : ndarray\n A 1d numpy array storing the trigger locations.\n\n Notes\n -----\n This function is slow because tff files may contain any number of\n escape sequences interspersed with the signals. There is no way to\n know the number of samples/escape sequences beforehand, so the file\n is inefficiently parsed a small chunk at a time.\n\n It is recommended that you convert your tff files to WFDB format.\n\n \"\"\"\n file_size = os.path.getsize(file_name)\n with open(file_name, 'rb') as fp:\n fields, file_fields = _rdheader(fp)\n signal, markers, triggers = _rdsignal(fp, file_size=file_size,\n header_size=file_fields['header_size'],\n n_sig=file_fields['n_sig'],\n bit_width=file_fields['bit_width'],\n is_signed=file_fields['is_signed'],\n cut_end=cut_end)\n return signal, fields, markers, triggers\n\n\ndef _rdheader(fp):\n \"\"\"\n Read header info of the windaq file.\n\n Parameters\n ----------\n fp : file IO object\n The input header file to be read.\n\n Returns\n -------\n fields : dict\n For interpreting the waveforms.\n file_fields : dict\n For reading the signal samples.\n\n \"\"\"\n tag = None\n # The '2' tag indicates the end of tags.\n while tag != 2:\n # For each header element, there is a tag indicating data type,\n # followed by the data size, followed by the data itself. 0's\n # pad the content to the nearest 4 bytes. 
If data_len=0, no pad.\n tag = struct.unpack('>H', fp.read(2))[0]\n data_size = struct.unpack('>H', fp.read(2))[0]\n pad_len = (4 - (data_size % 4)) % 4\n pos = fp.tell()\n # Currently, most tags will be ignored...\n # storage method\n if tag == 1001:\n storage_method = fs = struct.unpack('B', fp.read(1))[0]\n storage_method = {0:'recording', 1:'manual', 2:'online'}[storage_method]\n # fs, unit16\n elif tag == 1003:\n fs = struct.unpack('>H', fp.read(2))[0]\n # sensor type\n elif tag == 1007:\n # Each byte contains information for one channel\n n_sig = data_size\n channel_data = struct.unpack('>%dB' % data_size, fp.read(data_size))\n # The documentation states: \"0 : Channel is not used\"\n # This means the samples are NOT saved.\n channel_map = ((1, 1, 'emg'),\n (15, 30, 'goniometer'), (31, 46, 'accelerometer'),\n (47, 62, 'inclinometer'),\n (63, 78, 'polar_interface'), (79, 94, 'ecg'),\n (95, 110, 'torque'), (111, 126, 'gyrometer'),\n (127, 142, 'sensor'))\n sig_name = []\n # The number range that the data lies between gives the\n # channel\n for data in channel_data:\n # Default case if byte value falls outside of channel map\n base_name = 'unknown'\n # Unused channel\n if data == 0:\n n_sig -= 1\n break\n for item in channel_map:\n if item[0] <= data <= item[1]:\n base_name = item[2]\n break\n existing_count = [base_name in name for name in sig_name].count(True)\n sig_name.append('%s_%d' % (base_name, existing_count))\n # Display scale. Probably not useful.\n elif tag == 1009:\n # 100, 500, 1000, 2500, or 8500uV\n display_scale = struct.unpack('>I', fp.read(4))[0]\n # sample format, uint8\n elif tag == 3:\n sample_fmt = struct.unpack('B', fp.read(1))[0]\n is_signed = bool(sample_fmt >> 7)\n # ie. 8 or 16 bits\n bit_width = sample_fmt & 127\n # Measurement start time - seconds from 1.1.1970 UTC\n elif tag == 101:\n n_seconds = struct.unpack('>I', fp.read(4))[0]\n base_datetime = datetime.datetime.utcfromtimestamp(n_seconds)\n base_date = base_datetime.date()\n base_time = base_datetime.time()\n # Measurement start time - minutes from UTC\n elif tag == 102:\n n_minutes = struct.unpack('>h', fp.read(2))[0]\n # Go to the next tag\n fp.seek(pos + data_size + pad_len)\n header_size = fp.tell()\n # For interpreting the waveforms\n fields = {'fs':fs, 'n_sig':n_sig, 'sig_name':sig_name,\n 'base_time':base_time, 'base_date':base_date}\n # For reading the signal samples\n file_fields = {'header_size':header_size, 'n_sig':n_sig,\n 'bit_width':bit_width, 'is_signed':is_signed}\n return fields, file_fields\n\n\ndef _rdsignal(fp, file_size, header_size, n_sig, bit_width, is_signed, cut_end):\n \"\"\"\n Read the signal.\n\n Parameters\n ----------\n fp : file IO object\n The input header file to be read.\n file_size : int\n Size of the file in bytes.\n header_size : int\n Size of the header file in bytes.\n n_sig : int\n The number of signals contained in the dat file.\n bit_width : int\n The number of bits necessary to represent the number in binary.\n is_signed : bool\n Whether the number is signed (True) or not (False).\n cut_end : bool, optional\n If True, enables reading the end of files which appear to terminate\n with the incorrect number of samples (ie. 
sample not present for all channels),\n by checking and skipping the reading the end of such files.\n Checking this option makes reading slower.\n \n Returns\n -------\n signal : ndarray\n Tranformed expanded signal into uniform signal.\n markers : ndarray\n A 1d numpy array storing the marker locations.\n triggers : ndarray\n A 1d numpy array storing the trigger locations.\n\n \"\"\"\n # Cannot initially figure out signal length because there\n # are escape sequences.\n fp.seek(header_size)\n signal_size = file_size - header_size\n byte_width = int(bit_width / 8)\n # numpy dtype\n dtype = str(byte_width)\n if is_signed:\n dtype = 'i' + dtype\n else:\n dtype = 'u' + dtype\n # big endian\n dtype = '>' + dtype\n # The maximum possible samples given the file size\n # All channels must be present\n max_samples = int(signal_size / byte_width)\n max_samples = max_samples - max_samples % n_sig\n # Output information\n signal = np.empty(max_samples, dtype=dtype)\n markers = []\n triggers = []\n # Number of (total) samples read\n sample_num = 0\n\n # Read one sample for all channels at a time\n if cut_end:\n stop_byte = file_size - n_sig * byte_width + 1\n while fp.tell() < stop_byte:\n chunk = fp.read(2)\n sample_num = _get_sample(fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num)\n else:\n while True:\n chunk = fp.read(2)\n if not chunk:\n break\n sample_num = _get_sample(fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num)\n\n # No more bytes to read. Reshape output arguments.\n signal = signal[:sample_num]\n signal = signal.reshape((-1, n_sig))\n markers = np.array(markers, dtype='int')\n triggers = np.array(triggers, dtype='int')\n return signal, markers, triggers\n\n\ndef _get_sample(fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num):\n \"\"\"\n Get the total number of samples in the signal.\n\n Parameters\n ----------\n fp : file IO object\n The input header file to be read.\n chunk : str\n The data currently being processed.\n n_sig : int\n The number of signals contained in the dat file.\n dtype : str\n String numpy dtype used to store the signal of the given\n resolution.\n signal : ndarray\n Tranformed expanded signal into uniform signal. \n markers : ndarray\n A 1d numpy array storing the marker locations.\n triggers : ndarray\n A 1d numpy array storing the trigger locations.\n sample_num : int\n The total number of samples in the signal.\n\n Returns\n -------\n sample_num : int\n The total number of samples in the signal.\n\n \"\"\"\n tag = struct.unpack('>h', chunk)[0]\n # Escape sequence\n if tag == -32768:\n # Escape sequence structure: int16 marker, uint8 type,\n # uint8 length, uint8 * length data, padding % 2\n escape_type = struct.unpack('B', fp.read(1))[0]\n data_len = struct.unpack('B', fp.read(1))[0]\n # Marker*\n if escape_type == 1:\n # *In manual mode, this could be block start/top time.\n # But we are it is just a single time marker.\n markers.append(sample_num / n_sig)\n # Trigger\n elif escape_type == 2:\n triggers.append(sample_num / n_sig)\n fp.seek(data_len + data_len % 2, 1)\n # Regular samples\n else:\n fp.seek(-2, 1)\n signal[sample_num:sample_num + n_sig] = np.fromfile(\n fp, dtype=dtype, count=n_sig)\n sample_num += n_sig\n return sample_num\n"
]
| [
[
"numpy.array",
"numpy.empty",
"numpy.fromfile"
]
]
|
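`_rdsignal()` in the row above decodes samples by building a big-endian numpy dtype from the header's bit width and signedness, reading interleaved channel samples, and reshaping to one row per time step. A minimal sketch of that convention; the payload is generated on the fly rather than read from a real .tff file, and the escape-sequence handling is omitted:

```python
import os
import tempfile

import numpy as np

n_sig, bit_width, is_signed = 3, 16, True
byte_width = bit_width // 8
dtype = ">" + ("i" if is_signed else "u") + str(byte_width)   # big-endian, e.g. '>i2'

# Write a fake interleaved payload (4 time steps x 3 channels) to a temp file.
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(np.arange(4 * n_sig, dtype=dtype).tobytes())
    path = tmp.name

with open(path, "rb") as fp:
    flat = np.fromfile(fp, dtype=dtype, count=4 * n_sig)      # as in _get_sample()
signal = flat.reshape((-1, n_sig))                            # one row per time step
print(signal)
os.remove(path)
```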
facebookresearch/hsd3 | [
"2fdacb3accf3056637d6420358a9349898f7de3f"
]
| [
"hucc/agents/sacse.py"
]
| [
"# Copyright (c) 2021-present, Facebook, Inc.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport logging\nfrom copy import copy, deepcopy\nfrom types import SimpleNamespace\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport gym\nimport hydra\nimport numpy as np\nimport torch as th\nfrom omegaconf import DictConfig\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom hucc import ReplayBuffer\nfrom hucc.agents import Agent\nfrom hucc.models import TracedModule\n\nlog = logging.getLogger(__name__)\n\n\nclass SACSEAgent(Agent):\n '''\n Soft Actor-Critic \"Switching Ensemble\" agent, originally proposed with TD3\n in \"Why Does Hierarchy (Sometimes) Work So Well in Reinforcement Learning?\",\n Nachum et al., 2019.\n Both `module.pi` and `module.q` are will be conditioned on an extra one-hot\n 'task' input, corresponding to the ensemble index. This is because we have\n some models bilinear models available to work with that :)\n '''\n\n def __init__(\n self,\n env: gym.Env,\n model: nn.Module,\n optim: SimpleNamespace,\n cfg: DictConfig,\n ):\n super().__init__(cfg)\n if not hasattr(model, 'pi'):\n raise ValueError('Model needs \"pi\" module')\n if not hasattr(model, 'q'):\n raise ValueError('Model needs \"q\" module')\n if not isinstance(env.action_space, gym.spaces.Box):\n raise ValueError(\n f'SACSEAgent requires a continuous (Box) action space (but got {type(env.action_space)})'\n )\n\n self._model = model\n self._n_models = int(cfg.n_models)\n self._optim = optim\n self._bsz = int(cfg.batch_size)\n self._gamma = float(cfg.gamma)\n self._polyak = float(cfg.polyak)\n self._rpbuf_size = int(cfg.rpbuf_size)\n self._samples_per_update = int(cfg.samples_per_update)\n self._num_updates = int(cfg.num_updates)\n self._warmup_samples = int(cfg.warmup_samples)\n self._randexp_samples = int(cfg.randexp_samples)\n self._clip_grad_norm = float(cfg.clip_grad_norm)\n self._c_switch = int(cfg.c_switch)\n\n self._target_entropy = (\n -np.prod(env.action_space.shape) * cfg.target_entropy_factor\n )\n # Optimize log(alpha) so that we'll always have a positive factor\n log_alpha = self._n_models * [np.log(cfg.alpha)]\n if cfg.optim_alpha is None:\n self._log_alpha = th.tensor(log_alpha)\n self._optim_alpha = None\n else:\n self._log_alpha = th.tensor(log_alpha, requires_grad=True)\n self._optim_alpha = hydra.utils.instantiate(\n cfg.optim_alpha, [self._log_alpha]\n )\n\n self._buffer = ReplayBuffer(\n size=self._rpbuf_size, interleave=env.num_envs\n )\n self._n_samples_since_update = 0\n self._cur_rewards: List[th.Tensor] = []\n\n self._target = deepcopy(model)\n # We'll never need gradients for the target network\n for param in self._target.parameters():\n param.requires_grad_(False)\n\n self._q = self._model.q\n self._q_tgt = self._target.q\n if cfg.trace:\n self._q = TracedModule(self._model.q)\n self._q_tgt = TracedModule(self._target.q)\n\n self._action_space = env.action_space\n self._action_factor = env.action_space.high[0]\n self._obs_space = self.effective_observation_space(env, cfg)\n self._obs_keys = list(self._obs_space.spaces.keys())\n\n self._mod: Dict[str, nn.Module] = {}\n\n self.set_checkpoint_attr(\n '_model',\n '_target',\n '_optim',\n '_log_alpha',\n '_optim_alpha',\n )\n\n @staticmethod\n def effective_observation_space(env: gym.Env, cfg: DictConfig):\n if not isinstance(env.observation_space, gym.spaces.Dict):\n raise ValueError(\n f'SACSEAgent requires a dictionary 
observation space (but got {type(env.observation_space_space)})'\n )\n spaces = copy(env.observation_space.spaces)\n if not 'time' in spaces:\n raise ValueError(f'SACSEAgent requires a \"time\" observation')\n del spaces['time']\n if 'task' in spaces:\n raise ValueError(\n f'SACSEAgent can\\'t work with a \"task\" observation'\n )\n spaces['task'] = gym.spaces.Box(\n low=0, high=1, shape=(cfg.n_models,), dtype=np.float32\n )\n return gym.spaces.Dict(spaces)\n\n def action(self, env, obs) -> Tuple[th.Tensor, Any]:\n step = obs['time'].remainder(self._c_switch).long().view(-1)\n keep_idx = step != 0\n idx = env.ctx.get('idx', None)\n if self.training:\n new_idx = th.randint(\n 0, self._n_models, (env.num_envs,), device=step.device\n )\n else:\n # Fixed ensemble member for evaluations\n new_idx = th.zeros_like(step)\n if idx is None:\n idx = new_idx\n else:\n idx = keep_idx * idx + th.logical_not(keep_idx) * new_idx\n mobs = copy(obs)\n del mobs['time']\n mobs['task'] = F.one_hot(idx, self._n_models).float()\n\n with th.no_grad():\n if self._n_samples < self._randexp_samples and self.training:\n action = th.stack(\n [\n th.from_numpy(self._action_space.sample())\n for i in range(env.num_envs)\n ]\n ).to(list(self._model.parameters())[0].device)\n else:\n dist = self._model.pi(mobs)\n assert (\n dist.has_rsample\n ), f'rsample() required for policy distribution'\n if self.training:\n action = dist.sample() * self._action_factor\n else:\n action = dist.mean * self._action_factor\n\n env.ctx['idx'] = idx\n return action, idx\n\n def step(\n self,\n env,\n obs,\n action: th.Tensor,\n extra: Any,\n result: Tuple[th.Tensor, th.Tensor, th.Tensor, List[Dict]],\n ) -> None:\n next_obs, reward, done, info = result\n idx = extra\n # Ignore terminal state if we have a timeout\n for i in range(len(info)):\n if 'TimeLimit.truncated' in info[i]:\n done[i] = False\n\n d = dict(\n action=action,\n reward=reward,\n terminal=done,\n )\n for k in self._obs_keys:\n if k != 'task':\n d[f'obs_{k}'] = obs[k]\n d[f'next_obs_{k}'] = next_obs[k]\n d['obs_task'] = idx\n d['next_obs_task'] = idx\n\n self._buffer.put_row(d)\n self._cur_rewards.append(reward)\n\n self._n_steps += 1\n self._n_samples += done.nelement()\n self._n_samples_since_update += done.nelement()\n if self._buffer.size < self._warmup_samples:\n return\n if self._n_samples_since_update >= self._samples_per_update:\n self.update()\n self._cur_rewards.clear()\n self._n_samples_since_update = 0\n\n def _update(self):\n for p in self._model.parameters():\n mdevice = p.device\n break\n\n def act_logp(obs):\n dist = self._model.pi(obs)\n action = dist.rsample()\n log_prob = dist.log_prob(action).sum(dim=-1)\n action = action * self._action_factor\n return action, log_prob\n\n # We'll equally feed samples to all models\n idx = th.arange(self._bsz * self._n_models) // self._bsz\n idx_in = F.one_hot(idx, self._n_models).to(\n dtype=th.float32, device=mdevice\n )\n\n for _ in range(self._num_updates):\n batch = self._buffer.get_batch(\n self._bsz * self._n_models,\n device=mdevice,\n )\n reward = batch['reward']\n not_done = th.logical_not(batch['terminal'])\n obs = {k: batch[f'obs_{k}'] for k in self._obs_keys}\n obs_p = {k: batch[f'next_obs_{k}'] for k in self._obs_keys}\n obs['task'] = idx_in\n obs_p['task'] = idx_in\n alpha = (\n self._log_alpha.detach()\n .exp()\n .to(dtype=th.float32, device=mdevice)[idx]\n )\n\n # Backup for Q-Function\n with th.no_grad():\n a_p, log_prob_p = act_logp(obs_p)\n q_in = dict(action=a_p, **obs_p)\n q_tgt = 
th.min(self._q_tgt(q_in), dim=-1).values\n backup = reward + self._gamma * not_done * (\n q_tgt - alpha * log_prob_p\n )\n\n # Q-Function update\n q_in = dict(action=batch['action'], **obs)\n q = self._q(q_in)\n q1 = q[:, 0]\n q2 = q[:, 1]\n q1_loss = F.mse_loss(q1, backup)\n q2_loss = F.mse_loss(q2, backup)\n q_loss = q1_loss + q2_loss\n self._optim.q.zero_grad()\n q_loss.backward()\n if self._clip_grad_norm > 0.0:\n nn.utils.clip_grad_norm_(\n self._model.q.parameters(), self._clip_grad_norm\n )\n self._optim.q.step()\n\n # Policy update\n for param in self._model.q.parameters():\n param.requires_grad_(False)\n\n a, log_prob = act_logp(obs)\n q_in = dict(action=a, **obs)\n q = th.min(self._q(q_in), dim=-1).values\n pi_loss = (alpha * log_prob - q).mean()\n self._optim.pi.zero_grad()\n pi_loss.backward()\n if self._clip_grad_norm > 0.0:\n nn.utils.clip_grad_norm_(\n self._model.pi[0].parameters(), self._clip_grad_norm\n )\n self._optim.pi.step()\n\n for param in self._model.q.parameters():\n param.requires_grad_(True)\n\n # Optional temperature update\n if self._optim_alpha:\n alpha_loss = -(\n self._log_alpha.exp()[idx]\n * (log_prob.cpu() + self._target_entropy).detach()\n )\n self._optim_alpha.zero_grad()\n alpha_loss.mean().backward()\n self._optim_alpha.step()\n\n # Update target network\n with th.no_grad():\n for tp, p in zip(\n self._target.q.parameters(), self._model.q.parameters()\n ):\n tp.data.lerp_(p.data, 1.0 - self._polyak)\n\n # These are the stats for the last update\n self.tbw_add_scalar('Loss/Policy', pi_loss.item())\n self.tbw_add_scalar('Loss/QValue', q_loss.item())\n self.tbw_add_scalar('Health/Entropy', -log_prob.mean())\n if self._optim_alpha:\n self.tbw_add_scalar(\n 'Health/Alpha', self._log_alpha.exp().mean().item()\n )\n if self._n_updates % 100 == 1:\n self.tbw.add_scalars(\n 'Health/GradNorms',\n {\n k: v.grad.norm().item()\n for k, v in self._model.named_parameters()\n if v.grad is not None\n },\n self.n_samples,\n )\n\n avg_cr = th.cat(self._cur_rewards).mean().item()\n log.info(\n f'Sample {self._n_samples}, up {self._n_updates*self._num_updates}, avg cur reward {avg_cr:+0.3f}, pi loss {pi_loss.item():+.03f}, q loss {q_loss.item():+.03f}, entropy {-log_prob.mean().item():+.03f}, alpha {self._log_alpha.exp().mean().item():.03f}'\n )\n"
]
| [
[
"torch.cat",
"torch.nn.functional.one_hot",
"torch.arange",
"numpy.log",
"torch.no_grad",
"torch.nn.functional.mse_loss",
"torch.logical_not",
"torch.randint",
"numpy.prod",
"torch.tensor",
"torch.zeros_like"
]
]
|
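`SACSEAgent` in the row above conditions `pi`/`q` on a one-hot ensemble index and keeps a frozen target critic that tracks the online critic by Polyak averaging (`Tensor.lerp_`). A minimal sketch of those two mechanics with a placeholder linear critic; the network and sizes are illustrative only, not the agent's actual models:

```python
from copy import deepcopy

import torch as th
from torch import nn
from torch.nn import functional as F

n_models, num_envs, polyak = 4, 8, 0.995

# Per-environment ensemble index -> one-hot 'task' input for pi and q.
idx = th.randint(0, n_models, (num_envs,))
task = F.one_hot(idx, n_models).float()          # shape (num_envs, n_models)

q = nn.Linear(16 + n_models, 1)                  # placeholder critic
q_target = deepcopy(q)
for p in q_target.parameters():
    p.requires_grad_(False)                      # target network needs no gradients

# ... after an optimizer step on q, nudge the target towards the online critic:
with th.no_grad():
    for tp, p in zip(q_target.parameters(), q.parameters()):
        tp.data.lerp_(p.data, 1.0 - polyak)

print(task.shape)                                # torch.Size([8, 4])
```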
maartenbreddels/incubator-superset | [
"5616d7bdd72cb6d41caaf8733823d5d7dbe26abb"
]
| [
"superset/connectors/sqla/models.py"
]
| [
"# -*- coding: utf-8 -*-\n# pylint: disable=C,R,W\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom datetime import datetime\nimport logging\n\nfrom flask import escape, Markup\nfrom flask_appbuilder import Model\nfrom flask_babel import lazy_gettext as _\nimport pandas as pd\nimport sqlalchemy as sa\nfrom sqlalchemy import (\n and_, asc, Boolean, Column, DateTime, desc, ForeignKey, Integer, or_,\n select, String, Text,\n)\nfrom sqlalchemy.orm import backref, relationship\nfrom sqlalchemy.schema import UniqueConstraint\nfrom sqlalchemy.sql import column, literal_column, table, text\nfrom sqlalchemy.sql.expression import TextAsFrom\nimport sqlparse\n\nfrom superset import app, db, import_util, security_manager, utils\nfrom superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric\nfrom superset.jinja_context import get_template_processor\nfrom superset.models.annotations import Annotation\nfrom superset.models.core import Database\nfrom superset.models.helpers import QueryResult\nfrom superset.utils import DTTM_ALIAS, QueryStatus\n\nconfig = app.config\n\n\nclass AnnotationDatasource(BaseDatasource):\n \"\"\" Dummy object so we can query annotations using 'Viz' objects just like\n regular datasources.\n \"\"\"\n\n cache_timeout = 0\n\n def query(self, query_obj):\n df = None\n error_message = None\n qry = db.session.query(Annotation)\n qry = qry.filter(Annotation.layer_id == query_obj['filter'][0]['val'])\n if query_obj['from_dttm']:\n qry = qry.filter(Annotation.start_dttm >= query_obj['from_dttm'])\n if query_obj['to_dttm']:\n qry = qry.filter(Annotation.end_dttm <= query_obj['to_dttm'])\n status = QueryStatus.SUCCESS\n try:\n df = pd.read_sql_query(qry.statement, db.engine)\n except Exception as e:\n status = QueryStatus.FAILED\n logging.exception(e)\n error_message = (\n utils.error_msg_from_exception(e))\n return QueryResult(\n status=status,\n df=df,\n duration=0,\n query='',\n error_message=error_message)\n\n def get_query_str(self, query_obj):\n raise NotImplementedError()\n\n def values_for_column(self, column_name, limit=10000):\n raise NotImplementedError()\n\n\nclass TableColumn(Model, BaseColumn):\n\n \"\"\"ORM object for table columns, each table can have multiple columns\"\"\"\n\n __tablename__ = 'table_columns'\n __table_args__ = (UniqueConstraint('table_id', 'column_name'),)\n table_id = Column(Integer, ForeignKey('tables.id'))\n table = relationship(\n 'SqlaTable',\n backref=backref('columns', cascade='all, delete-orphan'),\n foreign_keys=[table_id])\n is_dttm = Column(Boolean, default=False)\n expression = Column(Text, default='')\n python_date_format = Column(String(255))\n database_expression = Column(String(255))\n\n export_fields = (\n 'table_id', 'column_name', 'verbose_name', 'is_dttm', 'is_active',\n 'type', 'groupby', 'count_distinct', 'sum', 'avg', 'max', 'min',\n 'filterable', 'expression', 'description', 'python_date_format',\n 'database_expression',\n )\n\n update_from_object_fields = [\n s for s in export_fields if s not in ('table_id',)]\n export_parent = 'table'\n\n def get_sqla_col(self, label=None):\n db_engine_spec = self.table.database.db_engine_spec\n label = db_engine_spec.make_label_compatible(label if label else self.column_name)\n if not self.expression:\n col = column(self.column_name).label(label)\n else:\n col = literal_column(self.expression).label(label)\n return col\n\n @property\n def datasource(self):\n return 
self.table\n\n def get_time_filter(self, start_dttm, end_dttm):\n col = self.get_sqla_col(label='__time')\n l = [] # noqa: E741\n if start_dttm:\n l.append(col >= text(self.dttm_sql_literal(start_dttm)))\n if end_dttm:\n l.append(col <= text(self.dttm_sql_literal(end_dttm)))\n return and_(*l)\n\n def get_timestamp_expression(self, time_grain):\n \"\"\"Getting the time component of the query\"\"\"\n pdf = self.python_date_format\n is_epoch = pdf in ('epoch_s', 'epoch_ms')\n if not self.expression and not time_grain and not is_epoch:\n return column(self.column_name, type_=DateTime).label(DTTM_ALIAS)\n\n expr = self.expression or self.column_name\n if is_epoch:\n # if epoch, translate to DATE using db specific conf\n db_spec = self.table.database.db_engine_spec\n if pdf == 'epoch_s':\n expr = db_spec.epoch_to_dttm().format(col=expr)\n elif pdf == 'epoch_ms':\n expr = db_spec.epoch_ms_to_dttm().format(col=expr)\n if time_grain:\n grain = self.table.database.grains_dict().get(time_grain)\n if grain:\n expr = grain.function.format(col=expr)\n return literal_column(expr, type_=DateTime).label(DTTM_ALIAS)\n\n @classmethod\n def import_obj(cls, i_column):\n def lookup_obj(lookup_column):\n return db.session.query(TableColumn).filter(\n TableColumn.table_id == lookup_column.table_id,\n TableColumn.column_name == lookup_column.column_name).first()\n return import_util.import_simple_obj(db.session, i_column, lookup_obj)\n\n def dttm_sql_literal(self, dttm):\n \"\"\"Convert datetime object to a SQL expression string\n\n If database_expression is empty, the internal dttm\n will be parsed as the string with the pattern that\n the user inputted (python_date_format)\n If database_expression is not empty, the internal dttm\n will be parsed as the sql sentence for the database to convert\n \"\"\"\n tf = self.python_date_format\n if self.database_expression:\n return self.database_expression.format(dttm.strftime('%Y-%m-%d %H:%M:%S'))\n elif tf:\n if tf == 'epoch_s':\n return str((dttm - datetime(1970, 1, 1)).total_seconds())\n elif tf == 'epoch_ms':\n return str((dttm - datetime(1970, 1, 1)).total_seconds() * 1000.0)\n return \"'{}'\".format(dttm.strftime(tf))\n else:\n s = self.table.database.db_engine_spec.convert_dttm(\n self.type or '', dttm)\n return s or \"'{}'\".format(dttm.strftime('%Y-%m-%d %H:%M:%S.%f'))\n\n def get_metrics(self):\n # TODO deprecate, this is not needed since MetricsControl\n metrics = []\n M = SqlMetric # noqa\n quoted = self.column_name\n if self.sum:\n metrics.append(M(\n metric_name='sum__' + self.column_name,\n metric_type='sum',\n expression='SUM({})'.format(quoted),\n ))\n if self.avg:\n metrics.append(M(\n metric_name='avg__' + self.column_name,\n metric_type='avg',\n expression='AVG({})'.format(quoted),\n ))\n if self.max:\n metrics.append(M(\n metric_name='max__' + self.column_name,\n metric_type='max',\n expression='MAX({})'.format(quoted),\n ))\n if self.min:\n metrics.append(M(\n metric_name='min__' + self.column_name,\n metric_type='min',\n expression='MIN({})'.format(quoted),\n ))\n if self.count_distinct:\n metrics.append(M(\n metric_name='count_distinct__' + self.column_name,\n metric_type='count_distinct',\n expression='COUNT(DISTINCT {})'.format(quoted),\n ))\n return {m.metric_name: m for m in metrics}\n\n\nclass SqlMetric(Model, BaseMetric):\n\n \"\"\"ORM object for metrics, each table can have multiple metrics\"\"\"\n\n __tablename__ = 'sql_metrics'\n __table_args__ = (UniqueConstraint('table_id', 'metric_name'),)\n table_id = Column(Integer, 
ForeignKey('tables.id'))\n table = relationship(\n 'SqlaTable',\n backref=backref('metrics', cascade='all, delete-orphan'),\n foreign_keys=[table_id])\n expression = Column(Text)\n\n export_fields = (\n 'metric_name', 'verbose_name', 'metric_type', 'table_id', 'expression',\n 'description', 'is_restricted', 'd3format', 'warning_text')\n update_from_object_fields = list([\n s for s in export_fields if s not in ('table_id', )])\n export_parent = 'table'\n\n def get_sqla_col(self, label=None):\n db_engine_spec = self.table.database.db_engine_spec\n label = db_engine_spec.make_label_compatible(label if label else self.metric_name)\n return literal_column(self.expression).label(label)\n\n @property\n def perm(self):\n return (\n '{parent_name}.[{obj.metric_name}](id:{obj.id})'\n ).format(obj=self,\n parent_name=self.table.full_name) if self.table else None\n\n @classmethod\n def import_obj(cls, i_metric):\n def lookup_obj(lookup_metric):\n return db.session.query(SqlMetric).filter(\n SqlMetric.table_id == lookup_metric.table_id,\n SqlMetric.metric_name == lookup_metric.metric_name).first()\n return import_util.import_simple_obj(db.session, i_metric, lookup_obj)\n\n\nclass SqlaTable(Model, BaseDatasource):\n\n \"\"\"An ORM object for SqlAlchemy table references\"\"\"\n\n type = 'table'\n query_language = 'sql'\n metric_class = SqlMetric\n column_class = TableColumn\n\n __tablename__ = 'tables'\n __table_args__ = (UniqueConstraint('database_id', 'table_name'),)\n\n table_name = Column(String(250))\n main_dttm_col = Column(String(250))\n database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)\n fetch_values_predicate = Column(String(1000))\n user_id = Column(Integer, ForeignKey('ab_user.id'))\n owner = relationship(\n security_manager.user_model,\n backref='tables',\n foreign_keys=[user_id])\n database = relationship(\n 'Database',\n backref=backref('tables', cascade='all, delete-orphan'),\n foreign_keys=[database_id])\n schema = Column(String(255))\n sql = Column(Text)\n is_sqllab_view = Column(Boolean, default=False)\n template_params = Column(Text)\n\n baselink = 'tablemodelview'\n\n export_fields = (\n 'table_name', 'main_dttm_col', 'description', 'default_endpoint',\n 'database_id', 'offset', 'cache_timeout', 'schema',\n 'sql', 'params', 'template_params', 'filter_select_enabled')\n update_from_object_fields = [\n f for f in export_fields if f not in ('table_name', 'database_id')]\n export_parent = 'database'\n export_children = ['metrics', 'columns']\n\n sqla_aggregations = {\n 'COUNT_DISTINCT': lambda column_name: sa.func.COUNT(sa.distinct(column_name)),\n 'COUNT': sa.func.COUNT,\n 'SUM': sa.func.SUM,\n 'AVG': sa.func.AVG,\n 'MIN': sa.func.MIN,\n 'MAX': sa.func.MAX,\n }\n\n def __repr__(self):\n return self.name\n\n @property\n def connection(self):\n return str(self.database)\n\n @property\n def description_markeddown(self):\n return utils.markdown(self.description)\n\n @property\n def datasource_name(self):\n return self.table_name\n\n @property\n def link(self):\n name = escape(self.name)\n anchor = '<a target=\"_blank\" href=\"{self.explore_url}\">{name}</a>'\n return Markup(anchor.format(**locals()))\n\n @property\n def schema_perm(self):\n \"\"\"Returns schema permission if present, database one otherwise.\"\"\"\n return security_manager.get_schema_perm(self.database, self.schema)\n\n def get_perm(self):\n return (\n '[{obj.database}].[{obj.table_name}]'\n '(id:{obj.id})').format(obj=self)\n\n @property\n def name(self):\n if not self.schema:\n return self.table_name\n 
return '{}.{}'.format(self.schema, self.table_name)\n\n @property\n def full_name(self):\n return utils.get_datasource_full_name(\n self.database, self.table_name, schema=self.schema)\n\n @property\n def dttm_cols(self):\n l = [c.column_name for c in self.columns if c.is_dttm] # noqa: E741\n if self.main_dttm_col and self.main_dttm_col not in l:\n l.append(self.main_dttm_col)\n return l\n\n @property\n def num_cols(self):\n return [c.column_name for c in self.columns if c.is_num]\n\n @property\n def any_dttm_col(self):\n cols = self.dttm_cols\n if cols:\n return cols[0]\n\n @property\n def html(self):\n t = ((c.column_name, c.type) for c in self.columns)\n df = pd.DataFrame(t)\n df.columns = ['field', 'type']\n return df.to_html(\n index=False,\n classes=(\n 'dataframe table table-striped table-bordered '\n 'table-condensed'))\n\n @property\n def sql_url(self):\n return self.database.sql_url + '?table_name=' + str(self.table_name)\n\n def external_metadata(self):\n cols = self.database.get_columns(self.table_name, schema=self.schema)\n for col in cols:\n col['type'] = '{}'.format(col['type'])\n return cols\n\n @property\n def time_column_grains(self):\n return {\n 'time_columns': self.dttm_cols,\n 'time_grains': [grain.name for grain in self.database.grains()],\n }\n\n @property\n def select_star(self):\n # show_cols and latest_partition set to false to avoid\n # the expensive cost of inspecting the DB\n return self.database.select_star(\n self.name, show_cols=False, latest_partition=False)\n\n def get_col(self, col_name):\n columns = self.columns\n for col in columns:\n if col_name == col.column_name:\n return col\n\n @property\n def data(self):\n d = super(SqlaTable, self).data\n if self.type == 'table':\n grains = self.database.grains() or []\n if grains:\n grains = [(g.duration, g.name) for g in grains]\n d['granularity_sqla'] = utils.choicify(self.dttm_cols)\n d['time_grain_sqla'] = grains\n d['main_dttm_col'] = self.main_dttm_col\n return d\n\n def values_for_column(self, column_name, limit=10000):\n \"\"\"Runs query against sqla to retrieve some\n sample values for the given column.\n \"\"\"\n cols = {col.column_name: col for col in self.columns}\n target_col = cols[column_name]\n tp = self.get_template_processor()\n\n qry = (\n select([target_col.get_sqla_col()])\n .select_from(self.get_from_clause(tp))\n .distinct()\n )\n if limit:\n qry = qry.limit(limit)\n\n if self.fetch_values_predicate:\n tp = self.get_template_processor()\n qry = qry.where(tp.process_template(self.fetch_values_predicate))\n\n engine = self.database.get_sqla_engine()\n sql = '{}'.format(\n qry.compile(engine, compile_kwargs={'literal_binds': True}),\n )\n sql = self.mutate_query_from_config(sql)\n\n df = pd.read_sql_query(sql=sql, con=engine)\n return [row[0] for row in df.to_records(index=False)]\n\n def mutate_query_from_config(self, sql):\n \"\"\"Apply config's SQL_QUERY_MUTATOR\n\n Typically adds comments to the query with context\"\"\"\n SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')\n if SQL_QUERY_MUTATOR:\n username = utils.get_username()\n sql = SQL_QUERY_MUTATOR(sql, username, security_manager, self.database)\n return sql\n\n def get_template_processor(self, **kwargs):\n return get_template_processor(\n table=self, database=self.database, **kwargs)\n\n def get_query_str(self, query_obj):\n qry = self.get_sqla_query(**query_obj)\n sql = self.database.compile_sqla_query(qry)\n logging.info(sql)\n sql = sqlparse.format(sql, reindent=True)\n if query_obj['is_prequery']:\n 
query_obj['prequeries'].append(sql)\n sql = self.mutate_query_from_config(sql)\n return sql\n\n def get_sqla_table(self):\n tbl = table(self.table_name)\n if self.schema:\n tbl.schema = self.schema\n return tbl\n\n def get_from_clause(self, template_processor=None):\n # Supporting arbitrary SQL statements in place of tables\n if self.sql:\n from_sql = self.sql\n if template_processor:\n from_sql = template_processor.process_template(from_sql)\n from_sql = sqlparse.format(from_sql, strip_comments=True)\n return TextAsFrom(sa.text(from_sql), []).alias('expr_qry')\n return self.get_sqla_table()\n\n def adhoc_metric_to_sqla(self, metric, cols):\n \"\"\"\n Turn an adhoc metric into a sqlalchemy column.\n\n :param dict metric: Adhoc metric definition\n :param dict cols: Columns for the current table\n :returns: The metric defined as a sqlalchemy column\n :rtype: sqlalchemy.sql.column\n \"\"\"\n expression_type = metric.get('expressionType')\n db_engine_spec = self.database.db_engine_spec\n label = db_engine_spec.make_label_compatible(metric.get('label'))\n\n if expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SIMPLE']:\n column_name = metric.get('column').get('column_name')\n sqla_column = column(column_name)\n table_column = cols.get(column_name)\n\n if table_column:\n sqla_column = table_column.get_sqla_col()\n\n sqla_metric = self.sqla_aggregations[metric.get('aggregate')](sqla_column)\n sqla_metric = sqla_metric.label(label)\n return sqla_metric\n elif expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SQL']:\n sqla_metric = literal_column(metric.get('sqlExpression'))\n sqla_metric = sqla_metric.label(label)\n return sqla_metric\n else:\n return None\n\n def get_sqla_query( # sqla\n self,\n groupby, metrics,\n granularity,\n from_dttm, to_dttm,\n filter=None, # noqa\n is_timeseries=True,\n timeseries_limit=15,\n timeseries_limit_metric=None,\n row_limit=None,\n inner_from_dttm=None,\n inner_to_dttm=None,\n orderby=None,\n extras=None,\n columns=None,\n order_desc=True,\n prequeries=None,\n is_prequery=False,\n ):\n \"\"\"Querying any sqla table from this common interface\"\"\"\n template_kwargs = {\n 'from_dttm': from_dttm,\n 'groupby': groupby,\n 'metrics': metrics,\n 'row_limit': row_limit,\n 'to_dttm': to_dttm,\n 'filter': filter,\n 'columns': {col.column_name: col for col in self.columns},\n }\n template_kwargs.update(self.template_params_dict)\n template_processor = self.get_template_processor(**template_kwargs)\n db_engine_spec = self.database.db_engine_spec\n\n orderby = orderby or []\n\n # For backward compatibility\n if granularity not in self.dttm_cols:\n granularity = self.main_dttm_col\n\n # Database spec supports join-free timeslot grouping\n time_groupby_inline = db_engine_spec.time_groupby_inline\n\n cols = {col.column_name: col for col in self.columns}\n metrics_dict = {m.metric_name: m for m in self.metrics}\n\n if not granularity and is_timeseries:\n raise Exception(_(\n 'Datetime column not provided as part table configuration '\n 'and is required by this type of chart'))\n if not groupby and not metrics and not columns:\n raise Exception(_('Empty query?'))\n metrics_exprs = []\n for m in metrics:\n if utils.is_adhoc_metric(m):\n metrics_exprs.append(self.adhoc_metric_to_sqla(m, cols))\n elif m in metrics_dict:\n metrics_exprs.append(metrics_dict.get(m).get_sqla_col())\n else:\n raise Exception(_(\"Metric '{}' is not valid\".format(m)))\n if metrics_exprs:\n main_metric_expr = metrics_exprs[0]\n else:\n main_metric_expr = literal_column('COUNT(*)').label(\n 
db_engine_spec.make_label_compatible('count'))\n\n select_exprs = []\n groupby_exprs = []\n\n if groupby:\n select_exprs = []\n inner_select_exprs = []\n inner_groupby_exprs = []\n for s in groupby:\n col = cols[s]\n outer = col.get_sqla_col()\n inner = col.get_sqla_col(col.column_name + '__')\n\n groupby_exprs.append(outer)\n select_exprs.append(outer)\n inner_groupby_exprs.append(inner)\n inner_select_exprs.append(inner)\n elif columns:\n for s in columns:\n select_exprs.append(cols[s].get_sqla_col())\n metrics_exprs = []\n\n if granularity:\n dttm_col = cols[granularity]\n time_grain = extras.get('time_grain_sqla')\n time_filters = []\n\n if is_timeseries:\n timestamp = dttm_col.get_timestamp_expression(time_grain)\n select_exprs += [timestamp]\n groupby_exprs += [timestamp]\n\n # Use main dttm column to support index with secondary dttm columns\n if db_engine_spec.time_secondary_columns and \\\n self.main_dttm_col in self.dttm_cols and \\\n self.main_dttm_col != dttm_col.column_name:\n time_filters.append(cols[self.main_dttm_col].\n get_time_filter(from_dttm, to_dttm))\n time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm))\n\n select_exprs += metrics_exprs\n qry = sa.select(select_exprs)\n\n tbl = self.get_from_clause(template_processor)\n\n if not columns:\n qry = qry.group_by(*groupby_exprs)\n\n where_clause_and = []\n having_clause_and = []\n for flt in filter:\n if not all([flt.get(s) for s in ['col', 'op']]):\n continue\n col = flt['col']\n op = flt['op']\n col_obj = cols.get(col)\n if col_obj:\n is_list_target = op in ('in', 'not in')\n eq = self.filter_values_handler(\n flt.get('val'),\n target_column_is_numeric=col_obj.is_num,\n is_list_target=is_list_target)\n if op in ('in', 'not in'):\n cond = col_obj.get_sqla_col().in_(eq)\n if '<NULL>' in eq:\n cond = or_(cond, col_obj.get_sqla_col() == None) # noqa\n if op == 'not in':\n cond = ~cond\n where_clause_and.append(cond)\n else:\n if col_obj.is_num:\n eq = utils.string_to_num(flt['val'])\n if op == '==':\n where_clause_and.append(col_obj.get_sqla_col() == eq)\n elif op == '!=':\n where_clause_and.append(col_obj.get_sqla_col() != eq)\n elif op == '>':\n where_clause_and.append(col_obj.get_sqla_col() > eq)\n elif op == '<':\n where_clause_and.append(col_obj.get_sqla_col() < eq)\n elif op == '>=':\n where_clause_and.append(col_obj.get_sqla_col() >= eq)\n elif op == '<=':\n where_clause_and.append(col_obj.get_sqla_col() <= eq)\n elif op == 'LIKE':\n where_clause_and.append(col_obj.get_sqla_col().like(eq))\n elif op == 'IS NULL':\n where_clause_and.append(col_obj.get_sqla_col() == None) # noqa\n elif op == 'IS NOT NULL':\n where_clause_and.append(\n col_obj.get_sqla_col() != None) # noqa\n if extras:\n where = extras.get('where')\n if where:\n where = template_processor.process_template(where)\n where_clause_and += [sa.text('({})'.format(where))]\n having = extras.get('having')\n if having:\n having = template_processor.process_template(having)\n having_clause_and += [sa.text('({})'.format(having))]\n if granularity:\n qry = qry.where(and_(*(time_filters + where_clause_and)))\n else:\n qry = qry.where(and_(*where_clause_and))\n qry = qry.having(and_(*having_clause_and))\n\n if not orderby and not columns:\n orderby = [(main_metric_expr, not order_desc)]\n\n for col, ascending in orderby:\n direction = asc if ascending else desc\n if utils.is_adhoc_metric(col):\n col = self.adhoc_metric_to_sqla(col, cols)\n qry = qry.order_by(direction(col))\n\n if row_limit:\n qry = qry.limit(row_limit)\n\n if is_timeseries and \\\n 
timeseries_limit and groupby and not time_groupby_inline:\n if self.database.db_engine_spec.inner_joins:\n # some sql dialects require for order by expressions\n # to also be in the select clause -- others, e.g. vertica,\n # require a unique inner alias\n inner_main_metric_expr = main_metric_expr.label('mme_inner__')\n inner_select_exprs += [inner_main_metric_expr]\n subq = select(inner_select_exprs)\n subq = subq.select_from(tbl)\n inner_time_filter = dttm_col.get_time_filter(\n inner_from_dttm or from_dttm,\n inner_to_dttm or to_dttm,\n )\n subq = subq.where(and_(*(where_clause_and + [inner_time_filter])))\n subq = subq.group_by(*inner_groupby_exprs)\n\n ob = inner_main_metric_expr\n if timeseries_limit_metric:\n if utils.is_adhoc_metric(timeseries_limit_metric):\n ob = self.adhoc_metric_to_sqla(timeseries_limit_metric, cols)\n elif timeseries_limit_metric in metrics_dict:\n timeseries_limit_metric = metrics_dict.get(\n timeseries_limit_metric,\n )\n ob = timeseries_limit_metric.get_sqla_col()\n else:\n raise Exception(_(\"Metric '{}' is not valid\".format(m)))\n direction = desc if order_desc else asc\n subq = subq.order_by(direction(ob))\n subq = subq.limit(timeseries_limit)\n\n on_clause = []\n for i, gb in enumerate(groupby):\n on_clause.append(\n groupby_exprs[i] == column(gb + '__'))\n\n tbl = tbl.join(subq.alias(), and_(*on_clause))\n else:\n # run subquery to get top groups\n subquery_obj = {\n 'prequeries': prequeries,\n 'is_prequery': True,\n 'is_timeseries': False,\n 'row_limit': timeseries_limit,\n 'groupby': groupby,\n 'metrics': metrics,\n 'granularity': granularity,\n 'from_dttm': inner_from_dttm or from_dttm,\n 'to_dttm': inner_to_dttm or to_dttm,\n 'filter': filter,\n 'orderby': orderby,\n 'extras': extras,\n 'columns': columns,\n 'order_desc': True,\n }\n result = self.query(subquery_obj)\n dimensions = [c for c in result.df.columns if c not in metrics]\n top_groups = self._get_top_groups(result.df, dimensions)\n qry = qry.where(top_groups)\n\n return qry.select_from(tbl)\n\n def _get_top_groups(self, df, dimensions):\n cols = {col.column_name: col for col in self.columns}\n groups = []\n for unused, row in df.iterrows():\n group = []\n for dimension in dimensions:\n col_obj = cols.get(dimension)\n group.append(col_obj.get_sqla_col() == row[dimension])\n groups.append(and_(*group))\n\n return or_(*groups)\n\n def query(self, query_obj):\n qry_start_dttm = datetime.now()\n sql = self.get_query_str(query_obj)\n status = QueryStatus.SUCCESS\n error_message = None\n df = None\n try:\n df = self.database.get_df(sql, self.schema)\n except Exception as e:\n status = QueryStatus.FAILED\n logging.exception(e)\n error_message = (\n self.database.db_engine_spec.extract_error_message(e))\n\n # if this is a main query with prequeries, combine them together\n if not query_obj['is_prequery']:\n query_obj['prequeries'].append(sql)\n sql = ';\\n\\n'.join(query_obj['prequeries'])\n sql += ';'\n\n return QueryResult(\n status=status,\n df=df,\n duration=datetime.now() - qry_start_dttm,\n query=sql,\n error_message=error_message)\n\n def get_sqla_table_object(self):\n return self.database.get_table(self.table_name, schema=self.schema)\n\n def fetch_metadata(self):\n \"\"\"Fetches the metadata for the table and merges it in\"\"\"\n try:\n table = self.get_sqla_table_object()\n except Exception:\n raise Exception(_(\n \"Table [{}] doesn't seem to exist in the specified database, \"\n \"couldn't fetch column information\").format(self.table_name))\n\n M = SqlMetric # noqa\n metrics = []\n 
any_date_col = None\n db_dialect = self.database.get_dialect()\n dbcols = (\n db.session.query(TableColumn)\n .filter(TableColumn.table == self)\n .filter(or_(TableColumn.column_name == col.name\n for col in table.columns)))\n dbcols = {dbcol.column_name: dbcol for dbcol in dbcols}\n db_engine_spec = self.database.db_engine_spec\n\n for col in table.columns:\n try:\n datatype = col.type.compile(dialect=db_dialect).upper()\n except Exception as e:\n datatype = 'UNKNOWN'\n logging.error(\n 'Unrecognized data type in {}.{}'.format(table, col.name))\n logging.exception(e)\n dbcol = dbcols.get(col.name, None)\n if not dbcol:\n dbcol = TableColumn(column_name=col.name, type=datatype)\n dbcol.groupby = dbcol.is_string\n dbcol.filterable = dbcol.is_string\n dbcol.sum = dbcol.is_num\n dbcol.avg = dbcol.is_num\n dbcol.is_dttm = dbcol.is_time\n else:\n dbcol.type = datatype\n self.columns.append(dbcol)\n if not any_date_col and dbcol.is_time:\n any_date_col = col.name\n metrics += dbcol.get_metrics().values()\n\n metrics.append(M(\n metric_name='count',\n verbose_name='COUNT(*)',\n metric_type='count',\n expression='COUNT(*)',\n ))\n if not self.main_dttm_col:\n self.main_dttm_col = any_date_col\n for metric in metrics:\n metric.metric_name = db_engine_spec.mutate_expression_label(\n metric.metric_name)\n self.add_missing_metrics(metrics)\n db.session.merge(self)\n db.session.commit()\n\n @classmethod\n def import_obj(cls, i_datasource, import_time=None):\n \"\"\"Imports the datasource from the object to the database.\n\n Metrics and columns and datasource will be overrided if exists.\n This function can be used to import/export dashboards between multiple\n superset instances. Audit metadata isn't copies over.\n \"\"\"\n def lookup_sqlatable(table):\n return db.session.query(SqlaTable).join(Database).filter(\n SqlaTable.table_name == table.table_name,\n SqlaTable.schema == table.schema,\n Database.id == table.database_id,\n ).first()\n\n def lookup_database(table):\n return db.session.query(Database).filter_by(\n database_name=table.params_dict['database_name']).one()\n return import_util.import_datasource(\n db.session, i_datasource, lookup_database, lookup_sqlatable,\n import_time)\n\n @classmethod\n def query_datasources_by_name(\n cls, session, database, datasource_name, schema=None):\n query = (\n session.query(cls)\n .filter_by(database_id=database.id)\n .filter_by(table_name=datasource_name)\n )\n if schema:\n query = query.filter_by(schema=schema)\n return query.all()\n\n @staticmethod\n def default_query(qry):\n return qry.filter_by(is_sqllab_view=False)\n\n\nsa.event.listen(SqlaTable, 'after_insert', security_manager.set_perm)\nsa.event.listen(SqlaTable, 'after_update', security_manager.set_perm)\n"
]
| [
[
"pandas.DataFrame",
"pandas.read_sql_query"
]
]
|
JulienPascal/master-s_thesis | [
"f697958b9a2f16642139301f20e1cf8f42328ddf"
]
| [
"src/Calculate_volatility_wages.py"
]
| [
"##########################################################\n# Calculate the volatility of the wages deciles in the US\n##########################################################\n# Use data from Jonathan Heathcote, Fabrizio Perri, Giovanni L. Violante\n# Unequal We Stand: An Empirical Analysis of Economic Inequality in the United States, 1967-2006\n# http://www.nber.org/papers/w15483\n\n# I follow the procedure described by Robin 2011 in \n# http://onlinelibrary.wiley.com/doi/10.3982/ECTA9070/abstract\n\nimport numpy as np\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom more_itertools import unique_everseen\nimport pylab\nimport matplotlib.dates as mdates\nfrom math import exp, log\nimport math\nfrom datetime import date, timedelta as td\nimport statsmodels.api as sm\nimport matplotlib.cm as cm\nfrom tabulate import tabulate\nimport csv\n\npath = '/home/julien/master-s_thesis/data' #path to the data folder\npath_figure = '/home/julien/master-s_thesis/figures/' #where to save the figures\npath_table = '/home/julien/master-s_thesis/tables/' #where to save the tables\nos.chdir(path) #locate in the data folder\n\n######################\n# I. Data 2000 to 2015\n######################\n\ndf = pd.read_csv(\"LEU.csv\")\n\nstarting_year = 2000 #first quarter 1951\nstarting_month = 1\nstarting_date = date(starting_year, starting_month, 1)\n\nstop_year = 2015 #third quarter 2010\nstop_month = 10\nstop_date = date(stop_year, stop_month, 1)\n\n# 1. Create a column with dates objects:\ndf['Date'] = 0\nfor i in range(0,len(df['Year'])):\n\tdf.ix[i,'Date'] = date(df.ix[i,'Year'], df.ix[i,'Month'], 1)\n\n#keep the selected time period: \ndf = df.loc[df['Date'] <= stop_date]\ndf = df.loc[df['Date'] >= starting_date]\n\n#Create\ndateList = [] \nfor index, row in df.iterrows():\n\tprint(index)\n\tdateList.append(date(df.ix[index,'Year'], df.ix[index,'Month'], 1))\n\n# Usual weekly earnings - in current dollars, first decile, both sexes: LEU0252911200\nFirst_decile = df['LEU0252911200'].values\n\n# Usual weekly earnings, first quartile, Employed full time, Wage and salary workers LEU0252911300\nFirst_quartile = df['LEU0252911300'].values\n\n#Median usual weekly earnings (second quartile), Employed full time, Wage and salary workers LEU0252881500\nMedian = df['LEU0252881500'].values\n\n# Usual weekly earnings, third quartile, Employed full time, Wage and salary workers LEU0252911400\nThird_quartile = df['LEU0252911400'].values\n\n# Usual weekly earnings, ninth decile, Employed full time, Wage and salary workers LEU0252911500\nNinth_decile = df['LEU0252911500'].values\n\n##################\n# Plot the series:\n##################\nplt.plot(dateList, First_decile, color = 'b')\nplt.plot(dateList, First_quartile, 'k')\nplt.plot(dateList, Median, color = 'r')\nplt.plot(dateList, Third_quartile, color = 'g')\nplt.plot(dateList, Ninth_decile, color = 'navy')\n\nplt.title('Wages')\nplt.legend(['First decile', 'First quartile', 'Median', 'Third quartile', 'Ninth decile'], loc='best', fancybox = True, framealpha=0.5)\nplt.savefig(path_figure + 'Wages_2000_2015')\nplt.show()\n\n######################\n# Plot ratio of series\n######################\nplt.plot(dateList, Ninth_decile/First_decile, color = 'b')\nplt.plot(dateList, Median/First_decile, 'k')\nplt.plot(dateList, Ninth_decile/Median, color = 'r')\nplt.legend(['P90/P10','P50/P10','P90/P50'] , loc='best', fancybox = True, framealpha=0.2)\nplt.title('Wages')\nplt.savefig(path_figure + 
'Wage_ratios_2000_2015')\nplt.show()\n\n\n#############################\n# Plot the log of the series:\n#############################\nlog_First_decile = np.log(First_decile)\nlog_First_quartile = np.log(First_quartile)\nlog_Median = np.log(Median)\nlog_Third_quartile = np.log(Third_quartile)\nlog_Ninth_decile = np.log(Ninth_decile)\n\nplt.plot(dateList, log_First_decile, color = 'b')\nplt.plot(dateList, log_First_quartile , 'k')\nplt.plot(dateList, log_Median, color = 'r')\nplt.plot(dateList, log_Third_quartile, color = 'g')\nplt.plot(dateList, log_Ninth_decile , color = 'navy')\nplt.title('Log Wages')\nplt.savefig('Log_Wages_2000_2015')\nplt.show()\n\n#####################\n#Plot ratios of logs\nplt.plot(dateList, log_Ninth_decile/log_First_decile, color = 'b')\nplt.plot(dateList, log_Median/log_First_decile, 'k')\nplt.plot(dateList, log_Ninth_decile/log_Median, color = 'r')\nplt.legend(['P90/P10','P50/P10','P90/P50'] , loc='best', fancybox = True, framealpha=0.2)\nplt.title('Log Wages Ratios')\nplt.savefig(path_figure + 'Log_Wage_ratios_2000_2015')\nplt.show()\n\n\n#####################################\n# Get rid of a linear trend on wages:\n#####################################\nz = np.polyfit(df['Year'].values, log_First_decile, 1)\np = np.poly1d(z)\ndetrended_log_First_decile = log_First_decile - p(df['Year'].values) + np.mean(p(df['Year'].values)) #remove the trend and add the mean\n\nz = np.polyfit(df['Year'].values, log_First_quartile, 1)\np = np.poly1d(z)\ndetrended_log_First_quartile = log_First_quartile - p(df['Year'].values) + np.mean(p(df['Year'].values))\n\nz = np.polyfit(df['Year'].values, log_Median, 1)\np = np.poly1d(z)\ndetrended_log_Median = log_Median - p(df['Year'].values) + np.mean(p(df['Year'].values))\n\nz = np.polyfit(df['Year'].values, log_Third_quartile, 1)\np = np.poly1d(z)\ndetrended_log_Third_quartile = log_Third_quartile - p(df['Year'].values) + np.mean(p(df['Year'].values))\n \nz = np.polyfit(df['Year'].values, log_Ninth_decile, 1)\np = np.poly1d(z)\ndetrended_log_Ninth_decile = log_Ninth_decile - p(df['Year'].values) + np.mean(p(df['Year'].values))\n\n#############################\n# Plot detrented wage ratios:\n#############################\n# P90/P10: 90th to 10th percentile ratio\nP_90_to_P10 = detrended_log_Ninth_decile/detrended_log_First_decile\n\n# P50/P10\nP_50_to_P10 = detrended_log_Median/detrended_log_First_decile\n\n# P90/P50\nP_90_to_50 = detrended_log_Ninth_decile/detrended_log_Median\n\nplt.plot(df['Year'].values, P_90_to_P10)\nplt.plot(df['Year'].values, P_50_to_P10)\nplt.plot(df['Year'].values, P_90_to_50)\nplt.legend(['P90/P10','P50/P10','P90/P50'] , loc='best', fancybox = True, framealpha=0.2)\nplt.savefig(path_figure + 'delinearized_log_wage_decile_ratios_2000_2015')\nplt.show()\n\n######################################\n# Standard deviations of wage deciles;\n######################################\nprint(np.std(detrended_log_First_decile))\nprint(np.std(detrended_log_First_quartile))\nprint(np.std(detrended_log_Median))\nprint(np.std(detrended_log_Third_quartile))\nprint(np.std(detrended_log_Ninth_decile))\n\n########################\n# II. 
Data 1967 to 2005:\n########################\n\ndf2 = pd.read_csv(\"individ_pctiles_work_hrs_wage.csv\")\n\n#Plot the deciles with the trend:\n\nlegend_list = []\n#list_color = color_list = plt.cm.Set3(np.linspace(0, 1,13))\na = 0\ncmap = plt.cm.Accent\nline_colors = cmap(np.linspace(0,1,9)) \n\nfor i in np.arange(10, 100, 10):\n\tvariable = 'avg_wage_%s' %i\n\tlegend_name = '%sth percentile' %i\n\tplt.plot(df2['true_year'].values, df2[variable], color = line_colors[a])\n\tlegend_list.append(legend_name)\n\ta=a+1\n\nplt.title('Dynamics of wage deciles')\nplt.savefig(path_figure + 'trended_wage_deciles')\nplt.legend(legend_list, loc='best', fancybox = True, framealpha=0.2)\nplt.show()\n\n#######################################\n# A. HP Filter the log of the deciles :\n#######################################\n\nsmoothing_parameter = 100 #Paramter for yearly data\n\ncycle_avg_wage = {}\ntrend_avg_wage = {}\ndetrended_avg_wage = {}\n\n# Decompose the trend and the cycle:\nfor i in np.arange(10, 100, 10):\n\tvariable = 'avg_wage_%s' %i\n\tcycle_avg_wage[i], trend_avg_wage[i] = sm.tsa.filters.hpfilter(np.log(df2[variable]), smoothing_parameter)\n\tdetrended_avg_wage[i] = np.exp(cycle_avg_wage[i] + np.mean(trend_avg_wage[i])) # add the mean of the trend and take the exponential \n\n\n#Plot the trend:\na = 0\nfor i in np.arange(10, 100, 10):\n\tplt.plot(df2['true_year'].values, trend_avg_wage[i], color = line_colors[a])\n\ta=a+1\n\nplt.legend(legend_list, loc='best', fancybox = True, framealpha=0.2)\nplt.title('Trend in wage deciles')\nplt.savefig(path_figure + 'Trend_wage_deciles')\nplt.show()\n\n#Plot the cycle component\na = 0\nfor i in np.arange(10, 100, 10):\n\tplt.plot(df2['true_year'].values, cycle_avg_wage[i], color = line_colors[a])\n\ta=a+1\n\nplt.title('Cycle component in wage deciles')\nplt.savefig(path_figure + 'Cycle_wage_deciles')\nplt.legend(legend_list, loc='best', fancybox = True, framealpha=0.2)\nplt.show()\n\n#Plot the untrended data:\na = 0\nfor i in np.arange(10, 100, 10):\n\tplt.plot(df2['true_year'].values, detrended_avg_wage[i], color = line_colors[a])\n\ta=a+1\n\nplt.title('Detrended wage deciles HP Filter smoothing parameter = 100')\nplt.savefig(path_figure + 'Detrended_wage_deciles')\nplt.legend(legend_list, loc='best', fancybox = True, framealpha=0.2)\nplt.show()\n\n# Table for the volatility of the HP filtered deciles:\ntable = [] #create a table to store the volatility\na = 0\nfor i in np.arange(10, 100, 10):\n\tvariable = '%sth Decile' %i\n\tprint(np.std(detrended_avg_wage[i]))\n\ttable.append([variable, np.std(detrended_avg_wage[i])])\n\ta=a+1\n\n\n# Output table of standard errors of detrended data in latek:\nprint(tabulate(table, headers=['Decile', 'Volatility'], tablefmt=\"latex\"))\n\n#save the table in a csv format:\nwith open(path_table + 'table_volatility_detrended_wages.csv', 'w') as csvfile:\n\twriter = csv.writer(csvfile)\n\t[writer.writerow(r) for r in table]\n\n\n#####################################################\n# B. 
Remove a linear trend, as in the original paper:\n#####################################################\n\nlinear_trend_avg_wage = {}\ndelinear_avg_wage = {}\ndeviation_from_linear_trend = {}\n\nfor i in np.arange(10, 100, 10):\n\tvariable = 'avg_wage_%s' %i\n\tlog_wage_decile = np.log(df2[variable]) #take the log\n\tz = np.polyfit(df2['true_year'].values, log_wage_decile , 1) #calculate the least squares line\n\tp = np.poly1d(z)\n\tlinear_trend_avg_wage[i] = p(df2['true_year'])\n\tdelinear_avg_wage[i] = np.exp(log_wage_decile - linear_trend_avg_wage[i] + np.mean(linear_trend_avg_wage[i])) # add the mean of the trend and take the exponential \n\tdeviation_from_linear_trend[i] = np.divide(log_wage_decile - linear_trend_avg_wage[i],linear_trend_avg_wage[i])\n\n# Plot the untrended data:\na = 0\nfor i in np.arange(10, 100, 10):\n\tplt.plot(df2['true_year'].values, delinear_avg_wage[i], color = line_colors[a])\n\ta=a+1\n\n#plt.title('Detrended wage deciles, removing a linear trend')\nplt.legend(legend_list, loc='best', fancybox = True, framealpha=0.2)\nplt.savefig(path_figure + 'delinearized_wage_deciles')\nplt.show()\n\n#Volatility of the delinearized deciles:\ntable = [] #create a table to store the volatility\nlist_volatility = []\nlabel_volatility = []\nind = []\na = 0\nfor i in np.arange(10, 100, 10):\n\tvariable = '%sth Decile' %i\n\tprint(np.std(delinear_avg_wage[i]))\n\ttable.append([variable, np.std(delinear_avg_wage[i])])\n\tlist_volatility.append(np.std(delinear_avg_wage[i]))\n\tlabel_volatility.append(variable)\n\tind.append(a)\n\ta=a+1\n\n# Output table of standard errors of delinearized data in latek:\nprint(tabulate(table, headers=['Decile', 'Volatility'], tablefmt=\"latex\"))\n\n# Make a graph out of it:\n\nfig, ax = plt.subplots()\nrects1 = ax.bar(ind, list_volatility, color='grey', alpha = 0.3)\n\n#plt.plot(ind, list_volatility)\nplt.xticks(ind, label_volatility)\nplt.show()\n\n\n#save the table in a csv format:\nwith open(path_table + 'table_volatility_delinearized_wages.csv', 'w') as csvfile:\n\twriter = csv.writer(csvfile)\n\t[writer.writerow(r) for r in table]\n\n#######################################\n# Plot deviations from the linear trend\n#######################################\na = 0\nfor i in np.arange(10, 100, 10):\n\tplt.plot(df2['true_year'].values, deviation_from_linear_trend[i], color = line_colors[a])\n\ta=a+1\n#plt.title('Deviations of wage deciles from linear trend')\nplt.legend(legend_list, loc='best', fancybox = True, framealpha=0.2)\nplt.savefig(path_figure + 'Deviations_wages_deciles_from_linear_trend')\nplt.show()\n\n#####################\n# Plot earning ratios\n#####################\n\n# P90/P10: 90th to 10th percentile ratio\nP_90_to_P10 = delinear_avg_wage[90]/delinear_avg_wage[10] #indexing starts at 0\n\n# P50/P10\nP_50_to_P10 = delinear_avg_wage[50]/delinear_avg_wage[10]\n\n# P90/P50\nP_90_to_50 = delinear_avg_wage[90]/delinear_avg_wage[50]\n\nplt.plot(df2['true_year'].values, P_90_to_P10, color = line_colors[1])\nplt.plot(df2['true_year'].values, P_50_to_P10, color = line_colors[2])\nplt.plot(df2['true_year'].values, P_90_to_50, color = line_colors[7])\nplt.legend(['P90/P10','P50/P10','P90/P50'] , loc='best', fancybox = True, framealpha=0.2)\nplt.savefig(path_figure + 'delinearized_wage_decile_ratios')\nplt.show()\n\n#######################################################\n# III. 
Plot earning by educational attainment 2000-2015\n#######################################################\n\ndf_wage = pd.read_csv(\"LEU_wage_education.csv\")\n\nW = df_wage.loc[df_wage['Series ID'] == 'LEU0252887700','Value'] #Median wage for every type of educational attainment\nW1 = df_wage.loc[df_wage['Series ID'] == 'LEU0252916700','Value']#Median wage for Less than a high school diploma\nW2 = df_wage.loc[df_wage['Series ID'] == 'LEU0252917300','Value']#Median wage for High school graduates, no college\nW3 = df_wage.loc[df_wage['Series ID'] == 'LEU0254929400','Value']#Median wage for Some college or associate degree\nW4 = df_wage.loc[df_wage['Series ID'] == 'LEU0252918500','Value'] #Median wage for Bachelor's degree or higher\n\n###############\n# Scatter point\n##############\nplt.scatter(W, W1, color = 'b', alpha=0.5)\nplt.scatter(W, W2, color = 'k', alpha=0.5)\nplt.scatter(W, W3, color = 'r', alpha=0.5)\nplt.scatter(W, W4, color = 'g', alpha=0.5)\n\n#plt.scatter(Unemployment_rate_selected_years, degree_line, color = 'grey', alpha=0.2)\nplt.legend(['Less than a High School Diploma', 'With a High School Diploma', 'Some College or Associate Degree','Bachelors Degree and Higher'], loc='upper left', fancybox = True, framealpha=0.5)\nplt.xlabel('Median usual weekly earnings - in current dollars')\nplt.ylabel('Median usual weekly earnings by educational attainment')\nplt.savefig(path_figure + 'Overall_vs_group_edu_median_weekly_earning')\nplt.show()\n\n########\n# Lines\n#######\n\ndateListWage = []\nfor i in range(0,len(W)):\n dateListWage.append(date(df_wage.loc[df_wage['Series ID'] == 'LEU0252887700','Year'].values[i],df_wage.loc[df_wage['Series ID'] == 'LEU0252887700','Month'].values[i], 1))\n\n\nplt.plot(dateListWage, W1, color = 'b')\nplt.plot(dateListWage, W2, '--k')\nplt.plot(dateListWage, W3, color = 'r')\nplt.plot(dateListWage, W4, color = 'g')\n\n#fill in between:\nplt.fill_between(dateListWage, 0, W1, color='b', alpha=0.2)\nplt.fill_between(dateListWage, W1, W2, color='k', alpha=0.2)\nplt.fill_between(dateListWage, W2, W3, color='r', alpha=0.2)\nplt.fill_between(dateListWage, W3, W4, color='g', alpha=0.2)\n\n#plt.title('Unemployment educational attainment')\nplt.ylabel('Median usual weekly earnings in current dollars')\nplt.legend(['Less than a High School Diploma', 'With a High School Diploma', 'Some College or Associate Degree','Bachelors Degree and Higher'], loc='upper left', fancybox = True, framealpha=0.5)\nplt.savefig(path_figure + 'Wages_by_education')\nplt.show()\n\n"
]
| [
[
"numpy.mean",
"pandas.read_csv",
"matplotlib.pyplot.xticks",
"numpy.divide",
"numpy.log",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.polyfit",
"matplotlib.pyplot.fill_between",
"numpy.poly1d",
"matplotlib.pyplot.title",
"numpy.std",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter",
"numpy.linspace"
]
]
|
jotelha/smampppp | [
"729e4733b436e68adfe07bcaa39a47727d0c8dd8"
]
| [
"fitESPconstrained.py"
]
| [
"#!/usr/bin/env python\n\"\"\" Fits (united-atom) point charges onto (all-atom) ESP obtained by \n GPAW and HORTON under certain charge group and symmetry constraints \n as required by GROMOS force fields \"\"\"\n# script which should read in hortons old cost function and add new constraints\n\n# PLAN:\n# Write functions which read input files for constraints and use them to\n# construct the new cost function. DONE\n# After reading in and construction of the\n# cost function, solve it and return the fitted charges. DONE\n\n# ENHANCEMENT:\n# Check if the constraints are applied properly or if there occured an error. DONE\n# Check if the constraints are meaningfull or if they exclude each other...\n\n# TODO:\n# maybe write a function which can visualize the symmetry constraints\n# to proof the input. Highlight atoms which are constrained or sth.\n# like this. DONE IN TEXT FORM\n\nimport warnings\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nimport ase.io\nimport parmed as pmd\nfrom parmed import gromacs\nfrom insertHbyList import insertHbyList \n\n\n\n### Function definitions ###\n\ndef unconstrainedMinimize(A_matrix, b_vector, C_scalar, debug = False):\n \"\"\"\n Minimization of the cost function.\n Parameters\n ----------\n A_matrix, b_vector, C_scalar:\n from hortons esp cost function. x A x^T - 2 B x + C\n debug: bool\n True: print debug information\n False: dont print debug information\n default=False\n\n Return\n ------\n x: M+N array\n Vector of M optimized charges and N lagrange multipliers\n A: numpy.ndarray\n matrix of horton minimization and lagrange condition\n B: numpy.array\n array of horton and lagrange conditions\n \"\"\"\n\n N = b_vector.shape[0]\n #M = q_vector.shape[0]\n\n npv = float(np.version.version[2:])\n\n if debug:\n logging.info(\"A {}: \\n {}\".format(A_matrix.shape,A_matrix))\n logging.info(\"B {}: \\n {}\".format(b_vector.shape,b_vector))\n logging.info(\"C {}: \\n {}\".format(C_scalar.shape,C_scalar))\n \n A = A_matrix\n B = b_vector\n C = C_scalar\n\n x = np.linalg.solve(A, B)\n\n return x, A, B\n\ndef constrainedMinimize(A_matrix, b_vector, C_scalar, D_matrix = None,\n q_vector = np.array([0]), debug = False):\n \"\"\"\n Minimization of the cost function.\n Parameters\n ----------\n A_matrix, b_vector, C_scalar:\n from hortons esp cost function. x A x^T - 2 B x + C\n D_matrix:\n matrix of additional constraints\n q_vector:\n vector of constants which should be fulfilled by the additional\n constraints.\n debug: bool\n True: print debug information\n False: dont print debug information\n default=False\n\n Return\n ------\n x: M+N array\n Vector of M optimized charges and N lagrange multipliers\n A: numpy.ndarray\n matrix of horton minimization and lagrange condition\n B: numpy.array\n array of horton and lagrange conditions\n\n TODO\n ----\n --> check the Lagrange multipliers for consistency, e.g. 
too large\n Lagrange multipliers can be a hint for not fulfilled constraints?!\n --> for old numpy versions, version < 1.13.0, there is no np.block\n Instead use np.bmat in the same way...\n \"\"\"\n \n logging.debug(\"\")\n logging.debug(\"### constrainedMinimize ###\")\n logging.debug(\"\")\n\n N = b_vector.shape[0]\n M = q_vector.shape[0]\n\n npv = float(np.version.version[2:])\n\n if not isinstance(D_matrix,np.ndarray):\n D_matrix = np.atleast_2d( np.ones(N) )\n \n #if debug:\n logging.debug(\"{:d} unknowns, {:d} equality constraints\".format(N,M))\n logging.debug(\"A {}: \\n {}\".format(A_matrix.shape,A_matrix))\n logging.debug(\"B {}: \\n {}\".format(b_vector.shape,b_vector))\n logging.debug(\"C {}: \\n {}\".format(C_scalar.shape,C_scalar))\n logging.debug(\"D {}: \\n {}\".format(D_matrix.shape,D_matrix))\n logging.debug(\"q {}: \\n {}\".format(q_vector.shape,q_vector))\n if npv < 13:\n logging.info('\\nWARNING:\\n Your numpy version {} is old,\\n I am falling from '\n 'np.block() back to np.bmat()\\n\\n'.format(np.version.version) )\n A = np.bmat([[ 2*np.atleast_2d(A_matrix), np.atleast_2d(D_matrix).T ],\n [ np.atleast_2d(D_matrix), np.atleast_2d(np.zeros((M,M)))]])\n\n else:\n #maybe combine these steps to one np.block operation like for bmat\n A_upper = np.block(\n [ 2*np.atleast_2d(A_matrix), np.atleast_2d(D_matrix).T ])\n A_lower = np.block(\n [ np.atleast_2d(D_matrix), np.atleast_2d(np.zeros((M,M)))])\n A = np.block( [ [ A_upper ], [ A_lower ] ] )\n\n logging.debug(\"block A ({}): \\n {}\".format(A.shape,A))\n\n if npv < 13:\n B = np.bmat( [2*np.atleast_1d(b_vector), np.atleast_1d(q_vector)] ).T\n\n else:\n B = np.block( [2*np.atleast_1d(b_vector), np.atleast_1d(q_vector)] )\n\n logging.debug(\"block B ({}): \\n {}\".format(B.shape,B))\n\n C = C_scalar\n \n rank_A = np.linalg.matrix_rank(A)\n rank_AB = np.linalg.matrix_rank(np.hstack((A,np.atleast_2d(B).T))) \n if rank_A == rank_AB:\n logging.info(\"A rank == [A,B] rank == {:d}, solvable\".format(rank_A))\n if rank_A == A.shape[0]:\n logging.info(\"A rank == A dim, unique solution exists.\")\n else:\n logging.info(\"A rank < A dim {:d}, no unique solution.\".format(A.shape[0]))\n else:\n logging.info(\"A rank {:d} != [A,B] rank {:d}, unsolvable\".format(\n rank_A, rank_AB))\n\n # What does numpy do for ambiguous systems?\n x = np.linalg.solve(A, B)\n\n return x, A, B\n\n\ndef constructPairwiseSymmetryConstraints(charges, N, symmetry=1.0, debug=False):\n \"\"\"\n Function to construct D_matrix and q_vector for a pairwise symmetry\n\n Parameters:\n -----------\n charges: (1D or 2D numpy.ndarray int) \n or (list of int) or (list of list of int)\n List of ASE charge indices which should be equal. \n For more than two charges\n there will be (N-1) constraints for pairwise equal charges. If you give\n a list of lists with equal charges each sublist will be forced to have\n equal charges.\n N: int\n Total number of atoms in your system. 
Needed to fix the size of the\n D_matrix and q_vector to the system size (number of atoms)\n symmetry: int\n 1: the pairwise charges are equal (symmetric), q_1 = q_2\n -1: the pairwise charges are the negative of each other (antisymmetric),\n q_1 = -q_2\n default=1\n debug: bool\n True: print debug information\n False: dont print debug information\n default=False\n\n Return\n ------\n D: np.ndarray, dim=2\n D_matrix which carries the constraints of pairwise symmetric charges\n q: np.array\n q_vector carying the total charge of q_1+q_2 (always zero)\n\n TODO\n ----\n implement 2D charge array such that one can input at once all pairwise\n symmetries with an symmetry array. DONE\n \"\"\"\n\n \n logging.debug(\"\")\n logging.debug(\"### constructPairwiseSymmetryConstraints ###\")\n logging.debug(\"\")\n \n #charges = np.atleast_2d(charges)\n #D = np.ones((1, N))\n #q = np.ones((1))\n D = []\n q = []\n \n # loop over sets of equally charged atoms\n for charge_list in charges:\n M = len(charge_list)-1 # number of pairwise constraints for the current list\n\n #symmetry = symmetry*np.ones(M)\n\n logging.debug(\"charge list ({}): {}\".format(charge_list.shape,charge_list))\n logging.debug(\"{:d} unknowns, {:d} pairwise equality constraints\".format(N,M))\n logging.debug(\"symmetry: {}\".format(symmetry))\n\n D_single = np.atleast_2d(np.zeros((M,N)))\n q_single = np.atleast_1d(np.zeros(M))\n D_single[:,charge_list[0]] = 1 # 1st atom in list used in every constraint\n\n for j in range(M):\n D_single[j,charge_list[j+1]] = -1.0*symmetry\n\n logging.debug(\"D_single ({}):\\n{}\".format(D_single.shape,D_single))\n logging.debug(\"q_single ({}): {}\".format(q_single.shape,q_single))\n\n #add up D_single and q_single to D and q\n D.append(D_single)\n q.append(q_single)\n \n D,q = concatenated_constraints(D,q)\n return D, q\n\n### old constructPairwiseSymmetryConstraints ###\n#def constructPairwiseSymmetryConstraints(charges, N, symmetry=1.0, debug=False):\n# \"\"\"\n# Function to construct D_matrix and q_vector for a pairwise symmetry\n#\n# Parameters:\n# -----------\n# charges: list of ints\n# List of charge indices which should be equal. For more than two charges\n# there will be (N-1) constraints for pairwise equal charges.\n# N: int\n# Total number of atoms in your system. 
Needed to fix the size of the\n# D_matrix and q_vector to the system size (number of atoms)\n# symmetry: int, -1 or 1\n# 1: the pairwise charges are equal (symmetric), q_1 = q_2\n# -1: the pairwise charges are the negative of each other (antisymmetric),\n# q_1 = -q_2\n# default=1\n# debug: bool\n# True: print debug information\n# False: dont print debug information\n# default=False\n#\n# Return\n# ------\n# D: np.ndarray, dim=2\n# D_matrix which carries the constraints of pairwise symmetric charges\n# q: np.array\n# q_vector carying the total charge of q_1+q_2 (always zero)\n#\n# TODO\n# ----\n# implement 2D charge array such that one can input at once all pairwise\n# symmetries with an symmetry array.\n# \"\"\"\n# M = len(charges)-1\n#\n# symmetry = symmetry*np.ones(M)\n#\n# if debug:\n# logging.info(\"{:d} unknowns, {:d} pairwise equality constraints\".format(N,M))\n# logging.info(\"symmetry list ({}):\\n{}\".format(symmetry.shape,symmetry))\n#\n# D = np.atleast_2d(np.zeros((M,N)))\n# q = np.atleast_1d(np.zeros(M))\n# D[:,charges[0]] = 1\n#\n# for j in range(M):\n# D[j,charges[j+1]] = -1.0*symmetry[j]\n#\n# if debug:\n# logging.info(\"D ({}):\\n{}\".format(D.shape,D))\n# logging.info(\"q ({}):\\n{}\".format(q.shape,q))\n#\n# return D, q\n\n\ndef constructChargegroupConstraints(chargeGroups, N, q=0, debug=False):\n \"\"\"\n Function to construct D_matrix and q_vector for charge Groups\n\n Parameters\n ----------\n chargeGroups: list, or 2-D list; ints\n list of atom indices which belong to one charge group with charge q. For\n more than one charge group you can use a two dimensional list (list of\n charge groups) and use a list q, for the charges of the charge groups.\n N: int\n Total number of atoms in your system. Needed to fix the size of the\n D_matrix and q_vector to the system size (number of atoms)\n q: scalar, or list; reals\n describes the total charge of a charge group. Therefore it is a scalar\n for one charge group and a list if more than one charge group is given\n debug: bool\n True: print debug information\n False: dont print debug information\n default=False\n\n Returns\n -------\n D_matrix: np.ndarray, dim=2\n D_matrix which carries the constraints of pairwise symmetric charges\n q_vector: np.array\n q_vector carying the total charge of q_1+q_2 (always zero)\n \"\"\"\n\n logging.debug(\"\")\n logging.debug(\"### constructChargegroupConstraints ###\")\n logging.debug(\"\")\n\n M = len(chargeGroups)\n\n q_vector = np.atleast_1d(q*np.ones(M))\n\n logging.debug(\"{:d} unknowns, {:d} pairwise equality constraints\".format(N,M))\n\n D_matrix = np.atleast_2d(np.zeros((M,N)))\n #q = np.atleast_2d(np.zeros(M))\n\n for j in range(M):\n D_matrix[j,chargeGroups[j]] = 1.0\n\n logging.debug(\"D_matrix ({}):\\n{}\".format(D_matrix.shape, D_matrix))\n logging.debug(\"q_vector ({}):\\n{}\".format(q_vector.shape, q_vector))\n\n return D_matrix, q_vector\n\ndef constructTotalChargeConstraint(charge, N, symmetry=1.0, debug=False):\n \"\"\"\n Function to construct D_matrix and q_vector for the total charge constraint\n\n Parameters:\n -----------\n charges: float\n Total required charge of system.\n N: int\n Total number of atoms in your system. 
Needed to fix the size of the\n D_matrix and q_vector to the system size (number of atoms)\n debug: bool\n True: print debug information\n False: dont print debug information\n default=False\n\n Return\n ------\n D: np.ndarray, dim=2\n D_matrix filled with ones\n q: np.array\n q_vector carying the (scalar) total charge\n\n \"\"\"\n\n D = np.atleast_2d(np.ones((1, N)))\n q = np.atleast_1d(charge)\n\n return D,q\n\n# reduce D to matrix of full rank (and accordingly adjust q)\n# ATTENTION: contradictory constraints might vanish unnoticed\ndef construct_D_of_full_rank(D,q):\n D_LI=[D[0]]\n q_LI=[q[0]]\n for i in range(D.shape[0]):\n tmp=[]\n for r in D_LI:\n tmp.append(r)\n tmp.append(D[i]) \n if np.linalg.matrix_rank(tmp)>len(D_LI): \n #test if row D[i] is linearly independent from all (row) vectors in D_LI\n D_LI.append(D[i]) \n #note that matrix_rank does not need to take in a square matrix\n q_LI.append(q[i])\n return np.array(D_LI), np.array(q_LI) \n\ndef read_AtomName_ChargeGroup(file_name, ase2pmd):\n \"\"\"\n Function to read in csv file of atom names and associated charge groups with \n the help of pandas. Expects dictionary of ASE indices pointing at atom and \n residue name tyuples and constructs vector of associated charge\n group. The vectors are all ordered in the same manner. It also returns\n ncgtypes, to reconstruct the charge group if atom names occure more than ones.\n\n Parameters\n ----------\n file_name: str\n name of the file which is read. File format: one line per atom,\n str, int\n [atom name],[charge group id]\n ase2pmd: dict\n dictionary of ASE atom indices mapped onto tuples of\n ParmEd names and residues\n\n Return\n ------\n cg2ase: list of list of int\n each sublist represents one charge group and contains ASE indices of \n all atoms in the group. \n cg2cgtype: list of int\n Charge groups can reoccur across several residues. \n Thus every charge group is given an (arbitrary) unique index internally.\n This list maps internal cg indices onto their original (ambiguous) id.\n ncgtypes: int\n the number of (original) charge groups which are might be used for \n more than one atom index and thus carry an ambiguous meaning.\n \"\"\"\n\n ase2pmd_df = pd.DataFrame(ase2pmd).T\n ase2pmd_df.columns = ['atom','residue'] \n \n # column 0: atom names\n # column 1: charge group number\n pmd2cg_df = pd.read_csv(file_name, sep=',', header=None, \n comment='#', names=['atom','cg']) \n\n unique_residues = ase2pmd_df['residue'].unique()\n unique_charge_groups = pmd2cg_df['cg'].unique()\n \n ncgtypes = len(unique_charge_groups)\n cg2ase = []\n cg2cgtype = [] \n for r in unique_residues:\n # atoms_in_residue = np.where(residues == r)\n for cgtype in unique_charge_groups:\n #np.where( (residues == r ) && ( names == name_cg[:,1] == cg )\n names_in_cg = pmd2cg_df[pmd2cg_df['cg'] == cgtype]['atom']\n cg_sel = ase2pmd_df['atom'].isin(names_in_cg)\n res_sel = (ase2pmd_df['residue'] == r )\n \n new_cg = ase2pmd_df[cg_sel & res_sel]\n \n if not new_cg.empty: \n cg2ase.append(new_cg.index.values)\n cg2cgtype.append(cgtype) \n \n return cg2ase, cg2cgtype, ncgtypes\n\n\ndef read_ChargeGroup_TotalCharge(file_name):\n \"\"\"\n Function to read in csv file of charge groups and charges with the help of\n pandas.\n\n Parameters\n ----------\n file_name: str\n name of the file which is read. 
File format: \n int, float\n [ charge group number ], [ charge ]\n\n Return\n ------\n cg_q: dict of int: float\n dictionary of charge group id ('type') and corresponding total charge.\n \"\"\"\n\n cg_q_df = pd.read_csv(file_name, sep=',', header=None, comment='#', \n index_col=0, names=['charge'])\n cg_q = cg_q_df['charge'].to_dict()\n\n return cg_q\n\n\n# ATTENTION: constructs symmetry constraints across ALL residues:\ndef read_SameChargedAtoms(file_name, ase2pmd):\n \"\"\"\n Function to read in csv file of atoms which should have the same charge.\n Automatically enforces same charges for atoms of same name, but\n possibly spread over different residues.\n\n Parameters\n ----------\n file_name: str\n name of the file which is read. Format: pairwise equalities\n str, str\n [ atom name 1 ], [ atom name 2]\n \n name: list \n atom names, as ordered in ASE\n\n Return\n ------\n sym2ase: list of list of int\n each sublist groups atoms of same charge by their ASE indices.\n \"\"\"\n logging.debug(\"\")\n logging.debug(\"### read_SameChargedAtoms ###\")\n \n ase2pmd_df = pd.DataFrame(ase2pmd).T\n ase2pmd_df.columns = ['atom','residue'] \n \n sca_df = pd.read_csv(file_name, sep=',', header=None, comment='#')\n\n sym2ase = []\n\n # constructs symmetries on atoms of same type across residues\n # this will result in redundant constraints for symmetries \n # specidfied explicitly in input file for pairwise symmetries\n # for now, we do not care about that\n # TODO: reduce all (possibly recdundant) constraints into \n # a minimal set of unique ones\n \n logging.debug(\"\")\n logging.debug(\"Constructing implict symmetries due to atom types.\")\n unique_atoms = ase2pmd_df['atom'].unique()\n for a in unique_atoms:\n new_symmetry_group = ase2pmd_df[ ase2pmd_df['atom'] == a ]\n if not new_symmetry_group.empty: \n if len(new_symmetry_group.index.values) < 2:\n logging.debug(\"Apparently, atom type {} only occurs once \"\n \"at ASE index {:d}. 
No symmetry constraint \"\n \"created.\".format(a,new_symmetry_group.index.values[0]))\n else:\n logging.debug(\"Add symmetry constraint for atom type {} \"\n \"at ASE indices {}.\".format(a,new_symmetry_group.index.values))\n sym2ase.append(new_symmetry_group.index.values) \n \n logging.debug(\"\")\n logging.debug(\"Constructing explicit symmetries.\")\n \n # for i, group in sca_df.iterrows():\n # sca_sel = []\n # new_symmetry_group = []\n # for atom in group:\n # # Only selects first occurence of atom type on system\n # # and appends it to symmetry group selection, as all other \n # # symmetries accross residues are enforced by implicity \n # # name-wise constraints constructed above\n # sca_sel = ase2pmd_df[ase2pmd_df['atom'] == atom]\n # if not sca_sel.empty:\n # new_symmetry_group.append( sca_sel.index[0] )\n\n # if new_symmetry_group:\n # logging.info(\"Add explicit symmetry constraint for types {} \"\n # \"at ASE indices {}.\".format(group.values,\n # new_symmetry_group ))\n # sym2ase.append( np.array(new_symmetry_group) ) \n \n for i, group in sca_df.iterrows():\n sca_sel = ase2pmd_df['atom'].isin(group)\n new_symmetry_group = ase2pmd_df[sca_sel]\n\n if not new_symmetry_group.empty: \n logging.debug(\"Add explicit symmetry constraint for types {} \"\n \"at ASE indices {}.\".format(group.values,\n new_symmetry_group.index.values))\n sym2ase.append(new_symmetry_group.index.values)\n \n return sym2ase\n\n\ndef concatenated_constraints(D_matrices, q_vectors):\n \"\"\"\n Function to concatenate D_matrices and q_vectors to one D_matrix and one\n q_vector by using numpy.hstack and numpy.vstack. The order of D_matrix in\n D_matrices and q_vector in q_vectors should be the same, otherwise the\n constraints are connected wrong.\n\n Parameters\n ----------\n D_matrices: list of numpy.ndarray (reals)\n list of all D_matrices which should be concateneted\n q_vectors: list of numpy.ndarray (reals)\n list of all q_vectors which should be concateneted\n\n Return\n ------\n D_matrix: np.ndarray, dim=2\n D_matrix which carries all constraints of the input D_matrices\n q_vector: np.array\n q_vector which carries all constraints of the input q_vectors\n \"\"\"\n\n D_matrix = D_matrices[0]\n for d in D_matrices[1:]:\n D_matrix = np.vstack([D_matrix, d])\n\n q_vector = q_vectors[0]\n for q in q_vectors[1:]:\n q_vector = np.hstack([q_vector, q])\n\n return D_matrix, q_vector\n\n\ndef read_horton_cost_function(file_name, debug=False):\n \"\"\"\n Function to read in hortons cost function. 
You need h5py to read it.\n We read out the three variables A, B, C which characterise the cost function\n by: X^T A X - 2 B X + C, which is the function to minimize.\n Parameters\n ----------\n file_name: str\n file name of the cost function writen by Horton, typically something like\n 'xyz.cost.h5' or 'xyz_cost.h5'.\n\n Return\n ------\n A_horton: 2D numpy.ndarray\n\n B_horton: numpy.array\n\n C_horton: float\n\n N_horton: int\n N_horton is the number of atoms of the structure\n \"\"\"\n\n import h5py\n\n cost_function = h5py.File(file_name)\n cost = cost_function['cost']\n A_horton = cost['A'][:]\n B_horton = cost['B'][:]\n C_horton = cost['C'].value\n N_horton = cost['natom'].value\n if debug:\n logging.info(\"A: {}\".format(A_horton))\n logging.info(\"B: {}\".format(B_horton))\n logging.info(\"C: {}\".format(C_horton))\n logging.info(\"N: {}\".format(N_horton))\n\n return A_horton, B_horton, C_horton, N_horton\n\n\ndef logResults(X,A,B,C,N):\n \"\"\"\n Function to log results for debugging purposes.\n \n Parameters\n ----------\n X: np.ndarray, dim=2\n Optimized results as yielded by constrainedMinimize(...)\n\n A: 2D numpy.ndarray\n B: numpy.array\n C: float\n describe the cost function used for fitting\n \n N: int\n N is the number of atoms of the structure\n \n Return\n ------\n None\n \"\"\" \n \n np.set_printoptions(precision=3)\n np.set_printoptions(suppress=True)\n\n logging.info('charges {}:\\n {}\\ncharge sum = {}\\n'.format( X[:N].T.shape,\n X[:N].T,\n X[:N].T.sum() ))\n logging.info('Lagrange multipliers {}:\\n {}'.format( X[N:].T.shape,\n X[N:].T ) )\n\n ### test the results\n logging.info( 'value of cost function: {}'.format(\n (np.dot(X.T, np.dot(A, X)) - 2*np.dot(B.T, X) - C) ) )\n \n ### constraints fulfilled?\n logging.info( \"|D({}) x({}) - q({})| = {:e}\".format(A[N:,:N].shape, X[:N].shape, B[N:].shape,\n np.linalg.norm( np.dot(A[N:,:N],X[:N]) - B[N:] ) ) )\n \n# check charge group constraints:\ndef checkChargeGroups( df, cg2ase, cg2cgtype, cg2q,\n q_cols = ['q','q_unconstrained','q_qtot_constrained',\n 'q_cg_qtot_constrained', 'q_sym_qtot_constrained']):\n \n logging.info(\"\")\n logging.info(\"\")\n logging.info(\"##############################\") \n logging.info(\"CHARGE GROUP CONSTRAINTS CHECK\") \n logging.info(\"##############################\") \n logging.info(\"\")\n logging.info(\"atoms grouped together by their ASE indices:\")\n logging.info(\"{}\".format(cg2ase)) \n logging.info(\"\")\n logging.info(\"desired charge of each group:\")\n logging.info(\"{}\".format(cg2q))\n \n for cg_index, ase_indices_in_cg in enumerate(cg2ase):\n logging.info(\"cg {:d}, type {:d}:\".format(cg_index,cg2cgtype[cg_index]))\n for q_col in q_cols:\n q_cg = df.iloc[ase_indices_in_cg][q_col].sum() # select first charge group\n logging.info(\n \" {:>30}:{:8.4f} absolute error:{:12.4e}\".format(\n q_col,q_cg,q_cg-cg2q[cg_index]))\n \n# check symmetry constraints:\ndef checkSymmetries( df, sym2ase, \n q_cols = ['q','q_unconstrained','q_qtot_constrained',\n 'q_cg_qtot_constrained', 'q_sym_qtot_constrained']):\n \n logging.info(\"\") \n logging.info(\"\")\n logging.info(\"##########################\") \n logging.info(\"SYMMETRY CONSTRAINTS CHECK\") \n logging.info(\"##########################\") \n logging.info(\"\")\n logging.info(\"groups of equally charged atoms by their ASE indices:\")\n logging.info(\"{}\".format(sym2ase))\n \n for sym_index, ase_indices_in_sym in enumerate(sym2ase):\n #logging.info(\"cg {:d}, type {:d}:\".format(cg_index,cg2cgtype[cg_index]))\n msg = []\n for 
ase_index in ase_indices_in_sym:\n msg.append(\"({}, {})\".format(\n df.iloc[ase_index]['atom'], \n df.iloc[ase_index]['residue']))\n \n logging.info(\"sym {:d}: {}\".format(sym_index,\"; \".join(msg)))\n \n for q_col in q_cols:\n msg = []\n for ase_index in ase_indices_in_sym:\n msg.append(\"{:.3f}\".format(df.iloc[ase_index][q_col]))\n logging.info(\"{:>30}: {}\".format(q_col,\",\".join(msg))) \n \n logging.info(\"\")\n\n\ndef fitESPconstrained(infile_pdb, infile_top, infile_cost_h5, \n infile_atoms_in_cg_csv, infile_cg_charges_csv, \n infile_atoms_of_same_charge_csv,\n qtot = 0.0, strip_string=':SOL,CL', \n implicitHbondingPartners = {'CD4':1,'CD3':1,'CA2':2,'CA3':2,'CB2':2,'CB3':2},\n debug=False, outfile_top = None, outfile_csv = None):\n \n \"\"\"\n Automizes the whole fitting process from importing Horton's\n cost function over reading constraints from simple text files to\n minimizing, logging and double-checking the results.\n \n Parameters\n ----------\n infile_pdb: str\n PDB file with original (united-atom) molecular structure \n infile_top: str\n GROMACS topolgy file with original (united-atom) system.\n All #includes shoulb be removed!\n infile_cost_h5: str\n Cost function by HORTON, hdf5 format\n infile_atoms_in_cg_csv: str\n file with atom - charge group assignments in simple \n \"comma separated value\" text format, one line per atom:\n str, int\n [atom name],[charge group id] \n infile_cg_charges_csv: str\n file with charge group - charge assignments in simple \n \"comma separated value\" text format, one line per charge group:\n int, float\n [ charge group number ], [ charge (in e) ]\n infile_atoms_of_same_charge_csv: str\n file with pairwise atom symmetry assignments in simple \n \"comma separated value\" text format, one line per equality:\n str, str\n [ atom name 1 ], [ atom name 2]\n will have the same charge. 
Apart from that, all atoms of the same \n name (but possibly spread over different residues) will have the \n same charge enforced.\n qtot: float\n The system's total charge\n strip_string: str\n Groups to remove from the initally imported topology in ParmEd.\n ':SOL,CL' by default (solvent and chlorine ions).\n implicitHbondingPartners: dict of str: int\n By default \"{'CD4':1,'CD3':1,'CA2':2,'CA3':2,'CB2':2,'CB3':2}\"\n Specifies which atoms have (how many) implicit hydrogens around them.\n These hydrogens must equal those used in QM calculations.\n debug: bool\n By default False, uses logging if True\n \n \n Return\n ------\n q: np.ndarray of float, dim=1\n fitted charges, fully constrained\n lambda: np.ndarray of float, dim=1\n Lagrange multipliers\n info_df: pandas.DataFrame\n containing information on the fit in easily accesible pandas dataframe\n cg2ase: list of list of int \n cg2cgtype: list of int\n cg2q: list of float\n sym2ase: list of list of int\n \"\"\" \n \n logging.info(\"#################\") \n logging.info(\"fitESPconstrained\") \n logging.info(\"#################\") \n\n # A: construct all-atom representation from united-atom structure and topology:\n ua_ase_struct = ase.io.read(infile_pdb)\n ua_pmd_struct = pmd.load_file(infile_pdb)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n ua_pmd_top = gromacs.GromacsTopologyFile(infile_top,parametrize=False)\n # throws some warnings on angle types, does not matter for bonding info\n # if error thrown, just try to \"reduce\" .top as far as possible\n # warnings supressed as shown on\n # https://docs.python.org/2/library/warnings.html\n\n ua_pmd_top.strip(strip_string) \n #strip water and electrolyte from system (if not yet done in .top)\n ua_pmd_top.box = ua_pmd_struct.box # Needed because .pdb contains box info\n ua_pmd_top.positions = ua_pmd_struct.positions\n\n ua_names = [ a.name for a in ua_pmd_top.atoms ]\n ua_residues = [ a.residue.name for a in ua_pmd_top.atoms ]\n\n aa_ase_struct, aa_pmd_struct, aa_names, aa_residues = \\\n insertHbyList(ua_ase_struct,ua_pmd_top,\n implicitHbondingPartners,1.0)\n\n ua_count = len(ua_ase_struct) # united atoms structure\n aa_count = len(aa_ase_struct) # all atoms structure\n\n ua_ase_index = np.arange(ua_count)\n aa_ase_index = np.arange(aa_count)\n\n aa_atom_residue_list = list(zip(aa_names,aa_residues))\n aa_ase_index = range(aa_count)\n aa_ase2pmd = dict(zip(aa_ase_index,aa_atom_residue_list))\n aa_pmd2ase = dict(zip(aa_atom_residue_list,aa_ase_index))\n\n ua_atom_residue_list = list(zip(ua_names,ua_residues))\n ua_ase_index = range(ua_count)\n ua_ase2pmd = dict(zip(ua_ase_index,ua_atom_residue_list))\n ua_pmd2ase = dict(zip(ua_atom_residue_list,ua_ase_index))\n \n # TODO: distinction for ua and aa fitting:\n pmd_struct = ua_pmd_struct\n pmd_top = ua_pmd_top\n ase2pmd = ua_ase2pmd\n pmd2ase = ua_pmd2ase\n \n # B: read cost function\n \n A_horton, B_horton, C_horton, N_horton = \\\n read_horton_cost_function(file_name = infile_cost_h5)\n \n # C: read constraints files\n \n ### Charge Groups:\n # read in all charge groups and construct the corresponding constraints\n cg2ase, cg2cgtype, ncgtypes = read_AtomName_ChargeGroup(\n file_name = infile_atoms_in_cg_csv, ase2pmd = ase2pmd)\n \n cg_q = read_ChargeGroup_TotalCharge(file_name = infile_cg_charges_csv)\n \n cg2q = [ cg_q[cg] for cg in cg2cgtype ]\n \n ### Same Charged Atoms\n sym2ase = read_SameChargedAtoms(\n file_name = infile_atoms_of_same_charge_csv, ase2pmd = ase2pmd)\n \n # D: construct constraints 
matrices\n D_matrix_cg_red, q_vector_cg_red = constructChargegroupConstraints(\n chargeGroups = cg2ase, N = N_horton, q = cg2q, debug = debug)\n \n D_matrix_sym_red, q_vector_sym_red = constructPairwiseSymmetryConstraints(\n charges = sym2ase, N = N_horton, symmetry = 1.0, debug = False)\n \n D_matrix_qtot, q_vector_qtot = constructTotalChargeConstraint(\n charge = qtot, N = N_horton)\n \n D_matrix_all_red, q_vector_all_red = concatenated_constraints(\n D_matrices = [D_matrix_cg_red,D_matrix_sym_red,D_matrix_qtot],\n q_vectors = [q_vector_cg_red,q_vector_sym_red,q_vector_qtot]) \n \n # remove redundant constraints \n D_matrix_cg, q_vector_cg = construct_D_of_full_rank(\n D_matrix_cg_red, q_vector_cg_red)\n D_matrix_sym, q_vector_sym = construct_D_of_full_rank(\n D_matrix_sym_red, q_vector_sym_red )\n D_matrix_all, q_vector_all = construct_D_of_full_rank(\n D_matrix_all_red, q_vector_all_red)\n \n if debug:\n D_matrix_cg_qtot_red, q_vector_cg_qtot_red = concatenated_constraints(\n D_matrices = [D_matrix_cg_red,D_matrix_qtot],\n q_vectors = [q_vector_cg_red,q_vector_qtot]) \n D_matrix_cg_qtot, q_vector_cg_qtot = construct_D_of_full_rank(\n D_matrix_cg_qtot_red, q_vector_cg_qtot_red)\n \n \n D_matrix_sym_qtot_red, q_vector_sym_qtot_red = concatenated_constraints(\n D_matrices = [D_matrix_sym_red,D_matrix_qtot],\n q_vectors = [q_vector_sym_red,q_vector_qtot]) \n D_matrix_sym_qtot, q_vector_sym_qtot = construct_D_of_full_rank(\n D_matrix_sym_qtot_red, q_vector_sym_qtot_red)\n \n logging.info(\"\")\n logging.info(\"\")\n logging.info(\"\")\n logging.info(\"################\") \n logging.info(\"CONSTRAINTS INFO\") \n logging.info(\"################\") \n logging.info(\"\")\n logging.info(\"\")\n \n # CG CONSTRAINTS\n rank_D_cg = np.linalg.matrix_rank(D_matrix_cg)\n rank_Dq_cg = np.linalg.matrix_rank(np.hstack((D_matrix_cg,\n np.atleast_2d(q_vector_cg).T)))\n logging.info(\"{:d} CG constraints, rank {:d} \"\n \" => {:d} redundant or contradictory constraints.\".format(\n D_matrix_cg_red.shape[0], rank_D_cg, \n D_matrix_cg_red.shape[0] - rank_D_cg))\n if rank_D_cg == rank_Dq_cg:\n logging.info(\" D rank == [D,q] rank == {:d},\"\n \" solvable\".format(rank_D_cg))\n else:\n logging.info(\" D rank {:d} != [D,q] rank {:d},\"\n \" unsolvable\".format(rank_D_cg, rank_Dq_cg))\n\n # CG + QTOT CONSTRAINTS\n rank_D_cg_qtot = np.linalg.matrix_rank(D_matrix_cg_qtot)\n rank_Dq_cg_qtot = np.linalg.matrix_rank(np.hstack((D_matrix_cg_qtot,\n np.atleast_2d(q_vector_cg_qtot).T)))\n logging.info(\"{:d} CG and QTOT constraints, rank {:d} \"\n \" => {:d} redundant or contradictory constraints.\".format(\n D_matrix_cg_qtot_red.shape[0], rank_D_cg_qtot, \n D_matrix_cg_qtot_red.shape[0] - rank_D_cg_qtot))\n if rank_D_cg_qtot == rank_Dq_cg_qtot:\n logging.info(\" D rank == [D,q] rank == {:d},\"\n \" solvable\".format(rank_D_cg_qtot))\n else:\n logging.info(\" D rank {:d} != [D,q] rank {:d},\"\n \" unsolvable\".format(rank_D_cg_qtot, rank_Dq_cg_qtot))\n\n \n # SYM CONSTRAINTS\n rank_D_sym = np.linalg.matrix_rank(D_matrix_sym)\n rank_Dq_sym = np.linalg.matrix_rank(np.hstack((D_matrix_sym,\n np.atleast_2d(q_vector_sym).T)))\n logging.info(\"{:d} SYM constraints, rank {:d} \"\n \" => {:d} redundant or contradictory constraints.\".format(\n D_matrix_sym_red.shape[0], rank_D_sym, \n D_matrix_sym_red.shape[0] - rank_D_sym))\n if rank_D_sym == rank_Dq_sym:\n logging.info(\" D rank == [D,q] rank == {:d},\"\n \" solvable\".format(rank_D_sym))\n else:\n logging.info(\" D rank {:d} != [D,q] rank {:d},\" \n \" 
unsolvable\".format(rank_D_sym, rank_Dq_sym))\n\n # SYM + QTOT CONSTRAINTS\n rank_D_sym_qtot = np.linalg.matrix_rank(D_matrix_sym_qtot)\n rank_Dq_sym_qtot = np.linalg.matrix_rank(np.hstack((D_matrix_sym_qtot,\n np.atleast_2d(q_vector_sym_qtot).T)))\n logging.info(\"{:d} SYM and QTOT constraints, rank {:d} \"\n \" => {:d} redundant or contradictory constraints.\".format(\n D_matrix_sym_qtot_red.shape[0], rank_D_sym_qtot, \n D_matrix_sym_qtot_red.shape[0] - rank_D_sym_qtot))\n if rank_D_sym_qtot == rank_Dq_sym_qtot:\n logging.info(\" D rank == [D,q] rank == {:d},\"\n \" solvable\".format(rank_D_sym_qtot))\n else:\n logging.info(\" D rank {:d} != [D,q] rank {:d},\"\n \" unsolvable\".format(rank_D_sym_qtot, rank_Dq_sym_qtot))\n \n # ALL CONSTRAINTS\n rank_D_all = np.linalg.matrix_rank(D_matrix_all)\n rank_Dq_all = np.linalg.matrix_rank(np.hstack((D_matrix_all,\n np.atleast_2d(q_vector_all).T)))\n logging.info(\"{:d} ALL constraints, rank {:d} \"\n \" => {:d} redundant or contradictory constraint.\".format(\n D_matrix_all_red.shape[0], rank_D_all, \n D_matrix_all_red.shape[0] - rank_D_all))\n if rank_D_sym == rank_Dq_sym:\n logging.info(\" D rank == [D,q] rank == {:d},\"\n \" solvable\".format(rank_D_all))\n else:\n logging.info(\" D rank {:d} != [D,q] rank {:d},\"\n \" unsolvable\".format(rank_D_all, rank_Dq_all))\n \n # E: Minimization \n \n ### Constrained minimization\n logging.info(\"\") \n logging.info(\"########################\") \n logging.info(\"FULLY CONSTRAINED SYSTEM\") \n logging.info(\"########################\")\n logging.info(\"\") \n X, A, B = constrainedMinimize(A_matrix = A_horton,\n b_vector = B_horton,\n C_scalar = C_horton,\n D_matrix = D_matrix_all,\n q_vector = q_vector_all,\n debug = debug)\n \n ase2pmd_df = pd.DataFrame(ase2pmd).T\n ase2pmd_df.columns = ['atom','residue']\n ase2pmd_df['q'] = X[:N_horton]\n\n # additional debug cases\n if debug: \n ### Unconstrained minimization\n logging.info(\"\") \n logging.info(\"####################\") \n logging.info(\"UNCONSTRAINED SYSTEM\") \n logging.info(\"####################\")\n logging.info(\"\") \n X_unconstrained, A_unconstrained, B_unconstrained = \\\n unconstrainedMinimize(A_matrix = A_horton,\n b_vector = B_horton,\n C_scalar = C_horton,\n debug = debug)\n \n ### Total charge constraint minimization\n logging.info(\"\") \n logging.info(\"####################################\") \n logging.info(\"SYSTEM WITH TOTAL CHARGE CONSTRAINED\") \n logging.info(\"####################################\")\n logging.info(\"\") \n X_qtot_constraint, A_qtot_constraint, B_qtot_constraint = \\\n constrainedMinimize(A_matrix = A_horton,\n b_vector = B_horton,\n C_scalar = C_horton,\n D_matrix = D_matrix_qtot,\n q_vector = q_vector_qtot,\n debug = debug)\n \n ### Charge group & total charge constraint minimization \n logging.info(\"\") \n logging.info(\"######################################################\") \n logging.info(\"SYSTEM WITH TOTAL CHARGE AND CHARGE GROUPS CONSTRAINED\") \n logging.info(\"######################################################\")\n logging.info(\"\") \n X_cg_qtot, A_cg_qtot, B_cg_qtot = \\\n constrainedMinimize(A_matrix = A_horton,\n b_vector = B_horton,\n C_scalar = C_horton,\n D_matrix = D_matrix_cg_qtot,\n q_vector = q_vector_cg_qtot,\n debug = debug)\n \n ### Symmetry & total charge constraint minimization\n logging.info(\"\") \n logging.info(\"###################################################\") \n logging.info(\"SYSTEM WITH TOTAL CHARGE AND SYMMETRIES CONSTRAINED\") \n 
logging.info(\"###################################################\")\n logging.info(\"\")\n X_sym_qtot, A_sym_qtot, B_sym_qtot = \\\n constrainedMinimize(A_matrix = A_horton,\n b_vector = B_horton,\n C_scalar = C_horton,\n D_matrix = D_matrix_sym_qtot,\n q_vector = q_vector_sym_qtot,\n debug = debug)\n \n \n #logging.info(\"\") \n #logging.info(\"###################################\") \n #logging.info(\"FULLY CONSTRAINED SYSTEM, REDUNDANT\") \n #logging.info(\"###################################\")\n #logging.info(\"\") \n #X_red, A_red, B_red = constrainedMinimize(A_matrix = A_horton,\n # b_vector = B_horton,\n # C_scalar = C_horton,\n # D_matrix = D_matrix_all_red,\n # q_vector = q_vector_all_red,\n # debug = debug)\n\n logging.info(\"\")\n logging.info(\"\")\n logging.info(\"\")\n logging.info(\"#################################\") \n logging.info(\"RESULTS FOR DIFFERENT CONSTRAINTS\") \n logging.info(\"#################################\") \n logging.info(\"\")\n logging.info(\"\")\n logging.info(\"### UNCONSTRAINED ###\")\n logResults(X_unconstrained,A_unconstrained,B_unconstrained,C_horton,N_horton)\n \n logging.info(\"\") \n logging.info(\"\")\n logging.info(\"### QTOT CONSTRAINED ###\")\n logResults(X_qtot_constraint,A_qtot_constraint,\n B_qtot_constraint,C_horton,N_horton)\n \n logging.info(\"\")\n logging.info(\"\")\n logging.info(\"### QTOT & CG CONSTRAINED ###\")\n logResults(X_cg_qtot,A_cg_qtot,B_cg_qtot,C_horton,N_horton)\n \n logging.info(\"\")\n logging.info(\"\")\n logging.info(\"### QTOT & SYM CONSTRAINED ###\")\n logResults(X_sym_qtot,A_sym_qtot,B_sym_qtot,C_horton,N_horton)\n \n #logging.info(\"\")\n #logging.info(\"\")\n #logging.info(\"### FULLY CONSTRAINED, REDUNDANT ###\")\n #logResults(X_red,A_red,B_red,C_horton,N_horton)\n\n\n logging.info(\"\")\n logging.info(\"\")\n logging.info(\"### FULLY CONSTRAINED ###\")\n logResults(X,A,B,C_horton,N_horton)\n \n #ase2pmd_df.columns.append(['q_unconstrained', 'q_qtot_constrained', 'q_qtot_cg_constrained'])\n ase2pmd_df['q_unconstrained'] = X_unconstrained\n ase2pmd_df['q_qtot_constrained'] = X_qtot_constraint[:N_horton]\n ase2pmd_df['q_cg_qtot_constrained'] = X_cg_qtot[:N_horton]\n ase2pmd_df['q_sym_qtot_constrained'] = X_sym_qtot[:N_horton]\n \n checkChargeGroups(ase2pmd_df,cg2ase,cg2cgtype,cg2q)\n checkSymmetries(ase2pmd_df,sym2ase)\n\n \n #atom_charge_dict = dict(zip(names[0:ua_count],charges))\n \n # one line to assign unique charge group numbers starting at 1 to ASE indices\n ase2cg = dict([(idx, cgnr+1) for cgnr,cg in enumerate(cg2ase) for idx in cg])\n logging.info(\"ase2cg: {}\".format(ase2cg)) \n\n for a in pmd_top.atoms:\n a.charge = X[ pmd2ase[(a.name,a.residue.name)] ]\n a.cgnr = ase2cg[ pmd2ase[(a.name,a.residue.name)] ]\n \n for a in pmd_top.atoms:\n logging.info(\"(name, residue): ({:>4s},{:>4s}), q = {:> .3f}, cgnr = {:>3d}\".format(\n a.name, a.residue.name, a.charge, a.cgnr)) \n\n \n if outfile_top:\n pmd_top.save(outfile_top, overwrite=True)\n \n if outfile_csv: \n ase2pmd_df.to_csv(outfile_csv, sep=',')\n \n \n logging.info(\"####\") \n logging.info(\"DONE\") \n logging.info(\"####\") \n \n return X[:N_horton], X[N_horton:], ase2pmd_df, cg2ase, cg2cgtype, cg2q, sym2ase\n\n### ACTUAL PROGRAM ###\n#--------------------#\ndef main():\n import sys\n import ast\n import argparse\n \n\n parser = argparse.ArgumentParser(prog='esp-fit-constrained.py',\n description='Estimate charges from a HORTON ESP cost function'\n 'under arbitrary constraints.')\n parser.add_argument('infile_cost_h5',metavar='cost.h5',\n 
help='The location of the HORTON cost function in the form '\n '\"file.h5:group/cost\". This argument must be the same as the '\n 'output argument of the script horton-esp-cost.py.')\n parser.add_argument('infile_pdb', metavar='infile.pdb',\n help='PDB file with original (united-atom) molecular structure.')\n parser.add_argument('infile_top', metavar='infile.top',\n help='GROMACS topolgy file with original (united-atom) system. '\n 'All #includes shoulb be removed!')\n parser.add_argument('infile_atoms_in_cg_csv', metavar='infile_atoms_in_cg.csv',\n help='file with atom - charge group assignments in simple ' \n '\"comma separated value\" text format, one line per atom: '\n 'str, int / [atom name],[charge group id]')\n parser.add_argument('infile_cg_charges_csv', metavar='infile_cg_charges.csv',\n help='file with charge group - charge assignments in simple' \n '\"comma separated value\" text format, one line per charge group:'\n 'int, float / [ charge group number ], [ charge (in e) ]')\n parser.add_argument('infile_atoms_of_same_charge_csv', \n metavar='infile_atoms_of_same_charge.csv',\n help='file with pairwise atom symmetry assignments in simple' \n '\"comma separated value\" text format, one line per equality:'\n 'str, str / [ atom name 1 ], [ atom name 2] will have the same charge. '\n 'Apart from that, all atoms of the same name (but possibly spread over '\n 'different residues) will have the same charge enforced.') \n \n parser.add_argument('outfile_top', nargs='?', metavar='outfile.top', \n default=None, help=\"GROMACS .top output file\"\n \"with updated charges according to given .hdf5\")\n parser.add_argument('outfile_csv', metavar='outfile.csv',\n help='Fitted charges will be written to a simple text file.')\n \n parser.add_argument('--qtot', '-q', default=0.0, type=float,\n help='The total charge of the system. [default=%(default)s]')\n parser.add_argument('--insertion-rules','-i',\n default=\"{'CD4':1,'CD3':1,'CA2':2,'CA3':2,'CB2':2,'CB3':2}\",\n help=\"A string representation of a python dictionary, describing how \"\n \"many implicit hydrogens have been inserted at which atom.\"\n \"Example and default: \"\n \"{'CD4':1,'CD3':1,'CA2':2,'CA3':2,'CB2':2,'CB3':2}\")\n parser.add_argument('-v','--verbose', action='store_true',\n help=\"Prints a lot of information.\")\n\n args = parser.parse_args()\n \n if args.verbose == True:\n loglevel = logging.DEBUG\n else:\n loglevel = logging.WARNING\n\n logging.basicConfig(stream=sys.stdout, level=loglevel) \n logging.info('Using replacement rules \"{}\"...'.format(args.insertion_rules))\n \n implicitHbondingPartners = ast.literal_eval(args.insertion_rules)\n \n #q, lagrange_multiplier, info_df, cg2ase, cg2cgtype, cg2q, sym2ase\n q, lagrange_multiplier, info_df, cg2ase, cg2cgtype, cg2q, sym2ase = \\\n fitESPconstrained(infile_pdb = args.infile_pdb, \n infile_top = args.infile_top, \n infile_cost_h5 = args.infile_cost_h5, \n infile_atoms_in_cg_csv = args.infile_atoms_in_cg_csv, \n infile_cg_charges_csv = args.infile_cg_charges_csv, \n infile_atoms_of_same_charge_csv = args.infile_atoms_of_same_charge_csv,\n qtot = args.qtot, strip_string=':SOL,CL', \n implicitHbondingPartners = implicitHbondingPartners, \n debug = args.verbose, outfile_top=args.outfile_top,\n outfile_csv=args.outfile_csv)\n \n # np.savetxt(args.outfile_csv, q, delimiter=',') \n \nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.array",
"numpy.dot",
"numpy.linalg.matrix_rank",
"numpy.zeros",
"numpy.set_printoptions",
"numpy.block",
"numpy.ones",
"pandas.DataFrame",
"numpy.atleast_1d",
"numpy.arange",
"numpy.linalg.solve",
"numpy.hstack",
"pandas.read_csv",
"numpy.vstack",
"numpy.atleast_2d"
]
]
|
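A minimal standalone sketch of the rank-reduction step used in `construct_D_of_full_rank` in the row above: constraint rows of D that do not raise the matrix rank are dropped together with their entries in q. The matrix and vector values below are invented for illustration, and, as the original comment warns, contradictory constraints would vanish just as silently.

```python
import numpy as np

# Third row of D equals row0 + row1, so it is redundant.
D = np.array([[1.0, 1.0, 0.0],
              [0.0, 1.0, 1.0],
              [1.0, 2.0, 1.0]])
q = np.array([0.5, -0.5, 0.0])

D_LI, q_LI = [D[0]], [q[0]]
for i in range(1, D.shape[0]):
    # keep a row only if it increases the rank of the rows kept so far
    if np.linalg.matrix_rank(np.vstack(D_LI + [D[i]])) > len(D_LI):
        D_LI.append(D[i])
        q_LI.append(q[i])

print(np.array(D_LI))  # only the two independent constraints survive
print(np.array(q_LI))
```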
ZhangGongjie/Soft-NMS-for-Rotated-Rectangles | [
"52f274c16e68685f894ca0c5154a046ac658e490"
]
| [
"softnms_rotate.py"
]
| [
"# -*- coding: utf-8 -*-\n# Soft NMS for rotated rectangle, cpu implementation.\n# Author: Gongjie Zhang \n# [email protected]\n# or [email protected]\n\nimport numpy as np\nimport cv2\n\ndef softnms_rotate_cpu(boxes, scores, iou_threshold, final_threshold=0.001):\n \"\"\"\n :param boxes: format [x_c, y_c, w, h, theta(degrees)]\n :param scores: scores of boxes\n :param iou_threshold: iou threshold (usually 0.7 or 0.3)\n :param final_threshold: usually 0.001, if weighted score less than this value, discard the box\n\n :return: the remaining INDEX of boxes\n\n Note that this function changes \n \"\"\"\n\n EPSILON = 1e-5 # a very small number\n pos = 0 # a position index\n\n N = boxes.shape[0] # number of input bounding boxes\n \n for i in range(N):\n\n maxscore = scores[i]\n maxpos = i\n\n tbox = boxes[i,:] \n tscore = scores[i]\n\n pos = i + 1\n\n # get bounding box with maximum score\n while pos < N:\n if maxscore < scores[pos]:\n maxscore = scores[pos]\n maxpos = pos\n pos = pos + 1\n\n # Add max score bounding box as a detection result\n boxes[i,:] = boxes[maxpos,:]\n scores[i] = scores[maxpos]\n # swap ith box with position of max box\n boxes[maxpos,:] = tbox\n scores[maxpos] = tscore\n\n tbox = boxes[i,:]\n tscore = scores[i]\n tarea = tbox[2] * tbox[3]\n\n pos = i + 1\n\n # NMS iterations, note that N changes if detection boxes fall below final_threshold\n while pos < N:\n box = boxes[pos, :]\n score = scores[pos]\n area = box[2] * box[3]\n try:\n int_pts = cv2.rotatedRectangleIntersection(((tbox[0], tbox[1]), (tbox[2], tbox[3]), tbox[4]), ((box[0], box[1]), (box[2], box[3]), box[4]))[1]\n if int_pts is not None:\n order_pts = cv2.convexHull(int_pts, returnPoints=True)\n int_area = cv2.contourArea(order_pts)\n inter = int_area * 1.0 / (tarea + area - int_area + EPSILON) # compute IoU\n else:\n inter = 0\n except:\n \"\"\"\n cv2.error: /io/opencv/modules/imgproc/src/intersection.cpp:247:\n error: (-215) intersection.size() <= 8 in function rotatedRectangleIntersection\n \"\"\"\n inter = 0.9999\n\n # Soft NMS, weight computation.\n if inter > iou_threshold:\n weight = 1 - inter\n else:\n weight = 1\n scores[pos] = weight * scores[pos]\n\n # if box score fall below final_threshold, discard it by swapping with last box\n # also, update N\n if scores[pos] < final_threshold:\n boxes[pos, :] = boxes[N-1, :]\n scores[pos] = scores[N-1]\n N = N - 1\n pos = pos - 1 \n\n pos = pos + 1\n\n keep = [i for i in range(N)]\n return np.array(keep, np.int64)\n\n\n\n\n# for testing\nif __name__ == '__main__':\n\n boxes = np.array([[50, 50, 100, 100, 0],\n [50, 50, 100, 100, 0],\n [50, 50, 100, 100, -45.],\n [200, 200, 100, 105, 0.]])\n\n scores = np.array([0.99, 0.88, 0.66, 0.77])\n\n result = softnms_rotate_cpu(boxes, scores, 0.3)\n\n print(boxes)\n\n print(result)"
]
| [
[
"numpy.array"
]
]
|
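For reference, the rotated-box IoU that drives the soft-NMS weighting in the row above can be exercised on its own. This sketch repeats the OpenCV intersection / convex-hull computation with two made-up boxes in the same (x_c, y_c, w, h, theta in degrees) format; it is an illustration, not part of the repository code.

```python
import cv2

def rotated_iou(b1, b2, eps=1e-5):
    # same intersection -> convex hull -> area pipeline as softnms_rotate_cpu
    r1 = ((b1[0], b1[1]), (b1[2], b1[3]), b1[4])
    r2 = ((b2[0], b2[1]), (b2[2], b2[3]), b2[4])
    pts = cv2.rotatedRectangleIntersection(r1, r2)[1]
    if pts is None:
        return 0.0
    inter = cv2.contourArea(cv2.convexHull(pts, returnPoints=True))
    return inter / (b1[2] * b1[3] + b2[2] * b2[3] - inter + eps)

box_a = (50.0, 50.0, 100.0, 100.0, 0.0)
box_b = (60.0, 50.0, 100.0, 80.0, -30.0)
print(rotated_iou(box_a, box_b))  # heavy overlap, so the IoU is large
```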
Ankur3107/swig | [
"b702933bc4a690cfb4820ca67f47523bc5f76376"
]
| [
"global_utils/anchors.py"
]
| [
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport pdb\n\n\nclass Anchors(nn.Module):\n def __init__(self, pyramid_levels=None, strides=None, sizes=None, ratios=None, scales=None):\n super(Anchors, self).__init__()\n\n if pyramid_levels is None:\n self.pyramid_levels = [4, 5, 6, 7]\n if strides is None:\n self.strides = [2 ** x for x in self.pyramid_levels]\n if sizes is None:\n self.sizes = [2 ** (x + 2) for x in self.pyramid_levels]\n if ratios is None:\n self.ratios = np.array([0.35, 0.65, 0.95, 1.38, 2.3])\n if scales is None:\n self.scales = np.array([2 ** 0])\n\n def forward(self, image):\n \n image_shape = image.shape[2:]\n image_shape = np.array(image_shape)\n\n #image_shape = torch.tensor(image_shape)\n image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in self.pyramid_levels]\n\n # compute anchors over all pyramid levels\n all_anchors = np.zeros((0, 4)).astype(np.float32)\n #all_anchors = torch.zeros((0, 4))\n\n\n for idx, p in enumerate(self.pyramid_levels):\n anchors = generate_anchors(base_size=self.sizes[idx], ratios=self.ratios, scales=self.scales)\n shifted_anchors = shift(image_shapes[idx], self.strides[idx], anchors)\n all_anchors = np.append(all_anchors, shifted_anchors, axis=0)\n\n all_anchors = np.expand_dims(all_anchors, axis=0)\n\n return torch.from_numpy(all_anchors.astype(np.float32)).cuda()\n\ndef generate_anchors(base_size=16, ratios=None, scales=None):\n \"\"\"\n Generate anchor (reference) windows by enumerating aspect ratios X\n scales w.r.t. a reference window.\n \"\"\"\n\n if ratios is None:\n ratios = np.array([0.5, 1, 2])\n\n if scales is None:\n scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])\n\n num_anchors = len(ratios) * len(scales)\n\n # initialize output anchors\n anchors = np.zeros((num_anchors, 4))\n #anchors = torch.zeros((num_anchors, 4))\n\n #pdb.set_trace()\n # scale base_size\n anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T\n\n # compute areas of anchors\n areas = anchors[:, 2] * anchors[:, 3]\n\n # correct for ratios\n anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales)))\n anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales))\n\n # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)\n anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T\n anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T\n\n return anchors\n\ndef compute_shape(image_shape, pyramid_levels):\n \"\"\"Compute shapes based on pyramid levels.\n\n :param image_shape:\n :param pyramid_levels:\n :return:\n \"\"\"\n image_shape = np.array(image_shape[:2])\n image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in pyramid_levels]\n return image_shapes\n\n\ndef anchors_for_shape(\n image_shape,\n pyramid_levels=None,\n ratios=None,\n scales=None,\n strides=None,\n sizes=None,\n shapes_callback=None,\n):\n\n image_shapes = compute_shape(image_shape, pyramid_levels)\n\n # compute anchors over all pyramid levels\n all_anchors = np.zeros((0, 4))\n for idx, p in enumerate(pyramid_levels):\n anchors = generate_anchors(base_size=sizes[idx], ratios=ratios, scales=scales)\n shifted_anchors = shift(image_shapes[idx], strides[idx], anchors)\n all_anchors = np.append(all_anchors, shifted_anchors, axis=0)\n\n return all_anchors\n\n\ndef shift(shape, stride, anchors):\n shift_x = (np.arange(0, shape[1]) + 0.5) * stride\n shift_y = (np.arange(0, shape[0]) + 0.5) * stride\n\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n\n shifts = np.vstack((\n shift_x.ravel(), shift_y.ravel(),\n 
shift_x.ravel(), shift_y.ravel()\n )).transpose()\n\n # add A anchors (1, A, 4) to\n # cell K shifts (K, 1, 4) to get\n # shift anchors (K, A, 4)\n # reshape to (K*A, 4) shifted anchors\n A = anchors.shape[0]\n K = shifts.shape[0]\n all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))\n all_anchors = all_anchors.reshape((K * A, 4))\n\n return all_anchors\n\n"
]
| [
[
"numpy.array",
"numpy.zeros",
"numpy.tile",
"numpy.arange",
"numpy.append",
"numpy.meshgrid",
"numpy.expand_dims"
]
]
|
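The broadcast inside `shift()` in the row above is the easiest part to misread; here is a tiny standalone illustration with one anchor tiled over a hypothetical 2x2 feature map at stride 8 (all numbers invented).

```python
import numpy as np

anchor = np.array([[-8.0, -8.0, 8.0, 8.0]])        # one (x1, y1, x2, y2) box
shift_x = (np.arange(0, 2) + 0.5) * 8              # pixel-centre offsets
shift_y = (np.arange(0, 2) + 0.5) * 8
sx, sy = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((sx.ravel(), sy.ravel(),
                    sx.ravel(), sy.ravel())).transpose()   # (K, 4), K = 4

# (1, A, 4) + (K, 1, 4) broadcasts to (K, A, 4), then flattens to (K*A, 4)
all_anchors = (anchor.reshape((1, 1, 4)) +
               shifts.reshape((1, 4, 4)).transpose((1, 0, 2))).reshape((4, 4))
print(all_anchors)   # the same box centred on each of the four cells
```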
campagnola/pynwb | [
"b3f1034909ac4462378e79d0e438dc5b803e5fbf"
]
| [
"tests/integration/ui_write/test_ecephys.py"
]
| [
"import unittest2 as unittest\n\nimport numpy as np\n\nfrom pynwb.form.build import GroupBuilder, DatasetBuilder, LinkBuilder, RegionBuilder, ReferenceBuilder\n\nfrom pynwb.ecephys import * # noqa: F403\nfrom pynwb.misc import UnitTimes\n\nfrom . import base\n\n\nclass TestUnitTimesIO(base.TestDataInterfaceIO):\n\n def setUpContainer(self):\n # self.spike_unit1 = SpikeUnit('unit1', [0, 1, 2], 'spike unit1 description', 'spike units source')\n # self.spike_unit2 = SpikeUnit('unit2', [3, 4, 5], 'spike unit2 description', 'spike units source')\n ut = UnitTimes('UnitTimes integration test', name='UnitTimesTest')\n ut.add_spike_times(0, [0, 1, 2])\n ut.add_spike_times(1, [3, 4, 5])\n return ut\n\n def setUpBuilder(self):\n ids_builder = DatasetBuilder('unit_ids', [0, 1],\n attributes={'neurodata_type': 'ElementIdentifiers',\n 'namespace': 'core',\n 'help': 'unique identifiers for a list of elements'})\n st_builder = DatasetBuilder('spike_times', [0, 1, 2, 3, 4, 5],\n attributes={'neurodata_type': 'VectorData',\n 'namespace': 'core',\n 'help': 'Values for a list of elements'})\n sti_builder = DatasetBuilder('spike_times_index',\n [RegionBuilder(slice(0, 3), st_builder), RegionBuilder(slice(3, 6), st_builder)],\n attributes={'neurodata_type': 'VectorIndex',\n 'namespace': 'core',\n 'help': 'indexes into a list of values for a list of elements'})\n return GroupBuilder('UnitTimesTest',\n attributes={'neurodata_type': 'UnitTimes',\n 'namespace': 'core',\n 'help': 'Estimated spike times from a single unit',\n 'source': 'UnitTimes integration test'},\n datasets={'unit_ids': ids_builder,\n 'spike_times': st_builder,\n 'spike_times_index': sti_builder})\n\n def test_get_spike_times(self):\n ut = self.roundtripContainer()\n received = ut.get_unit_spike_times(0)\n self.assertTrue(np.array_equal(received, [0, 1, 2]))\n received = ut.get_unit_spike_times(1)\n self.assertTrue(np.array_equal(received, [3, 4, 5]))\n\n\nclass TestElectrodeGroupIO(base.TestMapRoundTrip):\n\n def setUpContainer(self):\n self.dev1 = Device('dev1', 'a test source') # noqa: F405\n return ElectrodeGroup('elec1', 'a test source', # noqa: F405\n 'a test ElectrodeGroup',\n 'a nonexistent place',\n self.dev1)\n\n def setUpBuilder(self):\n device_builder = GroupBuilder('dev1',\n attributes={'neurodata_type': 'Device',\n 'namespace': 'core',\n 'help': 'A recording device e.g. 
amplifier',\n 'source': 'a test source'})\n return GroupBuilder('elec1',\n attributes={'neurodata_type': 'ElectrodeGroup',\n 'namespace': 'core',\n 'help': 'A physical grouping of channels',\n 'description': 'a test ElectrodeGroup',\n 'location': 'a nonexistent place',\n 'source': 'a test source'},\n links={\n 'device': LinkBuilder('device', device_builder)\n })\n\n def addContainer(self, nwbfile):\n ''' Should take an NWBFile object and add the container to it '''\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.container)\n\n def getContainer(self, nwbfile):\n ''' Should take an NWBFile object and return the Container'''\n return nwbfile.get_electrode_group(self.container.name)\n\n\nclass TestElectricalSeriesIO(base.TestDataInterfaceIO):\n\n @staticmethod\n def make_electrode_table(self):\n self.table = ElectrodeTable('electrodes') # noqa: F405\n self.dev1 = Device('dev1', 'a test source') # noqa: F405\n self.group = ElectrodeGroup('tetrode1', 'a test source', # noqa: F405\n 'tetrode description', 'tetrode location', self.dev1)\n self.table.add_row(1, 1.0, 2.0, 3.0, -1.0, 'CA1', 'none', 'first channel of tetrode', self.group)\n self.table.add_row(2, 1.0, 2.0, 3.0, -2.0, 'CA1', 'none', 'second channel of tetrode', self.group)\n self.table.add_row(3, 1.0, 2.0, 3.0, -3.0, 'CA1', 'none', 'third channel of tetrode', self.group)\n self.table.add_row(4, 1.0, 2.0, 3.0, -4.0, 'CA1', 'none', 'fourth channel of tetrode', self.group)\n\n @staticmethod\n def get_table_builder(self):\n self.device_builder = GroupBuilder('dev1',\n attributes={'neurodata_type': 'Device',\n 'namespace': 'core',\n 'help': 'A recording device e.g. amplifier',\n 'source': 'a test source'})\n self.eg_builder = GroupBuilder('tetrode1',\n attributes={'neurodata_type': 'ElectrodeGroup',\n 'namespace': 'core',\n 'help': 'A physical grouping of channels',\n 'description': 'tetrode description',\n 'location': 'tetrode location',\n 'source': 'a test source'},\n links={\n 'device': LinkBuilder('device', self.device_builder)\n })\n\n data = [\n (1, 1.0, 2.0, 3.0, -1.0, 'CA1', 'none', 'first channel of tetrode',\n ReferenceBuilder(self.eg_builder), 'tetrode1'),\n (2, 1.0, 2.0, 3.0, -2.0, 'CA1', 'none', 'second channel of tetrode',\n ReferenceBuilder(self.eg_builder), 'tetrode1'),\n (3, 1.0, 2.0, 3.0, -3.0, 'CA1', 'none', 'third channel of tetrode',\n ReferenceBuilder(self.eg_builder), 'tetrode1'),\n (4, 1.0, 2.0, 3.0, -4.0, 'CA1', 'none', 'fourth channel of tetrode',\n ReferenceBuilder(self.eg_builder), 'tetrode1')\n ]\n return DatasetBuilder('electrodes', data,\n attributes={'neurodata_type': 'ElectrodeTable',\n 'namespace': 'core',\n 'help': 'a table for storing data about extracellular electrodes'})\n\n def setUpContainer(self):\n self.make_electrode_table(self)\n region = ElectrodeTableRegion(self.table, [0, 2], 'the first and third electrodes') # noqa: F405\n data = list(zip(range(10), range(10, 20)))\n timestamps = list(map(lambda x: x/10, range(10)))\n ret = ElectricalSeries('test_eS', 'a hypothetical source', data, region, timestamps=timestamps) # noqa: F405\n return ret\n\n def setUpBuilder(self):\n table_builder = self.get_table_builder(self)\n data = list(zip(range(10), range(10, 20)))\n timestamps = list(map(lambda x: x/10, range(10)))\n return GroupBuilder('test_eS',\n attributes={'source': 'a hypothetical source',\n 'namespace': base.CORE_NAMESPACE,\n 'comments': 'no comments',\n 'description': 'no description',\n 'neurodata_type': 'ElectricalSeries',\n 'help': 'Stores acquired voltage data from 
extracellular recordings'},\n datasets={'data': DatasetBuilder('data',\n data,\n attributes={'unit': 'volt',\n 'conversion': 1.0,\n 'resolution': 0.0}),\n 'timestamps': DatasetBuilder('timestamps',\n timestamps,\n attributes={'unit': 'Seconds', 'interval': 1}),\n 'electrodes': DatasetBuilder('electrodes', RegionBuilder([0, 2], table_builder),\n attributes={\n 'neurodata_type': 'ElectrodeTableRegion',\n 'namespace': 'core',\n 'description': 'the first and third electrodes',\n 'help': 'a subset (i.e. slice or region) of an ElectrodeTable'})}) # noqa: E501\n\n def addContainer(self, nwbfile):\n ''' Should take an NWBFile object and add the container to it '''\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.group)\n nwbfile.set_electrode_table(self.table)\n nwbfile.add_acquisition(self.container)\n\n def test_eg_ref(self):\n read = self.roundtripContainer()\n row1 = read.electrodes[0]\n row2 = read.electrodes[1]\n self.assertIsInstance(row1['group'], ElectrodeGroup) # noqa: F405\n self.assertIsInstance(row2['group'], ElectrodeGroup) # noqa: F405\n\n\nclass TestMultiElectricalSeries(base.TestDataInterfaceIO):\n\n def setUpElectricalSeriesContainers(self):\n TestElectricalSeriesIO.make_electrode_table(self)\n region1 = ElectrodeTableRegion(self.table, [0, 2], 'the first and third electrodes') # noqa: F405\n region2 = ElectrodeTableRegion(self.table, [1, 3], 'the second and fourth electrodes') # noqa: F405\n data1 = list(zip(range(10), range(10, 20)))\n data2 = list(zip(reversed(range(10)), reversed(range(10, 20))))\n timestamps = list(map(lambda x: x/10, range(10)))\n es1 = ElectricalSeries('test_eS1', 'a hypothetical source', data1, region1, timestamps=timestamps) # noqa: F405\n es2 = ElectricalSeries('test_eS2', 'a hypothetical source', data2, region2, timestamps=timestamps) # noqa: F405\n return (es1, es2)\n\n def setUpElectricalSeriesBuilders(self):\n table_builder = TestElectricalSeriesIO.get_table_builder(self)\n data = list(zip(range(10), range(10, 20)))\n timestamps = list(map(lambda x: x/10, range(10)))\n es1 = GroupBuilder('test_eS1',\n attributes={'source': 'a hypothetical source',\n 'namespace': base.CORE_NAMESPACE,\n 'comments': 'no comments',\n 'description': 'no description',\n 'neurodata_type': 'ElectricalSeries',\n 'help': 'Stores acquired voltage data from extracellular recordings'},\n datasets={'data': DatasetBuilder('data',\n data,\n attributes={'unit': 'volt',\n 'conversion': 1.0,\n 'resolution': 0.0}),\n 'timestamps': DatasetBuilder('timestamps',\n timestamps,\n attributes={'unit': 'Seconds', 'interval': 1}),\n 'electrodes': DatasetBuilder('electrodes', RegionBuilder([0, 2], table_builder),\n attributes={\n 'neurodata_type': 'ElectrodeTableRegion',\n 'namespace': 'core',\n 'description': 'the first and third electrodes',\n 'help': 'a subset (i.e. 
slice or region) of an ElectrodeTable'})}) # noqa: E501\n data = list(zip(reversed(range(10)), reversed(range(10, 20))))\n es2 = GroupBuilder('test_eS2',\n attributes={'source': 'a hypothetical source',\n 'namespace': base.CORE_NAMESPACE,\n 'comments': 'no comments',\n 'description': 'no description',\n 'neurodata_type': 'ElectricalSeries',\n 'help': 'Stores acquired voltage data from extracellular recordings'},\n datasets={'data': DatasetBuilder('data',\n data,\n attributes={'unit': 'volt',\n 'conversion': 1.0,\n 'resolution': 0.0}),\n 'timestamps': DatasetBuilder('timestamps',\n timestamps,\n attributes={'unit': 'Seconds', 'interval': 1}),\n 'electrodes': DatasetBuilder('electrodes', RegionBuilder([1, 3], table_builder),\n attributes={\n 'neurodata_type': 'ElectrodeTableRegion',\n 'namespace': 'core',\n 'description': 'the second and fourth electrodes',\n 'help': 'a subset (i.e. slice or region) of an ElectrodeTable'})}) # noqa: E501\n return (es1, es2)\n\n def addContainer(self, nwbfile):\n ''' Should take an NWBFile object and add the container to it '''\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.group)\n nwbfile.set_electrode_table(self.table)\n nwbfile.add_acquisition(self.container)\n\n def setUpContainer(self):\n raise unittest.SkipTest('Cannot run test unless addContainer is implemented')\n\n def setUpBuilder(self):\n raise unittest.SkipTest('Cannot run test unless addContainer is implemented')\n\n\nclass TestLFP(TestMultiElectricalSeries):\n\n def setUpContainer(self):\n es = self.setUpElectricalSeriesContainers()\n ret = LFP('LFP roundtrip test', es) # noqa: F405\n return ret\n\n def setUpBuilder(self):\n es = self.setUpElectricalSeriesBuilders()\n ret = GroupBuilder('LFP',\n attributes={'source': 'LFP roundtrip test',\n 'namespace': base.CORE_NAMESPACE,\n 'neurodata_type': 'LFP',\n 'help': ('LFP data from one or more channels. Filter properties should be '\n 'noted in the ElectricalSeries')},\n groups={'test_es1': es[0], 'test_es2': es[1]})\n return ret\n\n\nclass TestFilteredEphys(TestMultiElectricalSeries):\n\n def setUpContainer(self):\n es = self.setUpElectricalSeriesContainers()\n ret = FilteredEphys('FilteredEphys roundtrip test', es) # noqa: F405\n return ret\n\n def setUpBuilder(self):\n es = self.setUpElectricalSeriesBuilders()\n ret = GroupBuilder('FilteredEphys',\n attributes={'source': 'FilteredEphys roundtrip test',\n 'namespace': base.CORE_NAMESPACE,\n 'neurodata_type': 'FilteredEphys',\n 'help': ('Ephys data from one or more channels that is subjected to filtering, '\n 'such as for gamma or theta oscillations (LFP has its own interface). 
'\n 'Filter properties should be noted in the ElectricalSeries')},\n groups={'test_es1': es[0], 'test_es2': es[1]})\n return ret\n\n\nclass TestClusteringIO(base.TestDataInterfaceIO):\n\n def setUpBuilder(self):\n return GroupBuilder('Clustering',\n attributes={\n 'help': 'Clustered spike data, whether from automatic clustering tools (eg, klustakwik) or as a result of manual sorting', # noqa: E501\n 'source': \"an example source for Clustering\",\n 'neurodata_type': 'Clustering',\n 'namespace': base.CORE_NAMESPACE},\n datasets={\n 'num': DatasetBuilder('num', [0, 1, 2, 0, 1, 2]),\n 'times': DatasetBuilder('times', list(range(10, 61, 10))),\n 'peak_over_rms': DatasetBuilder('peak_over_rms', [100, 101, 102]),\n 'description': DatasetBuilder('description', \"A fake Clustering interface\")})\n\n def setUpContainer(self):\n return Clustering(\"an example source for Clustering\", \"A fake Clustering interface\", # noqa: F405\n [0, 1, 2, 0, 1, 2], [100, 101, 102], list(range(10, 61, 10)))\n"
]
| [
[
"numpy.array_equal"
]
]
|
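A minimal usage sketch of the container built in `TestUnitTimesIO.setUpContainer` in the row above, assuming the legacy pynwb release this test suite targets; the constructor and method calls are copied from the test itself, not from current pynwb.

```python
from pynwb.misc import UnitTimes

ut = UnitTimes('UnitTimes integration test', name='UnitTimesTest')
ut.add_spike_times(0, [0, 1, 2])   # unit id, spike times
ut.add_spike_times(1, [3, 4, 5])
print(ut.get_unit_spike_times(0))  # expected to give back [0, 1, 2]
```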
ksouvik52/hiresnn2021 | [
"05e48a0d87dcb2c2c18897c144b0e50e72967632"
]
| [
"attack_model_spike_cnt.py"
]
| [
"import torch.nn as nn\nimport math\nimport torch.nn.functional as F\nimport torch\nimport copy\nimport numpy as np\n\nclass Attack(object):\n\n def __init__(self, dataloader, criterion=None, gpu_id=0, \n epsilon=0.031, attack_method='pgd'):\n \n if criterion is not None:\n self.criterion = criterion\n else:\n self.criterion = nn.CrossEntropyLoss()\n \n self.dataloader = dataloader\n self.epsilon = epsilon\n self.gpu_id = gpu_id #this is integer\n\n if attack_method == 'fgsm':\n self.attack_method = self.fgsm\n elif attack_method == 'pgd':\n self.attack_method = self.pgd \n \n def update_params(self, epsilon=None, dataloader=None, attack_method=None):\n if epsilon is not None:\n self.epsilon = epsilon\n if dataloader is not None:\n self.dataloader = dataloader\n \n if attack_method is not None:\n if attack_method == 'fgsm':\n self.attack_method = self.fgsm\n elif attack_method == 'pgd':\n self.attack_method = self.pgd\n\n ## For SNN pgd takes two more args: mean and std to manually perform normalization for \n ## each of the k iterated perturbed data generated intermediately. \n def fgsm(self, model, data, target, args, data_min=0, data_max=1):\n \n if args.dataset == 'CIFAR10':\n mean = torch.Tensor(np.array([0.4914, 0.4822, 0.4465])[:, np.newaxis, np.newaxis])\n mean = mean.expand(3, 32, 32).cuda()\n std = torch.Tensor(np.array([0.2023, 0.1994, 0.2010])[:, np.newaxis, np.newaxis])\n std = std.expand(3, 32, 32).cuda()\n if args.dataset == 'CIFAR100':\n mean = torch.Tensor(np.array([0.5071,0.4867,0.4408])[:, np.newaxis, np.newaxis])\n mean = mean.expand(3, 32, 32).cuda()\n std = torch.Tensor(np.array([0.2675,0.2565,0.2761])[:, np.newaxis, np.newaxis])\n std = std.expand(3, 32, 32).cuda()\n\n model.eval()\n # perturbed_data = copy.deepcopy(data)\n perturbed_data = data.clone()\n \n perturbed_data.requires_grad = True\n #As we take the raw un-normalized data, we convert to a normalized data\n # and then feed to model\n perturbed_data_norm = perturbed_data -mean\n perturbed_data_norm.div_(std)\n output,_ = model(perturbed_data_norm)\n #print('perturbed_data.requires_grad:', perturbed_data.requires_grad) \n loss = F.cross_entropy(output, target)\n if perturbed_data.grad is not None:\n perturbed_data.grad.data.zero_()\n\n loss.backward()\n \n # Collect the element-wise sign of the data gradient\n sign_data_grad = perturbed_data.grad.data.sign()\n perturbed_data.requires_grad = False\n\n with torch.no_grad():\n # Create the perturbed image by adjusting each pixel of the input image\n perturbed_data += self.epsilon*sign_data_grad\n # Adding clipping to maintain [min,max] range, default 0,1 for image\n perturbed_data.clamp_(data_min, data_max)\n \n return perturbed_data\n \n ## For SNN pgd takes two more args: mean and std to manually perform normalization for \n ## each of the k iterated perturbed data generated intermediately.\n def pgd(self, model, data, target, k=7, a=0.01, random_start=True,\n d_min=0, d_max=1): #to reduce time for SNN kept k = 3, or else for ANN we use k=7 \n \n mean = torch.Tensor(np.array([0.4914, 0.4822, 0.4465])[:, np.newaxis, np.newaxis])\n mean = mean.expand(3, 32, 32).cuda()\n std = torch.Tensor(np.array([0.2023, 0.1994, 0.2010])[:, np.newaxis, np.newaxis])\n std = std.expand(3, 32, 32).cuda()\n\n model.eval()\n # perturbed_data = copy.deepcopy(data)\n perturbed_data = data.clone() \n perturbed_data.requires_grad = True\n \n data_max = data + self.epsilon\n data_min = data - self.epsilon\n data_max.clamp_(d_min, d_max)\n data_min.clamp_(d_min, d_max)\n\n if 
random_start:\n with torch.no_grad():\n perturbed_data.data = data + perturbed_data.uniform_(-1*self.epsilon, self.epsilon)\n perturbed_data.data.clamp_(d_min, d_max)\n \n for _ in range(k):\n ##for SNNs we don't have a mean, std layer separately, so we manually do mean\n ## subtraction here with every perturbed data generated\n\n in1 = perturbed_data - mean\n in1.div_(std)\n output,_ = model( in1 )\n #print('output shape:{}, target shape:{}', output.shape, target.shape) \n loss = F.cross_entropy(output, target)\n \n if perturbed_data.grad is not None:\n perturbed_data.grad.data.zero_()\n \n loss.backward()\n data_grad = perturbed_data.grad.data\n \n with torch.no_grad():\n perturbed_data.data += a * torch.sign(data_grad)\n perturbed_data.data = torch.max(torch.min(perturbed_data, data_max),\n data_min)\n perturbed_data.requires_grad = False\n \n return perturbed_data\n"
]
| [
[
"numpy.array",
"torch.min",
"torch.no_grad",
"torch.sign",
"torch.nn.functional.cross_entropy",
"torch.nn.CrossEntropyLoss"
]
]
|
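The `fgsm()` method in the row above is tied to its SNN/CIFAR pipeline; the same single-step sign-of-gradient attack can be sketched on a throwaway linear classifier to show the core mechanics. The model, shapes and epsilon below are arbitrary placeholders, not the repository's configuration.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

torch.manual_seed(0)
model = nn.Linear(10, 3)                 # stand-in for the real network
model.eval()
data = torch.rand(4, 10)                 # inputs already scaled to [0, 1]
target = torch.tensor([0, 1, 2, 0])
epsilon = 0.031

perturbed = data.clone()
perturbed.requires_grad = True
loss = F.cross_entropy(model(perturbed), target)
loss.backward()

with torch.no_grad():
    perturbed += epsilon * perturbed.grad.sign()   # one signed-gradient step
    perturbed.clamp_(0.0, 1.0)                     # stay in the valid range
print((perturbed - data).abs().max().item())       # bounded by epsilon
```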
realgt/gpt-2 | [
"81258d0584ba8f3f436620361d0e035ab62deb54"
]
| [
"src/interactive_conditional_samples.py"
]
| [
"#!/usr/bin/env python3\n\nimport fire\nimport json\nimport os\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nimport model, sample, encoder\n\ndef interact_model(\n model_name='117M',\n seed=None,\n nsamples=1,\n batch_size=1,\n length=None,\n temperature=1,\n top_k=0,\n top_p=0.0\n):\n \"\"\"\n Interactively run the model\n :model_name=117M : String, which model to use\n :seed=None : Integer seed for random number generators, fix seed to reproduce\n results\n :nsamples=1 : Number of samples to return total\n :batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.\n :length=None : Number of tokens in generated text, if None (default), is\n determined by model hyperparameters\n :temperature=1 : Float value controlling randomness in boltzmann\n distribution. Lower temperature results in less random completions. As the\n temperature approaches zero, the model will become deterministic and\n repetitive. Higher temperature results in more random completions.\n :top_k=0 : Integer value controlling diversity. 1 means only 1 word is\n considered for each step (token), resulting in deterministic completions,\n while 40 means 40 words are considered at each step. 0 (default) is a\n special setting meaning no restrictions. 40 generally is a good value.\n :top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,\n overriding top_k if set to a value > 0. A good setting is 0.9.\n \"\"\"\n if batch_size is None:\n batch_size = 1\n assert nsamples % batch_size == 0\n\n enc = encoder.get_encoder(model_name)\n hparams = model.default_hparams()\n with open(os.path.join('models', model_name, 'hparams.json')) as f:\n dict2 = json.load(f)\n for key, value in hparams.items():\n hparams[key] = dict2[key]\n\n if length is None:\n length = hparams['n_ctx']\n elif length > hparams['n_ctx']:\n raise ValueError(\"Can't get samples longer than window size: %s\" % hparams['n_ctx'])\n\n with tf.Session(graph=tf.Graph()) as sess:\n context = tf.placeholder(tf.int32, [batch_size, None])\n np.random.seed(seed)\n tf.set_random_seed(seed)\n output = sample.sample_sequence(\n hparams=hparams, length=length,\n context=context,\n batch_size=batch_size,\n temperature=temperature, top_k=top_k, top_p=top_p\n )\n\n saver = tf.train.Saver()\n ckpt = tf.train.latest_checkpoint(os.path.join('models', model_name))\n saver.restore(sess, ckpt)\n\n while True:\n raw_text = input(\"Model prompt >>> \")\n while not raw_text:\n print('Prompt should not be empty!')\n raw_text = input(\"Model prompt >>> \")\n context_tokens = enc.encode(raw_text)\n generated = 0\n for _ in range(nsamples // batch_size):\n out = sess.run(output, feed_dict={\n context: [context_tokens for _ in range(batch_size)]\n })[:, len(context_tokens):]\n for i in range(batch_size):\n generated += 1\n text = enc.decode(out[i])\n print(\"=\" * 40 + \" SAMPLE \" + str(generated) + \" \" + \"=\" * 40)\n print(text)\n print(\"=\" * 80)\n\nif __name__ == '__main__':\n fire.Fire(interact_model)\n"
]
| [
[
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.Graph",
"numpy.random.seed",
"tensorflow.compat.v1.train.Saver",
"tensorflow.compat.v1.set_random_seed"
]
]
|
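The `top_k` argument documented in the row above restricts sampling to the k most likely tokens; this toy NumPy function (not part of the GPT-2 code, logits invented) shows the effect of that restriction on the resulting probability distribution.

```python
import numpy as np

def top_k_probs(logits, k):
    logits = np.asarray(logits, dtype=float)
    if k > 0:                              # k == 0 means "no restriction"
        cutoff = np.sort(logits)[-k]
        logits = np.where(logits < cutoff, -np.inf, logits)
    exp = np.exp(logits - logits.max())    # exp(-inf) -> 0 drops the rest
    return exp / exp.sum()

print(top_k_probs([2.0, 1.0, 0.1, -1.0], k=0))  # full softmax
print(top_k_probs([2.0, 1.0, 0.1, -1.0], k=2))  # mass only on the top two
```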
k-khr/python_for_bayes | [
"3dc2e2b2f20653489b5cb4880dd09599918d5056"
]
| [
"pybayes_mcmc_sv.py"
]
| [
"# -*- coding: utf-8 -*-\n#%% NumPyの読み込み\nimport numpy as np\n# SciPyのstatsモジュールの読み込み\nimport scipy.stats as st\n# Pandasの読み込み\nimport pandas as pd\n# PyMCの読み込み\nimport pymc3 as pm\n# MatplotlibのPyplotモジュールの読み込み\nimport matplotlib.pyplot as plt\n# 日本語フォントの設定\nfrom matplotlib.font_manager import FontProperties\nimport sys\nif sys.platform.startswith('win'):\n FontPath = 'C:\\\\Windows\\\\Fonts\\\\meiryo.ttc'\nelif sys.platform.startswith('darwin' ):\n FontPath = '/System/Library/Fonts/ヒラギノ角ゴシック W4.ttc'\nelif sys.platform.startswith('linux'):\n FontPath = '/usr/share/fonts/truetype/takao-gothic/TakaoPGothic.ttf'\nelse:\n sys.exit('このPythonコードが対応していないOSを使用しています.')\njpfont = FontProperties(fname=FontPath)\n#%% ドル円為替レート日次データの読み込み\n\"\"\"\n The Pacific Exchange Rate Serviceより入手\n http://fx.sauder.ubc.ca/data.html\n\"\"\"\ndata = pd.read_csv('dollaryen.csv', index_col=0)\ny = 100 * np.diff(np.log(data.values.ravel()))\nn = y.size\nseries_date = pd.to_datetime(data.index[1:])\n#%% SVモデルの設定\nsv_model = pm.Model()\nwith sv_model:\n nu = pm.Exponential('nu', 0.2)\n sigma = pm.HalfCauchy('sigma', beta=1.0)\n rho = pm.Uniform('rho', lower=-1.0, upper=1.0)\n omega = pm.HalfCauchy('omega', beta=1.0)\n log_vol = pm.AR('log_vol', rho, sd=omega, shape=n,\n init=pm.Normal.dist(sd=omega/pm.math.sqrt(1 - rho**2)))\n observation = pm.StudentT('y', nu, sd=sigma*pm.math.exp(log_vol),\n observed=y)\n#%% 事後分布からのサンプリング\nn_draws = 5000\nn_chains = 4\nn_tune = 2000\nwith sv_model:\n trace = pm.sample(draws=n_draws, chains=n_chains, tune=n_tune,\n random_seed=123,\n nuts_kwargs=dict(target_accept=0.9))\nparam_names = ['nu', 'sigma', 'rho', 'omega']\nprint(pm.summary(trace, varnames=param_names))\n#%% 事後分布のグラフの作成\nlabels = ['$\\\\nu$', '$\\\\sigma$', '$\\\\rho$', '$\\\\omega$']\nk = len(labels)\nx_minimum = [ 3.0, 0.15, 0.9, 0.02]\nx_maximum = [17.0, 0.85, 1.0, 0.16]\nfig1, ax1 = plt.subplots(k, 2, num=1, figsize=(8, 1.5*k), facecolor='w')\nfor index in range(k):\n mc_trace = trace[param_names[index]]\n x_min = x_minimum[index]\n x_max = x_maximum[index]\n x = np.linspace(x_min, x_max, 250)\n posterior = st.gaussian_kde(mc_trace).evaluate(x)\n ax1[index, 0].plot(mc_trace, 'k-', linewidth=0.1)\n ax1[index, 0].set_xlim(1, n_draws*n_chains)\n ax1[index, 0].set_ylabel(labels[index], fontproperties=jpfont)\n ax1[index, 1].plot(x, posterior, 'k-')\n ax1[index, 1].set_xlim(x_min, x_max)\n ax1[index, 1].set_ylim(0, 1.1*posterior.max())\n ax1[index, 1].set_ylabel('確率密度', fontproperties=jpfont)\nax1[k-1, 0].set_xlabel('乱数系列', fontproperties=jpfont)\nax1[k-1, 1].set_xlabel('周辺事後分布', fontproperties=jpfont)\nplt.tight_layout()\nplt.savefig('pybayes_fig_sv_posterior.png', dpi=300)\nplt.show()\n#%% ボラティリティのプロット\nvol = np.median(np.tile(trace['sigma'], \n (n, 1)).T * np.exp(trace['log_vol']), axis=0)\nfig2 = plt.figure(num=2, facecolor='w')\nplt.plot(series_date, y, 'k-', linewidth=0.5, label='ドル円為替レート')\nplt.plot(series_date, 2.0 * vol, 'k:', linewidth=0.5, label='2シグマ区間')\nplt.plot(series_date, -2.0 * vol, 'k:', linewidth=0.5)\nplt.xlim(series_date[0], series_date[-1])\nplt.xticks(['2014', '2015', '2016', '2017'])\nplt.xlabel('営業日', fontproperties=jpfont)\nplt.ylabel('日次変化率 (%)', fontproperties=jpfont)\nplt.legend(loc='best', frameon=False, prop=jpfont)\nplt.savefig('pybayes_fig_sv_volatility.png', dpi=300)\nplt.show()\n"
]
| [
[
"pandas.to_datetime",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.exp",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"numpy.linspace",
"scipy.stats.gaussian_kde",
"numpy.tile",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.xticks"
]
]
|
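For readers skimming the `pybayes_mcmc_sv.py` row above: the PyMC3 calls in that file specify the stochastic volatility model summarized below. The notation ($y_t$ for the percent log-return, $h_t$ for the log-volatility state) is introduced here for readability and does not appear in the source file.

```latex
% Model implied by the pm.AR / pm.StudentT calls in pybayes_mcmc_sv.py
% y_t: daily log-return in percent (100 * diff(log price)), h_t: log-volatility state
\begin{align*}
  y_t &\sim \mathrm{StudentT}\!\left(\nu,\; 0,\; \sigma\, e^{h_t}\right), \\
  h_t &= \rho\, h_{t-1} + \eta_t, \qquad \eta_t \sim \mathcal{N}(0, \omega^2), \\
  h_0 &\sim \mathcal{N}\!\left(0,\; \tfrac{\omega^2}{1-\rho^2}\right).
\end{align*}
% Priors in the script: nu ~ Exponential(0.2), sigma ~ HalfCauchy(beta=1),
% rho ~ Uniform(-1, 1), omega ~ HalfCauchy(beta=1).
```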
resuelve/silk-ml | [
"7cabfdc50fcbc41efb5360afae0f8081c2df2b8b"
]
| [
"silk_ml/scores.py"
]
| [
"from sklearn.metrics import confusion_matrix\n\n\ndef ls_score(y, y_predicted):\n \"\"\" Score that punishes the false negative values, that goes from -1 to 1\n\n Args:\n y (list): 1d array-like, or label indicator array / sparse matrix\n ground truth (correct) labels.\n y_predicted (list):1d array-like, or label indicator array / sparse\n matrix predicted labels, as returned by a classifier.\n\n Returns:\n float: A score between -1 and 1 that indicates the correctness of the\n classification\n \"\"\"\n conf_matrix = confusion_matrix(y, y_predicted)\n assert conf_matrix.shape == (2, 2)\n [tn, fp], [fn, tp] = conf_matrix\n assert (tp + fn) != 0\n assert (tn + fp) != 0\n tpr = tp / (tp + fn)\n tnr = tn / (tn + fp)\n return tpr * (tnr + 1) - 1\n"
]
| [
[
"sklearn.metrics.confusion_matrix"
]
]
|
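The `ls_score` function in the `silk_ml/scores.py` row above rewards sensitivity on the positive class by penalizing false negatives. A minimal usage sketch follows; the labels are invented for illustration, and it assumes the `silk_ml` package is installed and importable.

```python
# Minimal usage sketch for silk_ml.scores.ls_score; the labels below are illustrative only.
from silk_ml.scores import ls_score

y_true = [1, 0, 1, 1, 0, 0, 1, 0]
y_pred = [1, 0, 0, 1, 0, 1, 1, 0]

# Here tp=3, fn=1, tn=3, fp=1, so tpr = tnr = 0.75 and the score is
# 0.75 * (0.75 + 1) - 1 = 0.3125.
print(ls_score(y_true, y_pred))
```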
yuzhou42/pyOptimalMotionPlanning | [
"e95b73e1a2442e60d623258094fe249557a9e75b"
]
| [
"pomp/planners/optimization.py"
]
| [
"from __future__ import print_function,division\nfrom builtins import range\nfrom six import iteritems\n\nfrom ..spaces.objective import ObjectiveFunction\nfrom ..spaces.sets import Set\nfrom ..spaces.controlspace import ControlSpace\nfrom ..spaces.configurationspace import ConfigurationSpace\nimport math\nimport numpy as np\n\n\n\nclass iLQR:\n \"\"\"An implementation of the iLQR trajectory optimization algorithm.\n \n This performs a conversion of obstacles into smooth barrier functions and\n optimizes the objective function augmented with the barrier value.\n \n Controls are projected onto the feasible control set at each step.\n \n Attributes:\n xref (array of size (T+1,n)): the optimized reference state trajectory\n uref (array of size (T,m)): the optimized reference control trajectory\n gains (pair of arrays): a pair (K,k) of arrays so that for each time step, \n the optimized control is given by\n \n u(x,t) ~= K[t]*(x - xref[t]) + k[t] + uref[t]\n \n K has dimension (T,m,n) and k has dimension (T,m)\n \n value (triple of T+1-lists): a triple (V,Vx,Vxx) of arrays so that for each\n time step, the quadratic expansion of the value function is given by:\n \n V(x,t) ~= 1/2 dx^T Vxx[t] dx + dx^T Vx[t] + V[t]\n \n with dx = x-xref[t]. \n \n costGradients (array of size T,m): the gradients of total cost w.r.t.\n controls.\n \"\"\"\n def __init__(self,controlSpace,objective,goalSet=None,clearanceToCostFn='square',clearanceToCostWeight=0.1):\n assert isinstance(objective,ObjectiveFunction)\n assert isinstance(goalSet,Set)\n self.controlSpace = controlSpace\n self.rawObjective = objective\n self.cspace = controlSpace.configurationSpace()\n self.goalSet = goalSet\n print(\"iLQR Objective function\",objective,\"augmented with\",clearanceToCostFn,\"barrier\")\n if clearanceToCostFn is not None:\n self.objective = BarrierAugmentedObjective(objective,controlSpace,self.goalSet,clearanceToCostFn,clearanceToCostWeight)\n else:\n self.objective = objective\n self.clearanceToCostWeight = clearanceToCostWeight\n self.adaptiveClearanceWeight = False\n self.xref = None\n self.uref = None\n self.gains = None\n self.value = None\n self.costGradients = None\n\n def run(self,x,u,maxIters,maxInnerIters=10,xtol=1e-7,gtol=1e-7,ftol=1e-7,damping=1e-5):\n if len(u)==0:\n raise ValueError(\"Cannot optimize with no controls\")\n if not isinstance(self.objective,BarrierAugmentedObjective):\n maxInnerIters = 1\n if not hasattr(x[0],'__iter__'):\n #assume its a single state\n x0 = x\n x = [x0]\n for ut in u:\n x.append(self.controlSpace.nextState(x[-1],ut))\n assert len(x) == len(u)+1\n self.xref = np.array([xt for xt in x])\n self.uref = np.array([ut for ut in u])\n T = len(u)\n m = len(u[0])\n n = len(x[0])\n self.gains = (np.zeros((T,m,n)),np.zeros((T,m)))\n self.value = (np.zeros((T+1)),np.zeros((T+1,n)),np.zeros((T+1,n,n)))\n self.costGradients = np.zeros((T,m))\n if isinstance(self.objective,BarrierAugmentedObjective):\n self.objective.setBarrierFromTrajectory(self.xref,self.uref)\n feasible = self.objective.isFeasible()\n if feasible:\n print(\"iLQR: Starting from a feasible point with clearance for state\",self.objective.barrierClearance,\"control\",self.objective.controlBarrierClearance,\"goal\",self.objective.goalBarrierClearance)\n else:\n print(\"iLQR: Starting from an infeasible point with clearance for state\",self.objective.barrierClearance,\"control\",self.objective.controlBarrierClearance,\"goal\",self.objective.goalBarrierClearance)\n else:\n feasible = True\n if not 
self.controlSpace.checkDerivatives(x[0],u[0]) or not self.controlSpace.checkDerivatives(x[-2],u[-1]):\n input(\"Press enter to continue >\")\n if not self.objective.checkDerivatives(x[0],u[0]) or not self.objective.checkDerivatives(x[-2],u[-1]):\n input(\"Press enter to continue >\")\n \n #first cost backup\n costTraj = self.value[0]\n costTraj[:] = self.evalCosts(self.xref,self.uref)\n print(\"INITIAL TRAJECTORY\")\n for (a,b) in zip(x,u):\n print(\" \",a,b)\n print(\" \",x[-1])\n print(\"COST TRAJECTORY\",costTraj)\n print(\"OBJECTIVE TYPE\",self.objective.__class__.__name__)\n J0 = costTraj[0]\n if not math.isfinite(J0):\n raise ValueError(\"Need to provide a feasible path as input?\")\n J0raw = self.rawObjective.cost(self.xref,self.uref)\n print(\"INITIAL AUGMENTED COST\",J0,\"TRUE COST\",J0raw)\n \n for iter in range(maxIters):\n alpha = 1.0\n for inner in range(maxInnerIters):\n self.backward()\n g = self.costGradients\n gnorm = np.linalg.norm(g)\n if gnorm < gtol:\n return True,'Convergence to stationary point'\n knorm = np.linalg.norm(self.gains[1])\n print(\"iLQR: Norm of nominal step size: %.3f, gradient norm %.3f\"%(knorm,gnorm))\n if np.dot(g.flatten(),self.gains[1].flatten()) > 0:\n print(\"WARNING: LQR step has direction reverse from gradient\")\n self.gains[1][:] = -g\n knorm = gnorm\n #test gradient descent\n #self.gains[1][:] = -g\n #print(\" Gains:\",self.gains[1])\n #print(\" Gradients\",g)\n lineSearchIters = 0\n alpha0 = alpha\n while alpha*knorm > xtol and lineSearchIters < maxIters:\n lineSearchIters += 1\n xu = self.forward(alpha)\n if xu is None:\n #failure, shrink step size\n alpha *= 0.5\n continue\n x,u = xu\n Ja = self.evalCosts(x,u,cbranch=J0) \n if Ja[0] < J0 and abs(Ja[0]-self.objective.cost(x,u)) > 1e-4:\n print(\"Uh... 
difference in costs?\",Ja[0],\"vs\",self.objective.cost(x,u))\n input(\"Press enter to continue >\")\n if Ja[0] < J0:\n #accept step\n self.xref = x\n self.uref = u\n self.value[0][:] = Ja\n print(\"iLQR: Step length %.3g reduced augmented cost to %.3f < %.3f\"%(alpha,Ja[0],J0))\n print(\" standard cost changed from %.3f to %.3f\"%(J0raw,self.rawObjective.cost(self.xref,self.uref)))\n #print(\" Endpoints\",x[0],x[1])\n #print(\" Controls\",u)\n if alpha == alpha0:\n #succeeded on first step, increase default step size\n alpha *= 2.5\n if alpha > 1.0:\n alpha = 1.0\n break\n else:\n #failure, shrink step size\n #print(\"Rejected step to cost\",Ja[0])\n alpha *= 0.5\n \n self.value[0][:] = Ja\n J0 = Ja[0]\n J0raw = self.rawObjective.cost(self.xref,self.uref)\n\n if alpha*knorm <= xtol or lineSearchIters == maxIters:\n print(\"iLQR: Inner iterations stalled at\",lineSearchIters,\"LS iters, step size\",alpha,\", gradient norm\",knorm,\"< tolerance\",xtol)\n break\n\n print(\"iLQR: Outer iteration done, clearance for state\",self.objective.barrierClearance,\"control\",self.objective.controlBarrierClearance,\"goal\",self.objective.goalBarrierClearance)\n #next outer iteration\n \"\"\"\n if isinstance(self.objective,BarrierAugmentedObjective):\n self.objective.barrierWeight *= 0.5\n self.objective.controlBarrierWeight *= 0.5\n self.objective.goalBarrierWeight *= 0.5\n \"\"\"\n\n if not isinstance(self.objective,BarrierAugmentedObjective) or max(self.objective.barrierWeight,self.objective.controlBarrierWeight,self.objective.goalBarrierWeight) < 1e-4:\n print(\" COST\",self.rawObjective.cost(self.xref,self.uref))\n return True,'Convergence on x'\n else:\n if isinstance(self.objective,BarrierAugmentedObjective):\n self.objective.updateBarrierFromTrajectory(self.xref,self.uref)\n Ja = self.evalCosts(self.xref,self.uref)\n if self.objective.isFeasible() and abs(Ja[0]-J0) < ftol:\n return True,'Convergence on f'\n self.value[0][:] = Ja\n J0 = Ja[0]\n J0raw = self.rawObjective.cost(self.xref,self.uref)\n print(\"AUGMENTED COST\",Ja[0],\"TRUE COST\",J0raw,\"FEASIBLE\",feasible)\n input()\n\n print(\"iLQR: final clearance for state\",self.objective.barrierClearance,\"control\",self.objective.controlBarrierClearance,\"goal\",self.objective.goalBarrierClearance)\n return False,'Max iters reached'\n \n def evalCosts(self,x,u,cbranch=float('inf')):\n \"\"\"Returns vector of value function evaluated along trajectory.\"\"\"\n T = len(u)\n assert T+1 == len(x)\n costs = np.empty(len(x))\n costs[-1] = self.objective.terminal(x[T])\n if costs[-1] > cbranch:\n costs[0] = costs[-1]\n return costs\n for i in range(T)[::-1]:\n xt = x[i]\n ut = u[i]\n c = self.objective.incremental(xt,ut)\n costs[i] = costs[i+1] + c\n if costs[i] > cbranch:\n costs[0] = costs[i]\n return costs\n return costs\n\n def backward(self,damping=1e-3):\n \"\"\"Computes the LQR backup centered around self.xref,self.uref.\n \n Will fill out self.gains, self.costGradients, and the 2nd and 3rd\n elements of self.value\n \"\"\"\n T = len(self.gains[0])\n Vx = self.objective.terminal_gradient(self.xref[T])\n Vxx = self.objective.terminal_hessian(self.xref[T])\n if np.linalg.norm(Vxx-Vxx.T) > 1e-3:\n print(\"ERROR IN TERMINAL HESSIAN\",self.xref[T])\n print(Vxx)\n raise ValueError()\n self.value[1][-1] = Vx\n self.value[2][-1] = Vxx\n print(\"iLQR BACKWARDS PASS\")\n #print(\" Terminal cost\",self.objective.terminal(self.xref[T]))\n #print(\" Terminal grad\",Vx)\n #print(\" Terminal Hessian\",Vxx)\n for i in range(T)[::-1]:\n #print(\"timestep\",i)\n 
xt,ut = self.xref[i],self.uref[i]\n fx,fu = self.controlSpace.nextState_jacobian(xt,ut)\n cx,cu = self.objective.incremental_gradient(xt,ut)\n cxx,cxu,cuu = self.objective.incremental_hessian(xt,ut)\n #print(\" Next state jacobian x\",fx)\n #print(\" Next state jacobian u\",fu)\n Qxx = fx.T.dot(Vxx.dot(fx))+cxx\n Quu = fu.T.dot(Vxx.dot(fu))+cuu\n Qxu = fx.T.dot(Vxx.dot(fu))+cxu\n Vxc = Vx\n Qx = cx + fx.T.dot(Vxc)\n Qu = cu + fu.T.dot(Vxc)\n if damping > 0:\n Quu = (Quu + Quu.T)*0.5\n Quu_evals, Quu_evecs = np.linalg.eig(Quu)\n Quu_evals[Quu_evals < 0] = 0.0\n Quu_evals += damping\n QuuInv = np.dot(Quu_evecs,np.dot(np.diag(1.0/Quu_evals),Quu_evecs.T))\n else:\n QuuInv = np.linalg.pinv(Quu)\n K = -QuuInv.dot(Qxu.T)\n k = -QuuInv.dot(Qu)\n temp = Qxu.dot(K)\n Vxx = Qxx + temp + temp.T + K.T.dot(Quu.dot(K))\n Vx = Qx + Qxu.dot(k) + K.T.dot(Qu+Quu.dot(k))\n #print(\" Vf grad\",Vx)\n #print(\" Vf Hessian\",Vxx)\n self.gains[0][i] = K\n self.gains[1][i] = k\n self.value[1][i] = Vx\n self.value[2][i] = Vxx\n self.costGradients[i] = Qu\n\n def forward(self,alpha=1.0):\n \"\"\"Computes the iLQR forward pass, assuming the gain matrices have been computed\"\"\"\n x = np.empty(self.xref.shape)\n u = np.empty(self.uref.shape)\n x[0] = self.xref[0]\n u[0] = self.uref[0]\n K,k = self.gains\n for i in range(self.uref.shape[0]):\n if i == 0:\n du = k[0]\n else:\n du = k[i] + K[i].dot(x[i]-self.xref[i])\n u[i] = self.uref[i] + alpha*du\n \"\"\"\n if not self.controlSpace.controlSet(x[i]).contains(u[i]):\n try:\n ui = self.controlSpace.controlSet(x[i]).project(list(u[i]))\n if ui is None:\n print(\"Projection of control failed?\")\n return None\n u[i] = ui\n except NotImplementedError:\n #projection may not be implemented... TODO: address control constraints some other way\n pass\n \"\"\"\n x[i+1] = self.controlSpace.nextState(x[i],u[i])\n return (x,u)\n \n \nclass BarrierAugmentedObjective(ObjectiveFunction):\n def __init__(self,base,controlSpace,goalSet,barrierType,barrierWeight):\n \"\"\"Barrier types can be 'log', 'inv', 'square'.\n \n Barrier function depends on distance(s) to constraints.\n - 'log': -log(d) if d > 0, inf otherwise\n - 'inv': 1/d if d > 0, inf otherwise\n - 'square': 0 if d > 0, d^2 otherwise (soft constraint)\n \"\"\"\n self.base = base\n if isinstance(controlSpace,ControlSpace):\n self.controlSpace = controlSpace\n self.cspace = controlSpace.configurationSpace()\n else:\n assert isinstance(controlSpace,ConfigurationSpace)\n self.controlSpace = None\n self.cspace = controlSpace\n self.goalSet = goalSet\n self.barrierType = barrierType\n self.barrierWeight = barrierWeight\n self.barrierClearance = 0.0\n self.barrierShift = 0.0\n self.controlBarrierWeight = barrierWeight\n self.controlBarrierClearance = 0.0\n self.controlBarrierShift = 0.0\n self.goalBarrierWeight = barrierWeight\n self.goalBarrierClearance = 0.0\n self.goalBarrierShift = 0.0\n \n def __str__(self):\n return \"Barrier-augmented \"+str(self.base)+\" barrier \"+self.barrierType\n \n def isHard(self):\n return self.barrierType in ['inv','log']\n \n def setBarrierFromTrajectory(self,x,u,scale=1.5,mindist=1e-5,firstTime=True):\n \"\"\"Evaluates the trajectory clearance and sets the barrier\n offset from a trajectory, ensuring that\n - If the barrier is hard, x,u is feasible under the shifted barrier\n - If x,u is invalid, then the max (unweighted) barrier cost evaluated\n at x,u is equal to `scale`\n \n `mindist` is used so that if the initial point is not strictly\n feasible (or is very close to the boundary) then a positive 
slack is given\n to the constraint.\n \"\"\"\n dmin = None\n dumin = float('inf')\n for xi,ui in zip(x,u):\n d = self.cspace.clearance(xi)\n if dmin is None:\n dmin = np.asarray(d)\n else:\n dmin = np.minimum(dmin,d)\n if self.controlSpace is not None:\n U = self.controlSpace.controlSet(xi)\n dumin = min(dumin,-U.signedDistance(ui))\n self.barrierClearance = dmin\n if self.controlSpace is None:\n self.controlBarrierClearance = 0\n else:\n self.controlBarrierClearance = dumin\n if self.goalSet is not None:\n self.goalBarrierClearance = -self.goalSet.signedDistance(x[-1])\n else:\n self.goalBarrierClearance = 0.0\n if self.isHard():\n if not firstTime:\n oldBarrierShift = self.barrierShift\n oldControlBarrierShift = self.controlBarrierShift\n oldGoalBarrierShift = self.goalBarrierShift\n self.barrierShift = np.minimum(self.barrierClearance,0.0)\n self.controlBarrierShift = min(self.controlBarrierClearance,0.0)\n self.goalBarrierShift = min(self.goalBarrierClearance,0.0)\n if self.barrierType == 'inv':\n #scale = 1/(barrierClearance - barrierShift) => barrierClearance - 1/scale = barrierShift\n self.barrierShift[self.barrierClearance < mindist] -= 1.0/scale\n if self.controlBarrierClearance < mindist: self.controlBarrierShift -= 1.0/scale\n if self.goalBarrierClearance < mindist: self.goalBarrierShift -= 1.0/scale\n elif self.barrierType == 'log':\n #scale = -log(barrierClearance - barrierShift) => barrierShift = barrierClearance - exp(-scale)\n print(self.barrierClearance < mindist,self.barrierClearance,mindist)\n self.barrierShift[self.barrierClearance < mindist] -= math.exp(-scale)\n if self.controlBarrierClearance < mindist: self.controlBarrierShift -= math.exp(-scale)\n if self.goalBarrierClearance < mindist: self.goalBarrierShift -= math.exp(-scale)\n else:\n raise ValueError(\"Invalid barrier string, only log, inv, and square are supported\")\n if not firstTime:\n self.barrierShift = np.maximum(self.barrierShift,oldBarrierShift)\n self.controlBarrierShift = max(self.controlBarrierShift,oldControlBarrierShift)\n self.goalBarrierShift = max(self.goalBarrierShift,oldGoalBarrierShift)\n print(\"Barrier clearances\",self.barrierClearance,self.controlBarrierClearance,self.goalBarrierClearance)\n print(\"Barrier shifts: state\",self.barrierShift,\"control\",self.controlBarrierShift,\"goal\",self.goalBarrierShift)\n print(\" => Cost\",self.cost(x,u))\n input()\n else:\n self.barrierShift = 0.0\n self.controlBarrierShift = 0.0\n self.goalBarrierShift = 0.0\n \n def isFeasible(self):\n return all(v >= 0 for v in self.barrierClearance) and self.controlBarrierClearance >= 0 and self.goalBarrierClearance >= 0\n \n def updateBarrierFromTrajectory(self,x,u):\n oldbc = self.barrierClearance\n oldcbc = self.controlBarrierClearance\n oldgbc = self.goalBarrierClearance\n self.setBarrierFromTrajectory(x,u,firstTime=False)\n print(\"iLQR: clearance on state\",self.barrierClearance,\"control\",self.controlBarrierClearance,\"goal\",self.goalBarrierClearance)\n cold = min(oldbc)\n c = min(self.barrierClearance)\n if c >= 0:\n if cold < 0:\n print(\"iLQR: Switched from infeasible to feasible on state constraint, clearance %.3g -> %.3g\"%(cold,c))\n else:\n self.barrierWeight *= 0.5\n print(\"iLQR: Stayed feasible on state constraint, sharpening constraint to %0.3g\"%(self.barrierWeight,))\n else:\n if cold < 0:\n self.barrierWeight *= 2.5\n print(\"iLQR: Stayed infeasible on state constraint, diffusing constraint to %0.3g\"%(self.barrierWeight,))\n else:\n print(\"iLQR: Switched from feasible to infeasible on 
state constraint, clearance %.3g -> %.3g\"%(cold,c))\n cold = oldcbc\n c = self.controlBarrierClearance\n if c >= 0:\n if cold < 0:\n print(\"iLQR: Switched from infeasible to feasible on control constraint, clearance %.3g -> %.3g\"%(cold,c))\n else:\n self.controlBarrierWeight *= 0.5\n print(\"iLQR: Stayed feasible on control constraint, sharpening constraint to %0.3g\"%(self.controlBarrierWeight,))\n else:\n if cold < 0:\n self.controlBarrierWeight *= 2.5\n print(\"iLQR: Stayed infeasible on control constraint, diffusing constraint to %0.3g\"%(self.controlBarrierWeight,))\n else:\n print(\"iLQR: Switched from feasible to infeasible on control constraint, clearance %.3g -> %.3g\"%(cold,c))\n cold = oldgbc\n c = self.goalBarrierClearance\n if c >= 0:\n if cold < 0:\n print(\"iLQR: Switched from infeasible to feasible on goal constraint, clearance %.3g -> %.3g\"%(cold,c))\n else:\n self.goalBarrierWeight *= 0.5\n print(\"iLQR: Stayed feasible on goal constraint, sharpening constraint to %0.3g\"%(self.goalBarrierWeight,))\n else:\n if cold < 0:\n self.goalBarrierWeight *= 2.5\n print(\"iLQR: Stayed infeasible on goal constraint, diffusing constraint to %0.3g\"%(self.goalBarrierWeight,))\n else:\n print(\"iLQR: Switched from feasible to infeasible on goal constraint, clearance %.3g -> %.3g\"%(cold,c))\n \n def barrierFn(self,c):\n if self.barrierType == 'inv':\n if c <= 0: return float('inf')\n return 1.0/c\n elif self.barrierType == 'log':\n if c <= 0: return float('inf')\n if math.isinf(c): return 0\n return -math.log(c)\n elif self.barrierType == 'square':\n if c < 0: return c**2\n return 0\n else:\n raise ValueError(\"Invalid barrier function\")\n \n def barrierDeriv(self,c):\n if self.barrierType == 'inv':\n if c <= 0: dc = 0.0\n else: dc= -1.0/c**2\n elif self.barrierType == 'log':\n if c <= 0: dc = 0.0\n elif math.isinf(c): dc = 0.0\n else: dc = -1.0/c\n elif self.barrierType == 'square':\n if c < 0: dc = 2*c\n else: dc = 0\n return dc\n \n def barrierDeriv2(self,c):\n if self.barrierType == 'inv':\n if c <= 0: dc = 0.0\n else: dc= 2.0/c**3\n elif self.barrierType == 'log':\n if c <= 0: dc = 0.0\n elif math.isinf(c): dc = 0.0\n else: dc = 1.0/c**2\n elif self.barrierType == 'square':\n if c < 0: dc = 2\n else: dc = 0\n return dc\n \n def barrier(self,x):\n c = self.cspace.clearance(x) - self.barrierShift\n if hasattr(c,'__iter__'):\n return self.barrierWeight*sum(self.barrierFn(v) for v in c)\n else:\n return self.barrierWeight*self.barrierFn(c)\n \n def barrier_gradient(self,x):\n c = self.cspace.clearance(x) - self.barrierShift\n g = self.cspace.clearance_gradient(x)\n if hasattr(c,'__iter__'):\n return self.barrierWeight*sum(self.barrierDeriv(v)*gi for v,gi in zip(c,g))\n else:\n return self.barrierWeight*self.barrierDeriv(c)*g\n \n def barrier_hessian(self,x):\n c = self.cspace.clearance(x) - self.barrierShift\n g = self.cspace.clearance_gradient(x)\n if hasattr(c,'__iter__'):\n return self.barrierWeight*sum(self.barrierDeriv2(v)*np.outer(gi,gi) for v,gi in zip(c,g))\n else:\n return self.barrierWeight*self.barrierDeriv2(c)*np.outer(g,g)\n\n def controlBarrier(self,x,u):\n if self.controlSpace is None: return 0.0\n U = self.controlSpace.controlSet(x)\n c = -U.signedDistance(u) - self.controlBarrierShift\n if U.signedDistance(u) <= 0:\n assert U.contains(u),\"Control set %s signed distance %f but doesn't contain %s\"%(str(U),U.signedDistance(u),str(u))\n else:\n assert not U.contains(u),\"Control set %s signed distance %f but contains %s\"%(str(U),U.signedDistance(u),str(u))\n 
return self.controlBarrierWeight*self.barrierFn(c)\n \n def controlBarrier_gradient(self,x,u):\n if self.controlSpace is None: return None\n U = self.controlSpace.controlSet(x)\n c = -U.signedDistance(u) - self.controlBarrierShift\n g = -U.signedDistance_gradient(u)\n return self.controlBarrierWeight*self.barrierDeriv(c)*g\n \n def controlBarrier_hessian(self,x,u):\n if self.controlSpace is None: return None\n U = self.controlSpace.controlSet(x)\n c = -U.signedDistance(u) - self.controlBarrierShift\n g = -U.signedDistance_gradient(u)\n return self.controlBarrierWeight*self.barrierDeriv2(c)*np.outer(g,g)\n \n def goalBarrier(self,x):\n c = -self.goalSet.signedDistance(x) - self.goalBarrierShift\n return self.goalBarrierWeight*self.barrierFn(c)\n \n def goalBarrier_gradient(self,x):\n c = -self.goalSet.signedDistance(x) - self.goalBarrierShift\n g = -self.goalSet.signedDistance_gradient(x)\n return self.goalBarrierWeight*self.barrierDeriv(c)*g\n \n def goalBarrier_hessian(self,x):\n c = -self.goalSet.signedDistance(x) - self.goalBarrierShift\n g = -self.goalSet.signedDistance_gradient(x)\n return self.goalBarrierWeight*self.barrierDeriv2(c)*np.outer(g,g)\n \n def incremental(self,x,u=None):\n res = self.base.incremental(x,u)+self.barrier(x)\n if u is not None and self.controlSpace is not None:\n res += self.controlBarrier(x,u)\n return res\n \n def terminal(self,x):\n if self.goalSet is not None:\n return self.base.terminal(x)+self.goalBarrier(x)+self.barrier(x)\n return self.base.terminal(x)+self.barrier(x)\n \n \"\"\"\n def incremental_gradient(self,x,u):\n return self.incremental_gradient_diff(x,u)\n def incremental_hessian(self,x,u):\n return self.incremental_hessian_diff(x,u)\n def terminal_gradient(self,x):\n return self.terminal_gradient_diff(x)\n def terminal_hessian(self,x):\n return self.terminal_hessian_diff(x)\n \"\"\"\n def incremental_gradient(self,x,u):\n bx,bu = self.base.incremental_gradient(x,u)\n if u is not None and self.controlSpace is not None:\n bu += self.controlBarrier_gradient(x,u)\n return bx+self.barrier_gradient(x),bu\n \n def incremental_hessian(self,x,u):\n Hx,Hxu,Hu = self.base.incremental_hessian(x,u)\n if u is not None and self.controlSpace is not None:\n Hu += self.controlBarrier_hessian(x,u)\n return Hx+self.barrier_hessian(x),Hxu,Hu\n \n def terminal_gradient(self,x):\n if self.goalSet is not None:\n return self.base.terminal_gradient(x)+self.goalBarrier_gradient(x)+self.barrier_gradient(x)\n return self.base.terminal_gradient(x)+self.barrier_gradient(x)\n \n def terminal_hessian(self,x):\n if self.goalSet is not None:\n return self.base.terminal_hessian(x)+self.goalBarrier_hessian(x)+self.barrier_hessian(x)\n return self.base.terminal_hessian(x)+self.barrier_hessian(x)\n"
]
| [
[
"numpy.array",
"numpy.linalg.norm",
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"numpy.minimum",
"numpy.linalg.pinv",
"numpy.linalg.eig",
"numpy.outer",
"numpy.diag",
"numpy.maximum"
]
]
|
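The `backward` method in the `pomp/planners/optimization.py` row above implements the standard iLQR quadratic value backup. Written out from the code (with $f$ the dynamics, $c$ the incremental cost, and $V$, $V_x$, $V_{xx}$ the local value expansion at the next time step), the per-step recursion is:

```latex
% Quadratic backup computed in iLQR.backward (Q-terms, gains, and value update)
\begin{align*}
  Q_x    &= c_x + f_x^{\top} V_x, &
  Q_u    &= c_u + f_u^{\top} V_x, \\
  Q_{xx} &= f_x^{\top} V_{xx} f_x + c_{xx}, &
  Q_{uu} &= f_u^{\top} V_{xx} f_u + c_{uu}, \\
  Q_{xu} &= f_x^{\top} V_{xx} f_u + c_{xu}, &
  K &= -Q_{uu}^{-1} Q_{xu}^{\top}, \qquad k = -Q_{uu}^{-1} Q_u, \\
  V_{xx} &\leftarrow Q_{xx} + Q_{xu} K + (Q_{xu} K)^{\top} + K^{\top} Q_{uu} K, &
  V_x &\leftarrow Q_x + Q_{xu} k + K^{\top}\!\left(Q_u + Q_{uu} k\right).
\end{align*}
```

In the code, $Q_{uu}$ is symmetrized and regularized (negative eigenvalues clamped to zero plus a damping term) before inversion, and the resulting step $\delta u = K(x - x_{\mathrm{ref}}) + k$ is scaled by a line-search parameter $\alpha$ in `forward`.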